* of buf_int_ptr and buf_int_count at each interrupt. A
* better method is to poll the MITE before each user
* "read()" to calculate the number of bytes available.
- * mite_bytes_transferred(), mite_bytes_read(), and
- * mite_bytes_in_transit() are provided to get the number
- * of bytes transferred by the mite so far.
*/
chcr |= CHCR_SET_LC_IE;
MDPRINTK("exit mite_prep_dma\n");
}
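As a sketch (not part of this patch) of the polling approach described in the comment above: instead of tracking counts at interrupt time, the read() path can ask the MITE how many bytes have already landed in memory each time it runs. The helper name is illustrative; it uses mite_bytes_written_to_memory_lb() introduced below, and AI_DMA_CHAN plus the async->buf_read_count bookkeeping are assumptions borrowed from the ni_mio_common hunks further down.

static unsigned int ai_bytes_available(struct mite_struct *mite,
				       comedi_async *async)
{
	/* lower bound: bytes the MITE has certainly written to memory */
	u32 in_memory = mite_bytes_written_to_memory_lb(mite, AI_DMA_CHAN);

	/* subtract what the driver has already handed to the user */
	return in_memory - async->buf_read_count;
}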
-unsigned int mite_bytes_read(struct mite_struct *mite, unsigned int chan)
+u32 mite_device_bytes_transferred(struct mite_struct *mite, unsigned int chan)
{
- return readl(mite->mite_io_addr+MITE_DAR(chan));
+ return readl(mite->mite_io_addr + MITE_DAR(chan));
}
-unsigned int mite_bytes_in_transit(struct mite_struct *mite, unsigned int chan)
+u32 mite_bytes_in_transit(struct mite_struct *mite, unsigned int chan)
{
return readl(mite->mite_io_addr + MITE_FCR(chan)) & 0x000000FF;
}
-unsigned int mite_bytes_transferred(struct mite_struct *mite, unsigned int chan)
+/* returns a lower bound on bytes transferred from device to memory */
+u32 mite_bytes_written_to_memory_lb(struct mite_struct *mite, unsigned int chan)
{
- unsigned int bytes_read;
+ u32 device_byte_count;
- bytes_read = mite_bytes_read( mite, chan );
- /* to avoid race, we want to read bytes read before reading bytes
- * in transit */
+ device_byte_count = mite_device_bytes_transferred(mite, chan);
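+ /* to avoid a race, read the device byte count before the in-transit
+  * (fifo) count, so the result stays a lower bound */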
rmb();
- return bytes_read - mite_bytes_in_transit( mite, chan );
+ return device_byte_count - mite_bytes_in_transit(mite, chan);
+}
+
+/* returns an upper bound on bytes transferred from device to memory */
+u32 mite_bytes_written_to_memory_ub(struct mite_struct *mite, unsigned int chan)
+{
+ u32 in_transit_count;
+
+ in_transit_count = mite_bytes_in_transit(mite, chan);
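+ /* opposite read order: taking the in-transit count first makes the
+  * result an upper bound */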
+ rmb();
+ return mite_device_bytes_transferred(mite, chan) - in_transit_count;
+}
+
+/* returns a lower bound on bytes read from memory for transfer to the device */
+u32 mite_bytes_read_from_memory_lb(struct mite_struct *mite, unsigned int chan)
+{
+ u32 device_byte_count;
+
+ device_byte_count = mite_device_bytes_transferred(mite, chan);
+ rmb();
+ return device_byte_count + mite_bytes_in_transit(mite, chan);
+}
+
+/* returns an upper bound on bytes read from memory for transfer to the device */
+u32 mite_bytes_read_from_memory_ub(struct mite_struct *mite, unsigned int chan)
+{
+ u32 in_transit_count;
+
+ in_transit_count = mite_bytes_in_transit(mite, chan);
+ rmb();
+ return mite_device_bytes_transferred(mite, chan) + in_transit_count;
}
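Taken together, the four helpers bracket the true transfer count: the _lb variants bound what the driver may safely act on, while the _ub variants are what the overrun/underrun checks compare against, as the ni_mio_common hunks below do. A minimal illustrative check for the device-to-memory direction follows; the helper name and the old_alloc_count parameter, meant as the byte count of buffer space already handed to the DMA, are assumptions.

static int mite_dma_overwrote_free_area(struct mite_struct *mite,
					unsigned int chan, u32 old_alloc_count)
{
	/* compare the upper bound so a borderline overrun is never missed */
	return (int)(mite_bytes_written_to_memory_ub(mite, chan)
			- old_alloc_count) > 0;
}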
int mite_dma_tcr(struct mite_struct *mite, unsigned int channel)
EXPORT_SYMBOL(mite_list_devices);
EXPORT_SYMBOL(mite_prep_dma);
EXPORT_SYMBOL(mite_buf_change);
-EXPORT_SYMBOL(mite_bytes_transferred);
-EXPORT_SYMBOL(mite_bytes_read);
+EXPORT_SYMBOL(mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL(mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL(mite_bytes_read_from_memory_lb);
+EXPORT_SYMBOL(mite_bytes_read_from_memory_ub);
EXPORT_SYMBOL(mite_bytes_in_transit);
#ifdef DEBUG_MITE
EXPORT_SYMBOL(mite_decode);
int mite_dma_tcr(struct mite_struct *mite, unsigned int channel );
void mite_dma_arm(struct mite_struct *mite, unsigned int channel );
void mite_dma_disarm(struct mite_struct *mite, unsigned int channel );
-unsigned int mite_bytes_transferred(struct mite_struct *mite, unsigned int chan);
-unsigned int mite_bytes_read(struct mite_struct *mite, unsigned int chan);
-unsigned int mite_bytes_in_transit(struct mite_struct *mite, unsigned int chan);
+u32 mite_bytes_written_to_memory_lb(struct mite_struct *mite, unsigned int chan);
+u32 mite_bytes_written_to_memory_ub(struct mite_struct *mite, unsigned int chan);
+u32 mite_bytes_read_from_memory_lb(struct mite_struct *mite, unsigned int chan);
+u32 mite_bytes_read_from_memory_ub(struct mite_struct *mite, unsigned int chan);
+u32 mite_bytes_in_transit(struct mite_struct *mite, unsigned int chan);
#if 0
unsigned long mite_ll_from_kvmem(struct mite_struct *mite,comedi_async *async,int len);
// write alloc as much as we can
comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
- nbytes = mite_bytes_transferred(mite, AI_DMA_CHAN);
+ nbytes = mite_bytes_written_to_memory_lb(mite, AI_DMA_CHAN);
rmb();
- /* We use mite_bytes_read() for the overrun check
- * because it returns an upper bound, and mite_bytes_transferred
- * returns a lower bound on the number of bytes actually
- * transferred */
- if( (int)(mite_bytes_read(mite, AI_DMA_CHAN) - old_alloc_count) > 0 ){
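+ /* use the upper bound (_ub) for the overrun check; the _lb value
+  * read above is only a lower bound on the bytes actually in memory */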
+ if( (int)(mite_bytes_written_to_memory_ub(mite, AI_DMA_CHAN) - old_alloc_count) > 0 ){
printk("ni_mio_common: DMA overwrite of free area\n");
ni_ai_reset(dev,s);
async->events |= COMEDI_CB_OVERFLOW;
int count;
comedi_subdevice *s = dev->subdevices + 1;
comedi_async *async = s->async;
- unsigned int nbytes, new_write_count;
-
+ u32 nbytes_ub, nbytes_lb;
+ unsigned int new_write_count;
+ u32 stop_count = async->cmd.stop_arg * sizeof(sampl_t);
+
writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(AO_DMA_CHAN));
new_write_count = async->buf_write_count;
-
- nbytes = mite_bytes_read(mite, AO_DMA_CHAN);
- if( async->cmd.stop_src == TRIG_COUNT &&
- (int) (nbytes - async->cmd.stop_arg * sizeof( sampl_t ) ) > 0 )
- nbytes = async->cmd.stop_arg * sizeof( sampl_t );
- if( (int)(nbytes - devpriv->last_buf_write_count) > 0 ){
+ mb();
+ nbytes_lb = mite_bytes_read_from_memory_lb(mite, AO_DMA_CHAN);
+ if(async->cmd.stop_src == TRIG_COUNT &&
+ (int) (nbytes_lb - stop_count) > 0)
+ nbytes_lb = stop_count;
+ mb();
+ nbytes_ub = mite_bytes_read_from_memory_ub(mite, AO_DMA_CHAN);
+ if(async->cmd.stop_src == TRIG_COUNT &&
+ (int) (nbytes_ub - stop_count) > 0)
+ nbytes_ub = stop_count;
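+ /* the upper bound tells us if the mite may have read past what the
+  * driver has written into the buffer (an underrun) */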
+ if((int)(nbytes_ub - devpriv->last_buf_write_count) > 0){
rt_printk("ni_mio_common: DMA underrun\n");
ni_ao_reset(dev,s);
async->events |= COMEDI_CB_OVERFLOW;
return;
}
-
+ mb();
devpriv->last_buf_write_count = new_write_count;
- count = nbytes - async->buf_read_count;
- if( count < 0 ){
- rt_printk("ni_mio_common: BUG: negative ao count\n");
- count = 0;
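+ /* only buffer space the lower bound guarantees has been read from
+  * memory may be freed for reuse */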
+ count = nbytes_lb - async->buf_read_count;
+ if(count < 0){
+ return;
}
comedi_buf_read_free(async, count);