static int do_cancel(comedi_device *dev,comedi_subdevice *s);
static int comedi_fasync (int fd, struct file *file, int on);
-static void init_async_buf( comedi_async *async );
static int comedi_ioctl(struct inode * inode,struct file * file,
unsigned int cmd, unsigned long arg)
}
if(bi.bytes_read && (s->subdev_flags & SDF_CMD_READ)){
+ bi.bytes_read = comedi_buf_read_alloc(async, bi.bytes_read);
comedi_buf_read_free(async, bi.bytes_read);
if(!(s->subdev_flags&SDF_RUNNING) &&
if(bi.bytes_written && (s->subdev_flags & SDF_CMD_WRITE)){
bi.bytes_written = comedi_buf_write_alloc( async, bi.bytes_written );
- comedi_buf_munge(dev, s, async->buf_write_alloc_count - async->munge_count);
comedi_buf_write_free(async, bi.bytes_written);
}
bi.buf_write_ptr = async->buf_write_ptr;
bi.buf_read_count = async->buf_read_count;
bi.buf_read_ptr = async->buf_read_ptr;
- /* FIXME this will bug if we ever have a subdevice that supports both read and write commands.
- We need a flag saying which direction the current command is going (CMDF_WRITE?) */
- if((s->subdev_flags & SDF_CMD_READ)){
- unsigned int n_munge_bytes;
- n_munge_bytes = bi.buf_write_count - s->async->munge_count;
- comedi_buf_munge(dev, s, n_munge_bytes);
- }
copyback:
if(copy_to_user(arg, &bi, sizeof(comedi_bufinfo)))
goto cleanup;
}
- init_async_buf( async );
+ comedi_reset_async_buf( async );
async->cb_mask = COMEDI_CB_EOA | COMEDI_CB_BLOCK | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
if(async->cmd.flags & TRIG_WAKE_EOS){
unsigned long size;
int n_pages;
int i;
+ comedi_subdevice *s;
if(!dev->attached)
{
DPRINTK("no driver configured on comedi%i\n", dev->minor);
return -ENODEV;
}
- comedi_subdevice *s = comedi_get_subdevice_by_minor(minor);
+ s = comedi_get_subdevice_by_minor(minor);
if(s == NULL)
{
if(vma->vm_flags & VM_WRITE){
s = dev->write_subdev;
s = dev->read_subdev;
async = s->async;
if(!s->busy
- || comedi_buf_read_n_available(s)>0
+ || comedi_buf_read_n_available(async)>0
|| !(s->subdev_flags&SDF_RUNNING)){
mask |= POLLIN | POLLRDNORM;
}
async = s->async;
if(!s->busy
|| !(s->subdev_flags&SDF_RUNNING)
- || comedi_buf_write_n_available(s)>0){
+ || comedi_buf_write_n_available(async) > 0){
mask |= POLLOUT | POLLWRNORM;
}
}
n -= m;
retval = -EFAULT;
}
- comedi_buf_munge(dev, s, async->buf_write_alloc_count - async->munge_count);
comedi_buf_write_free(async, n);
count+=n;
n=nbytes;
- m = comedi_buf_read_n_available(s);
+ m = comedi_buf_read_n_available(async);
//printk("%d available\n",m);
if(async->buf_read_ptr + m > async->prealloc_bufsz){
m = async->prealloc_bufsz - async->buf_read_ptr;
schedule();
continue;
}
- comedi_buf_munge(dev, s, async->buf_write_count - async->munge_count);
m = copy_to_user(buf, async->prealloc_buf +
async->buf_read_ptr, n);
if(m){
retval = -EFAULT;
}
+ comedi_buf_read_alloc(async, n);
comedi_buf_read_free(async, n);
count+=n;
#endif
if(async){
- init_async_buf( async );
+ comedi_reset_async_buf( async );
}else{
printk("BUG: (?) do_become_nonbusy called with async=0\n");
}
}
}
}
-
-static void init_async_buf( comedi_async *async )
-{
- async->buf_write_alloc_count = 0;
- async->buf_write_count = 0;
- async->buf_read_count = 0;
-
- async->buf_write_ptr = 0;
- async->buf_read_ptr = 0;
-
- async->cur_chan = 0;
- async->scan_progress = 0;
- async->munge_chan = 0;
- async->munge_count = 0;
- async->munge_ptr = 0;
-
- async->events = 0;
-}
-
EXPORT_SYMBOL(comedi_buf_get);
EXPORT_SYMBOL(comedi_buf_read_n_available);
EXPORT_SYMBOL(comedi_buf_write_free);
-EXPORT_SYMBOL(comedi_buf_munge);
EXPORT_SYMBOL(comedi_buf_write_alloc);
EXPORT_SYMBOL(comedi_buf_read_free);
+EXPORT_SYMBOL(comedi_buf_read_alloc);
EXPORT_SYMBOL(comedi_buf_memcpy_to);
EXPORT_SYMBOL(comedi_buf_memcpy_from);
-
+EXPORT_SYMBOL(comedi_reset_async_buf);
printk("BUG: dev->board_name=<%p>\n",dev->board_name);
dev->board_name="BUG";
}
- mb();
+ smp_wmb();
dev->attached=1;
return 0;
return -ENOMEM;
}
memset(async, 0, sizeof(comedi_async));
+ async->subdevice = s;
s->async = async;
#define DEFAULT_BUF_MAXSIZE (64*1024)
/* munging is applied to data by the core as it passes between user
* and kernel space */
-unsigned int comedi_buf_munge( comedi_device *dev, comedi_subdevice *s,
- unsigned int num_bytes )
+unsigned int comedi_buf_munge(comedi_async *async,
+ unsigned int num_bytes)
{
+ comedi_subdevice *s = async->subdevice;
unsigned int count = 0;
+ const unsigned num_sample_bytes = bytes_per_sample(s);
- if( s->munge == NULL || ( s->async->cmd.flags & CMDF_RAWDATA ) )
- return count;
+ if( s->munge == NULL || ( async->cmd.flags & CMDF_RAWDATA ) )
+ {
+ async->munge_count += num_bytes;
+ if((int)(async->munge_count - async->buf_write_count) > 0) BUG();
+ return num_bytes;
+ }
/* don't munge partial samples */
- num_bytes -= num_bytes % bytes_per_sample(s);
+ num_bytes -= num_bytes % num_sample_bytes;
while( count < num_bytes )
{
int block_size;
rt_printk("%s: %s: bug! block_size is negative\n", __FILE__, __FUNCTION__);
break;
}
- if( (int)(s->async->munge_ptr + block_size - s->async->prealloc_bufsz) > 0 )
- block_size = s->async->prealloc_bufsz - s->async->munge_ptr;
+ if( (int)(async->munge_ptr + block_size - async->prealloc_bufsz) > 0 )
+ block_size = async->prealloc_bufsz - async->munge_ptr;
- s->munge( dev, s, s->async->prealloc_buf + s->async->munge_ptr,
- block_size, s->async->munge_chan );
+ s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
+ block_size, async->munge_chan );
- s->async->munge_chan += block_size / bytes_per_sample( s );
- s->async->munge_chan %= s->async->cmd.chanlist_len;
- s->async->munge_count += block_size;
- s->async->munge_ptr += block_size;
- s->async->munge_ptr %= s->async->prealloc_bufsz;
+ smp_wmb(); // barrier ensures data is munged in the buffer before munge_count is incremented
+
+ async->munge_chan += block_size / num_sample_bytes;
+ async->munge_chan %= async->cmd.chanlist_len;
+ async->munge_count += block_size;
+ async->munge_ptr += block_size;
+ async->munge_ptr %= async->prealloc_bufsz;
count += block_size;
}
+ if((int)(async->munge_count - async->buf_write_count) > 0) BUG();
return count;
}
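+/* The async buffer is tracked with five monotonically increasing byte counts,
+ * ordered buf_read_count <= buf_read_alloc_count <= munge_count <=
+ * buf_write_count <= buf_write_alloc_count, with buf_write_alloc_count never
+ * more than prealloc_bufsz ahead of buf_read_count.  The helpers below use
+ * wrap-safe signed differences to enforce this ordering. */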
-unsigned int comedi_buf_write_n_available(comedi_subdevice *s)
+unsigned int comedi_buf_write_n_available(comedi_async *async)
{
- comedi_async *async=s->async;
- unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
+ unsigned int free_end;
unsigned int nbytes;
+ if(async == NULL) return 0;
+
+ free_end = async->buf_read_count + async->prealloc_bufsz;
nbytes = free_end - async->buf_write_alloc_count;
- nbytes -= nbytes % bytes_per_sample(s);
+ nbytes -= nbytes % bytes_per_sample(async->subdevice);
+ /* barrier ensures the read of buf_read_count in this
+ query occurs before any following writes to the buffer which
+ might be based on the return value from this query.
+ */
+ smp_mb();
return nbytes;
}
+/* allocates a chunk for the writer from the free buffer space */
unsigned int comedi_buf_write_alloc(comedi_async *async, unsigned int nbytes)
{
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
if((int)(async->buf_write_alloc_count + nbytes - free_end) > 0){
nbytes = free_end - async->buf_write_alloc_count;
}
-
async->buf_write_alloc_count += nbytes;
-
+ /* barrier ensures the read of buf_read_count above occurs before
+ we write data to the write-alloc'ed buffer space */
+ smp_mb();
return nbytes;
}
+/* allocates nothing unless it can completely fulfill the request */
unsigned int comedi_buf_write_alloc_strict(comedi_async *async,
unsigned int nbytes)
{
if((int)(async->buf_write_alloc_count + nbytes - free_end) > 0){
nbytes = 0;
}
-
async->buf_write_alloc_count += nbytes;
-
+ /* barrier ensures the read of buf_read_count above occurs before
+ we write data to the write-alloc'ed buffer space */
+ smp_mb();
return nbytes;
}
-/* transfers control of a chunk from writer to reader */
-void comedi_buf_write_free(comedi_async *async, unsigned int nbytes)
+/* transfers a chunk from writer to filled buffer space */
+unsigned comedi_buf_write_free(comedi_async *async, unsigned int nbytes)
{
+ if((int)(async->buf_write_count + nbytes - async->buf_write_alloc_count) > 0)
+ {
+ rt_printk("comedi: attempted to write-free more bytes than have been write-allocated.\n");
+ nbytes = async->buf_write_alloc_count - async->buf_write_count;
+ }
async->buf_write_count += nbytes;
async->buf_write_ptr += nbytes;
+ comedi_buf_munge(async, async->buf_write_count - async->munge_count);
if(async->buf_write_ptr >= async->prealloc_bufsz){
async->buf_write_ptr %= async->prealloc_bufsz;
async->events |= COMEDI_CB_EOBUF;
}
+ return nbytes;
}
-/* transfers control of a chunk from reader to free area */
-void comedi_buf_read_free(comedi_async *async, unsigned int nbytes)
+/* allocates a chunk for the reader from filled (and munged) buffer space */
+unsigned comedi_buf_read_alloc(comedi_async *async, unsigned nbytes)
{
+ if((int)(async->buf_read_alloc_count + nbytes - async->munge_count) > 0)
+ {
+ nbytes = async->munge_count - async->buf_read_alloc_count;
+ }
+ async->buf_read_alloc_count += nbytes;
+ /* barrier ensures the read of munge_count occurs before we actually read
+ data out of buffer */
+ smp_rmb();
+ return nbytes;
+}
+
+/* transfers control of a chunk from reader to free buffer space */
+unsigned comedi_buf_read_free(comedi_async *async, unsigned int nbytes)
+{
+ // barrier ensures data has been read out of the buffer before the read count is incremented
+ smp_mb();
+ if((int)(async->buf_read_count + nbytes - async->buf_read_alloc_count) > 0)
+ {
+ rt_printk("comedi: attempted to read-free more bytes than have been read-allocated.\n");
+ nbytes = async->buf_read_alloc_count - async->buf_read_count;
+ }
async->buf_read_count += nbytes;
async->buf_read_ptr += nbytes;
async->buf_read_ptr %= async->prealloc_bufsz;
+ return nbytes;
}
void comedi_buf_memcpy_to( comedi_async *async, unsigned int offset, const void *data,
write_ptr = 0;
}
- barrier();
}
void comedi_buf_memcpy_from(comedi_async *async, unsigned int offset,
if( read_ptr >= async->prealloc_bufsz )
read_ptr %= async->prealloc_bufsz;
- barrier();
while( nbytes )
{
unsigned int block_size;
}
}
-static inline unsigned int _comedi_buf_read_n_available(comedi_async *async)
+unsigned int comedi_buf_read_n_available(comedi_async *async)
{
- return async->buf_write_count - async->buf_read_count;
-}
-
-unsigned int comedi_buf_read_n_available(comedi_subdevice *s)
-{
- comedi_async *async = s->async;
- unsigned int nbytes;
+ unsigned num_bytes;
if(async == NULL)
return 0;
-
- nbytes = _comedi_buf_read_n_available(async);
- nbytes -= nbytes % bytes_per_sample(s);
- return nbytes;
+ num_bytes = async->munge_count - async->buf_read_count;
+ /* barrier ensures the read of munge_count in this
+ query occurs before any following reads of the buffer which
+ might be based on the return value from this query.
+ */
+ smp_rmb();
+ return num_bytes;
}
int comedi_buf_get(comedi_async *async, sampl_t *x)
{
- unsigned int n = _comedi_buf_read_n_available(async);
+ unsigned int n = comedi_buf_read_n_available(async);
- if(n<sizeof(sampl_t))return 0;
+ if(n < sizeof(sampl_t)) return 0;
+ comedi_buf_read_alloc(async, sizeof(sampl_t));
*x = *(sampl_t *)(async->prealloc_buf + async->buf_read_ptr);
comedi_buf_read_free(async, sizeof(sampl_t));
return 1;
return 1;
}
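+/* resets the byte counts, buffer pointers, munge state, and pending events of
+ * an async buffer to their initial (empty) values */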
+void comedi_reset_async_buf(comedi_async *async)
+{
+ async->buf_write_alloc_count = 0;
+ async->buf_write_count = 0;
+ async->buf_read_alloc_count = 0;
+ async->buf_read_count = 0;
+
+ async->buf_write_ptr = 0;
+ async->buf_read_ptr = 0;
+
+ async->cur_chan = 0;
+ async->scan_progress = 0;
+ async->munge_chan = 0;
+ async->munge_count = 0;
+ async->munge_ptr = 0;
+
+ async->events = 0;
+}
+
if(data[0]){
s->state &= ~data[0];
s->state |= (data[0]&data[1]);
-
+
if(data[0]&0xff)
CALLBACK_FUNC(1,_8255_DATA,s->state&0xff,CALLBACK_ARG);
if(data[0]&0xff00)
if(!cmd->stop_src || tmp!=cmd->stop_src)err++;
if(err) return 1;
-
+
/* step 2 */
if(err) return 2;
s->state=0;
s->io_bits=0;
do_config(dev,s);
-
+
return 0;
}
comedi_subdevice *s;
printk("comedi%d: 8255: remove\n",dev->minor);
-
+
for(i=0;i<dev->n_subdevices;i++){
s=dev->subdevices+i;
if(s->type!=COMEDI_SUBD_UNUSED){
bytes_per_scan = sizeof(sampl_t);
}
/* Determine number of scans available in buffer. */
- num_scans = comedi_buf_read_n_available(s) / bytes_per_scan;
+ num_scans = comedi_buf_read_n_available(s->async) / bytes_per_scan;
if (!devpriv->ao_stop_continuous) {
/* Fixed number of scans. */
if (num_scans > devpriv->ao_stop_count) {
DEBUG_PRINT("attempting to load ao buffer %i (0x%x)\n", buffer_index,
priv(dev)->ao_buffer_bus_addr[buffer_index]);
- num_bytes = comedi_buf_read_n_available(dev->write_subdev);
+ num_bytes = comedi_buf_read_n_available(dev->write_subdev->async);
if(num_bytes > DMA_BUFFER_SIZE) num_bytes = DMA_BUFFER_SIZE;
if(cmd->stop_src == TRIG_COUNT && num_bytes > priv(dev)->ao_count)
num_bytes = priv(dev)->ao_count;
{
int num_samples;
int bits_per_sample;
-
+
switch( subd->type )
{
case COMEDI_SUBD_DI:
unsigned int num_bytes )
{
comedi_async *async = subd->async;
- unsigned int bytes_available;
if( num_bytes == 0 ) return 0;
- bytes_available = comedi_buf_read_n_available(subd);
- if( bytes_available < num_bytes )
- {
- num_bytes = bytes_available;
- }
-
+ num_bytes = comedi_buf_read_alloc(async, num_bytes);
comedi_buf_memcpy_from( async, 0, data, num_bytes);
comedi_buf_read_free( async, num_bytes );
increment_scan_progress( subd, num_bytes );
devpriv->current_dma_index=1-i;
- size = comedi_buf_read_n_available(s);
- if(size>devpriv->dma_maxsize)size=devpriv->dma_maxsize;
+ size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if( size == 0){
rt_printk("dt282x: AO underrun\n");
dt282x_ao_cancel(dev,s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
}
- comedi_buf_memcpy_from(s->async, 0, ptr, size);
- comedi_buf_read_free(s->async, size);
prep_ao_dma(dev,i,size);
return;
}
if(x!=0)return -EINVAL;
- size = comedi_buf_read_n_available(s);
- if(size>devpriv->dma_maxsize)size=devpriv->dma_maxsize;
+ size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf, devpriv->dma_maxsize);
if( size == 0){
rt_printk("dt282x: AO underrun\n");
return -EPIPE;
}
- comedi_buf_memcpy_from(s->async, 0, devpriv->dma[0].buf, size);
- comedi_buf_read_free(s->async, size);
prep_ao_dma(dev,0,size);
- size = comedi_buf_read_n_available(s);
- if(size>devpriv->dma_maxsize)size=devpriv->dma_maxsize;
+ size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf, devpriv->dma_maxsize);
if( size == 0){
rt_printk("dt282x: AO underrun\n");
return -EPIPE;
}
- comedi_buf_memcpy_from(s->async, 0, devpriv->dma[1].buf, size);
- comedi_buf_read_free(s->async, size);
prep_ao_dma(dev,1,size);
update_supcsr(DT2821_STRIG);
}
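+/* claims an unused mite DMA channel and returns its index, or -1 if none are free */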
+int mite_alloc_channel(struct mite_struct *mite)
+{
+ // FIXME: add a spin lock so mite_free_channel can be called safely from interrupts
+ int i;
+ for(i = 0; i < mite->num_channels; ++i)
+ {
+ if(mite->channel_allocated[i] == 0)
+ {
+ mite->channel_allocated[i] = 1;
+ return i;
+ }
+ }
+ return -1;
+}
+
+void mite_free_channel(struct mite_struct *mite, unsigned channel)
+{
+ // FIXME: add a spin lock to prevent races with mite_alloc_channel
+ BUG_ON(channel >= mite->num_channels);
+ mite->channel_allocated[channel] = 0;
+}
+
void mite_dma_arm( struct mite_struct *mite, unsigned int channel )
{
int chor;
/* arm */
chor = CHOR_START;
writel(chor, mite->mite_io_addr + MITE_CHOR(channel));
- mite_dma_tcr(mite, channel);
+// mite_dma_tcr(mite, channel);
}
u32 device_byte_count;
device_byte_count = mite_device_bytes_transferred(mite, chan);
- rmb();
return device_byte_count - mite_bytes_in_transit(mite, chan);
}
u32 in_transit_count;
in_transit_count = mite_bytes_in_transit(mite, chan);
- rmb();
return mite_device_bytes_transferred(mite, chan) - in_transit_count;
}
u32 device_byte_count;
device_byte_count = mite_device_bytes_transferred(mite, chan);
- rmb();
return device_byte_count + mite_bytes_in_transit(mite, chan);
}
u32 in_transit_count;
in_transit_count = mite_bytes_in_transit(mite, chan);
- rmb();
return mite_device_bytes_transferred(mite, chan) + in_transit_count;
}
writel(chor, mite->mite_io_addr + MITE_CHOR(channel));
}
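+/* advances the async buffer's write side to account for data the mite has
+ * DMA'd into memory; flags COMEDI_CB_OVERFLOW if the device wrote past the
+ * write-allocated region */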
+int mite_sync_input_dma(struct mite_struct *mite, unsigned mite_channel, comedi_async *async)
+{
+ int count;
+ unsigned int nbytes, old_alloc_count;
+ unsigned int bytes_per_scan = bytes_per_sample(async->subdevice) * async->cmd.chanlist_len;
+
+ old_alloc_count = async->buf_write_alloc_count;
+ // write alloc as much as we can
+ comedi_buf_write_alloc(async, async->prealloc_bufsz);
+
+ nbytes = mite_bytes_written_to_memory_lb(mite, mite_channel);
+ if((int)(mite_bytes_written_to_memory_ub(mite, mite_channel) - old_alloc_count) > 0)
+ {
+ rt_printk("mite: DMA overwrite of free area\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return -1;
+ }
+
+ count = nbytes - async->buf_write_count;
+ /* it's possible count will be negative due to
+ * conservative value returned by mite_bytes_written_to_memory_lb */
+ if( count <= 0 )
+ {
+ return 0;
+ }
+ comedi_buf_write_free(async, count);
+
+ async->scan_progress += count;
+ if(async->scan_progress >= bytes_per_scan)
+ {
+ async->scan_progress %= bytes_per_scan;
+ async->events |= COMEDI_CB_EOS;
+ }
+ async->events |= COMEDI_CB_BLOCK;
+ return 0;
+}
+
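+/* advances the async buffer's read side to account for data the mite has
+ * DMA'd out of memory; reports a DMA underrun if the device read past the
+ * read-allocated region */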
+int mite_sync_output_dma(struct mite_struct *mite, unsigned mite_channel, comedi_async *async)
+{
+ int count;
+ u32 nbytes_ub, nbytes_lb;
+ unsigned int old_alloc_count;
+ u32 stop_count = async->cmd.stop_arg * bytes_per_sample(async->subdevice);
+
+ old_alloc_count = async->buf_read_alloc_count;
+ // read alloc as much as we can
+ comedi_buf_read_alloc(async, async->prealloc_bufsz);
+ nbytes_lb = mite_bytes_read_from_memory_lb(mite, mite_channel);
+ if(async->cmd.stop_src == TRIG_COUNT &&
+ (int) (nbytes_lb - stop_count) > 0)
+ nbytes_lb = stop_count;
+ nbytes_ub = mite_bytes_read_from_memory_ub(mite, mite_channel);
+ if(async->cmd.stop_src == TRIG_COUNT &&
+ (int) (nbytes_ub - stop_count) > 0)
+ nbytes_ub = stop_count;
+ if((int)(nbytes_ub - old_alloc_count) > 0)
+ {
+ rt_printk("mite: DMA underrun\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return -1;
+ }
+ count = nbytes_lb - async->buf_read_count;
+ if(count <= 0)
+ {
+ return 0;
+ }
+ comedi_buf_read_free(async, count);
+
+ async->events |= COMEDI_CB_BLOCK;
+ return 0;
+}
+
#ifdef DEBUG_MITE
static void mite_decode(char **bit_str, unsigned int bits);
EXPORT_SYMBOL(mite_dma_tcr);
EXPORT_SYMBOL(mite_dma_arm);
EXPORT_SYMBOL(mite_dma_disarm);
+EXPORT_SYMBOL(mite_sync_input_dma);
+EXPORT_SYMBOL(mite_sync_output_dma);
EXPORT_SYMBOL(mite_setup);
EXPORT_SYMBOL(mite_unsetup);
#if 0
#endif
EXPORT_SYMBOL(mite_devices);
EXPORT_SYMBOL(mite_list_devices);
+EXPORT_SYMBOL(mite_alloc_channel);
+EXPORT_SYMBOL(mite_free_channel);
EXPORT_SYMBOL(mite_prep_dma);
EXPORT_SYMBOL(mite_buf_change);
EXPORT_SYMBOL(mite_bytes_written_to_memory_lb);
void *daq_io_addr;
struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
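+ /* nonzero entries mark DMA channels currently claimed via mite_alloc_channel */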
+ short channel_allocated[MAX_MITE_DMA_CHANNELS];
int num_channels;
};
int mite_setup(struct mite_struct *mite);
void mite_unsetup(struct mite_struct *mite);
void mite_list_devices(void);
+int mite_alloc_channel(struct mite_struct *mite);
+void mite_free_channel(struct mite_struct *mite, unsigned channel);
int mite_dma_tcr(struct mite_struct *mite, unsigned int channel );
void mite_dma_arm(struct mite_struct *mite, unsigned int channel );
void mite_dma_disarm(struct mite_struct *mite, unsigned int channel );
+int mite_sync_input_dma(struct mite_struct *mite, unsigned mite_channel, comedi_async *async);
+int mite_sync_output_dma(struct mite_struct *mite, unsigned mite_channel, comedi_async *async);
u32 mite_bytes_written_to_memory_lb(struct mite_struct *mite, unsigned int chan);
u32 mite_bytes_written_to_memory_ub(struct mite_struct *mite, unsigned int chan);
u32 mite_bytes_read_from_memory_lb(struct mite_struct *mite, unsigned int chan);
devpriv->counters[i].read_register = ni_gpct_read_register;
devpriv->counters[i].variant = ni_gpct_variant_660x;
devpriv->counters[i].clock_period_ps = 0;
+ devpriv->counters[i].mite = devpriv->mite;
+ devpriv->counters[i].mite_channel = -1;
}else
{
s->type = COMEDI_SUBD_UNUSED;
#ifdef PCIDMA
static void ni_sync_ai_dma(struct mite_struct *mite, comedi_device *dev)
{
- int count;
comedi_subdevice *s = dev->subdevices + 0;
- comedi_async *async = s->async;
- unsigned int nbytes, old_alloc_count;
- unsigned int bytes_per_scan = bytes_per_sample(s) * async->cmd.chanlist_len;
-
- old_alloc_count = async->buf_write_alloc_count;
- // write alloc as much as we can
- comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz);
-
- nbytes = mite_bytes_written_to_memory_lb(mite, AI_DMA_CHAN);
- rmb();
- if( (int)(mite_bytes_written_to_memory_ub(mite, AI_DMA_CHAN) - old_alloc_count) > 0 ){
- rt_printk("ni_mio_common: DMA overwrite of free area\n");
+ int retval = mite_sync_input_dma(mite, AI_DMA_CHAN, s->async);
+ if(retval < 0)
+ {
ni_ai_reset(dev,s);
- async->events |= COMEDI_CB_OVERFLOW;
return;
}
-
- count = nbytes - async->buf_write_count;
- if( count <= 0 ){
- /* it's possible count will be negative due to
- * conservative value returned by mite_bytes_transferred */
- return;
- }
- comedi_buf_write_free(async, count);
-
- async->scan_progress += count;
- if( async->scan_progress >= bytes_per_scan )
- {
- async->scan_progress %= bytes_per_scan;
- async->events |= COMEDI_CB_EOS;
- }
- async->events |= COMEDI_CB_BLOCK;
}
static void mite_handle_b_linkc(struct mite_struct *mite, comedi_device *dev)
{
- int count;
comedi_subdevice *s = dev->subdevices + 1;
- comedi_async *async = s->async;
- u32 nbytes_ub, nbytes_lb;
- unsigned int new_write_count;
- u32 stop_count = async->cmd.stop_arg * sizeof(sampl_t);
writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(AO_DMA_CHAN));
- new_write_count = async->buf_write_count;
- mb();
- nbytes_lb = mite_bytes_read_from_memory_lb(mite, AO_DMA_CHAN);
- if(async->cmd.stop_src == TRIG_COUNT &&
- (int) (nbytes_lb - stop_count) > 0)
- nbytes_lb = stop_count;
- mb();
- nbytes_ub = mite_bytes_read_from_memory_ub(mite, AO_DMA_CHAN);
- if(async->cmd.stop_src == TRIG_COUNT &&
- (int) (nbytes_ub - stop_count) > 0)
- nbytes_ub = stop_count;
- if((int)(nbytes_ub - devpriv->last_buf_write_count) > 0){
- rt_printk("ni_mio_common: DMA underrun\n");
+ if(mite_sync_output_dma(mite, AO_DMA_CHAN, s->async) < 0)
+ {
ni_ao_reset(dev,s);
- async->events |= COMEDI_CB_OVERFLOW;
return;
}
- mb();
- devpriv->last_buf_write_count = new_write_count;
-
- count = nbytes_lb - async->buf_read_count;
- if(count < 0){
- return;
- }
- comedi_buf_read_free(async, count);
-
- async->events |= COMEDI_CB_BLOCK;
}
+
// #define DEBUG_DMA_TIMING
static int ni_ao_wait_for_dma_load( comedi_device *dev )
{
{
int n;
- n = comedi_buf_read_n_available(s);
+ n = comedi_buf_read_n_available(s->async);
if(n==0){
s->async->events |= COMEDI_CB_OVERFLOW;
return 0;
ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
/* load some data */
- n = comedi_buf_read_n_available(s);
+ n = comedi_buf_read_n_available(s->async);
if(n==0)return 0;
n /= sizeof(sampl_t);
mite_dma_arm(mite, AI_DMA_CHAN);
}
-static void ni_ao_setup_MITE_dma(comedi_device *dev,comedi_cmd *cmd)
+static void ni_ao_setup_MITE_dma(comedi_device *dev, comedi_cmd *cmd)
{
struct mite_struct *mite = devpriv->mite;
struct mite_channel *mite_chan = &mite->channels[ AO_DMA_CHAN ];
comedi_subdevice *s = dev->subdevices + 1;
- devpriv->last_buf_write_count = s->async->buf_write_count;
+ /* read alloc the entire buffer */
+ comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz);
+ /* Barrier is intended to ensure comedi_buf_read_alloc
+ is done touching the async struct before we write
+ to the mite's registers and arm it. */
+ smp_wmb();
+
mite_chan->current_link = 0;
mite_chan->dir = COMEDI_OUTPUT;
if(boardtype.reg_type & (ni_reg_611x | ni_reg_6713))
s->insn_read = ni_gpct_insn_read;
s->insn_write = ni_gpct_insn_write;
s->insn_config = ni_gpct_insn_config;
+ s->do_cmd = ni_gpct_cmd;
+ s->do_cmdtest = ni_gpct_cmdtest;
+ s->cancel = ni_gpct_cancel;
s->private = &devpriv->counters[j];
devpriv->counters[j].dev = dev;
devpriv->counters[j].variant = ni_gpct_variant_e_series;
}
devpriv->counters[j].clock_period_ps = 0;
+#ifdef PCIDMA
+ devpriv->counters[j].mite = devpriv->mite;
+#endif
+ devpriv->counters[j].mite_channel = -1;
ni_tio_init_counter(&devpriv->counters[j]);
}
static int ni_gpct_cmd(comedi_device *dev, comedi_subdevice *s)
{
- return 0;
+#ifdef PCIDMA
+ struct ni_gpct *counter = s->private;
+ return ni_tio_cmd(counter, s->async);
+#else
+ return -EIO;
+#endif
}
static int ni_gpct_cmdtest(comedi_device *dev, comedi_subdevice *s, comedi_cmd *cmd)
{
- return 0;
+ struct ni_gpct *counter = s->private;
+ return ni_tio_cmdtest(counter);
}
static int ni_gpct_cancel(comedi_device *dev, comedi_subdevice *s)
{
- return 0;
+ struct ni_gpct *counter = s->private;
+ return ni_tio_cancel(counter);
}
/*
struct mite_struct *mite;
NI_PRIVATE_COMMON
-
- unsigned int last_buf_write_count;
}ni_private;
#define devpriv ((ni_private *)dev->private)
*/
#include "ni_tio.h"
+#include "mite.h"
static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, unsigned generic_clock_source);
static unsigned ni_tio_generic_clock_src_select(struct ni_gpct *counter);
return 0;
}
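+/* maps a counter index to its Gi DMA configuration register */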
+static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
+{
+ switch(counter_index)
+ {
+ case 0:
+ return NITIO_G0_DMA_Config_Reg;
+ break;
+ case 1:
+ return NITIO_G1_DMA_Config_Reg;
+ break;
+ case 2:
+ return NITIO_G2_DMA_Config_Reg;
+ break;
+ case 3:
+ return NITIO_G3_DMA_Config_Reg;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return 0;
+}
+
enum Gi_Auto_Increment_Reg_Bits
{
Gi_Auto_Increment_Mask = 0xff
return 0x1 << (2 + (counter_index % 2));
}
+enum Gi_DMA_Config_Reg_Bits
+{
+ Gi_DMA_Enable_Bit = 0x1,
+ Gi_DMA_Write_Bit = 0x2,
+ Gi_DMA_Int_Bit = 0x4
+};
+
static const lsampl_t counter_status_mask = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
static int __init ni_tio_init_module(void)
return 0;
}
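+/* write-allocs the whole async buffer, points the mite at it for input DMA,
+ * and arms both the mite and the counter */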
+static int ni_tio_input_cmd(struct ni_gpct *counter, comedi_async *async)
+{
+ comedi_cmd *cmd = &async->cmd;
+ struct mite_channel *mite_chan = &counter->mite->channels[counter->mite_channel];
+
+ /* write alloc the entire buffer */
+ comedi_buf_write_alloc(async, async->prealloc_bufsz);
+
+ mite_chan->current_link = 0;
+ mite_chan->dir = COMEDI_INPUT;
+ mite_prep_dma(counter->mite, counter->mite_channel, 32, 32);
+ if(counter->variant == ni_gpct_variant_m_series ||
+ counter->variant == ni_gpct_variant_660x)
+ {
+ counter->write_register(counter, Gi_DMA_Enable_Bit, NITIO_Gi_DMA_Config_Reg(counter->counter_index));
+ }
+ /* start the MITE */
+ mite_dma_arm(counter->mite, counter->mite_channel);
+ return ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+}
+
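+/* stub for buffered output commands; not implemented yet */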
+static int ni_tio_output_cmd(struct ni_gpct *counter, comedi_async *async)
+{
+ return 0;
+}
+
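+/* entry point for counter commands; requires a mite DMA channel and dispatches
+ * on CMDF_WRITE to the input or output handler */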
+int ni_tio_cmd(struct ni_gpct *counter, comedi_async *async)
+{
+ comedi_cmd *cmd = &async->cmd;
+
+ if(counter->mite == NULL || counter->mite_channel < 0)
+ {
+ rt_printk("ni_tio: commands only supported with DMA. Interrupt-driven commands not yet implemented.\n");
+ return -EIO;
+ }
+ ni_tio_reset_count_and_disarm(counter);
+ if(cmd->flags & CMDF_WRITE)
+ {
+ return ni_tio_output_cmd(counter, async);
+ }else
+ {
+ return ni_tio_input_cmd(counter, async);
+ }
+}
+
+int ni_tio_cmdtest(struct ni_gpct *counter)
+{
+ return 0;
+}
+
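+/* cancels a counter command by disarming its mite DMA channel, if any */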
+int ni_tio_cancel(struct ni_gpct *counter)
+{
+ if(counter->mite == NULL || counter->mite_channel < 0) return 0;
+ mite_dma_disarm(counter->mite, counter->mite_channel);
+ return 0;
+}
+
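+/* placeholder; interrupt handling for counter commands is not implemented yet */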
+void ni_tio_interrupt_handler(struct ni_gpct *counter, struct mite_struct *mite, unsigned mite_channel)
+{}
+
EXPORT_SYMBOL_GPL(ni_tio_rinsn);
EXPORT_SYMBOL_GPL(ni_tio_winsn);
+EXPORT_SYMBOL_GPL(ni_tio_cmd);
+EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
+EXPORT_SYMBOL_GPL(ni_tio_cancel);
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
EXPORT_SYMBOL_GPL(ni_tio_init_counter);
#include <linux/comedidev.h>
+// forward declarations
+struct mite_struct;
+
enum ni_gpct_register
{
NITIO_G0_Autoincrement_Reg,
NITIO_G23_Joint_Status1_Reg,
NITIO_G01_Joint_Status2_Reg,
NITIO_G23_Joint_Status2_Reg,
+ NITIO_G0_DMA_Config_Reg,
+ NITIO_G1_DMA_Config_Reg,
+ NITIO_G2_DMA_Config_Reg,
+ NITIO_G3_DMA_Config_Reg,
+ NITIO_G0_DMA_Status_Reg,
+ NITIO_G1_DMA_Status_Reg,
+ NITIO_G2_DMA_Status_Reg,
+ NITIO_G3_DMA_Status_Reg,
NITIO_Num_Registers,
};
unsigned (*read_register)(struct ni_gpct *this, enum ni_gpct_register reg);
enum ni_gpct_variant variant;
uint64_t clock_period_ps; /* clock period in picoseconds */
+ struct mite_struct *mite;
+ int mite_channel; /* -1 when no channel is allocated to the counter */
unsigned regs[MAX_NUM_NITIO_REGS];
};
extern int ni_tio_winsn(struct ni_gpct *counter,
comedi_insn *insn,
lsampl_t * data);
+extern int ni_tio_cmd(struct ni_gpct *counter, comedi_async *async);
+extern int ni_tio_cmdtest(struct ni_gpct *counter);
+extern int ni_tio_cancel(struct ni_gpct *counter);
#endif /* _COMEDI_NI_TIO_H */
if( subdevice >= dev->n_subdevices ) return -1;
async = s->async;
if(async == NULL) return 0;
- num_bytes = comedi_buf_read_n_available(s);
- comedi_buf_munge( dev, s, async->buf_write_count - async->munge_count );
+ num_bytes = comedi_buf_read_n_available(s->async);
return num_bytes;
}
num_bytes = buf_user_count - async->buf_read_count;
if( num_bytes < 0 ) return -1;
- comedi_buf_read_free( async, num_bytes );
+ comedi_buf_read_alloc(async, num_bytes);
+ comedi_buf_read_free(async, num_bytes);
return 0;
}
async = s->async;
if( async == NULL ) return -1;
- comedi_buf_read_free( async, num_bytes );
+ comedi_buf_read_alloc(async, num_bytes);
+ comedi_buf_read_free(async, num_bytes);
return 0;
}
async = s->async;
if( async == NULL ) return -1;
bytes_written = comedi_buf_write_alloc(async, num_bytes);
- comedi_buf_munge(dev, s, async->buf_write_alloc_count - async->munge_count);
comedi_buf_write_free(async, bytes_written);
if(bytes_written != num_bytes) return -1;
return 0;
return dev->minor;
}
-static void init_async_buf( comedi_async *async )
-{
- async->buf_read_count = 0;
- async->buf_write_count = 0;
- async->buf_write_alloc_count = 0;
- async->buf_read_ptr = 0;
- async->buf_write_ptr = 0;
- async->cur_chan = 0;
- async->scan_progress = 0;
- async->munge_chan = 0;
- async->munge_count = 0;
- async->munge_ptr = 0;
- async->events = 0;
-}
-
int comedi_command(comedi_t *d,comedi_cmd *cmd)
{
comedi_device *dev = (comedi_device *)d;
s->subdev_flags |= SDF_RUNNING;
- init_async_buf( async );
+ comedi_reset_async_buf( async );
return s->do_cmd(dev,s);
}
};
struct comedi_async_struct{
+ comedi_subdevice *subdevice;
+
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
unsigned long *buf_page_list; /* physical address of each page */
unsigned int max_bufsize; /* maximum buffer size, bytes */
unsigned int mmap_count; /* current number of mmaps of prealloc_buf */
- volatile unsigned int buf_write_count; /* byte count for writer (write completed) */
- volatile unsigned int buf_write_alloc_count; /* byte count for writer (allocated for writing) */
- volatile unsigned int buf_read_count; /* byte count for reader (read completed)*/
+ unsigned int buf_write_count; /* byte count for writer (write completed) */
+ unsigned int buf_write_alloc_count; /* byte count for writer (allocated for writing) */
+ unsigned int buf_read_count; /* byte count for reader (read completed)*/
+ unsigned int buf_read_alloc_count; /* byte count for reader (allocated for reading)*/
unsigned int buf_write_ptr; /* buffer marker for writer */
unsigned int buf_read_ptr; /* buffer marker for reader */
static inline int alloc_subdevices(comedi_device *dev, unsigned int num_subdevices)
{
const int size = sizeof(comedi_subdevice) * num_subdevices;
+ unsigned i;
dev->n_subdevices = num_subdevices;
dev->subdevices = kmalloc(size,GFP_KERNEL);
if(!dev->subdevices)
return -ENOMEM;
memset(dev->subdevices,0,size);
- unsigned i;
for(i = 0; i < num_subdevices; ++i)
{
dev->subdevices[i].device = dev;
int comedi_buf_put(comedi_async *async, sampl_t x);
int comedi_buf_get(comedi_async *async, sampl_t *x);
-unsigned int comedi_buf_write_n_available(comedi_subdevice *s);
+unsigned int comedi_buf_write_n_available(comedi_async *async);
unsigned int comedi_buf_write_alloc(comedi_async *async, unsigned int nbytes);
unsigned int comedi_buf_write_alloc_strict(comedi_async *async, unsigned int nbytes);
-void comedi_buf_write_free(comedi_async *async, unsigned int nbytes);
-void comedi_buf_read_free(comedi_async *async, unsigned int nbytes);
-unsigned int comedi_buf_read_n_available(comedi_subdevice *s);
+unsigned comedi_buf_write_free(comedi_async *async, unsigned int nbytes);
+unsigned comedi_buf_read_alloc(comedi_async *async, unsigned nbytes);
+unsigned comedi_buf_read_free(comedi_async *async, unsigned int nbytes);
+unsigned int comedi_buf_read_n_available(comedi_async *async);
void comedi_buf_memcpy_to( comedi_async *async, unsigned int offset, const void *source,
unsigned int num_bytes );
void comedi_buf_memcpy_from( comedi_async *async, unsigned int offset, void *destination,
unsigned int num_bytes );
-unsigned int comedi_buf_munge( comedi_device *dev, comedi_subdevice *s,
- unsigned int num_bytes );
+
+void comedi_reset_async_buf(comedi_async *async);
static inline void* comedi_aux_data(int options[], int n)
{
address += options[COMEDI_DEVCONF_AUX_DATA1_LENGTH];
if(n >= 3)
address += options[COMEDI_DEVCONF_AUX_DATA2_LENGTH];
+ BUG_ON(n > 3);
return (void*) address;
}
//#ifdef CONFIG_COMEDI_RT