us->type = s->type;
us->n_chan = s->n_chan;
us->subd_flags = s->subdev_flags;
+ if(comedi_get_subdevice_runflags(s) & SRF_RUNNING)
+ us->subd_flags |= SDF_RUNNING;
#define TIMER_nanosec 5 /* backwards compatibility */
us->timer_type = TIMER_nanosec;
us->len_chanlist = s->len_chanlist;
bi.bytes_read = comedi_buf_read_alloc(async, bi.bytes_read);
comedi_buf_read_free(async, bi.bytes_read);
- if(!(s->subdev_flags&SDF_RUNNING) &&
- !(s->runflags & SRF_ERROR) &&
- async->buf_write_count==async->buf_read_count){
+ if(!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+ async->buf_write_count == async->buf_read_count){
do_become_nonbusy(dev,s);
}
}
async->cb_mask |= COMEDI_CB_EOS;
}
- s->runflags=SRF_USER;
-
- s->subdev_flags|=SDF_RUNNING;
+ comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
#ifdef CONFIG_COMEDI_RT
if(async->cmd.flags&TRIG_RT){
comedi_switch_to_rt(dev);
- s->runflags |= SRF_RT;
+ comedi_set_subdevice_runflags(s, SRF_RT, SRF_RT);
}
#endif
{
int ret=0;
- if((s->subdev_flags&SDF_RUNNING) && s->cancel)
+ if((comedi_get_subdevice_runflags(s) & SRF_RUNNING) && s->cancel)
ret=s->cancel(dev,s);
do_become_nonbusy(dev,s);
async = s->async;
if(!s->busy
|| comedi_buf_read_n_available(async)>0
- || !(s->subdev_flags&SDF_RUNNING)){
+ || !(comedi_get_subdevice_runflags(s) & SRF_RUNNING)){
mask |= POLLIN | POLLRDNORM;
}
}
s = dev->write_subdev;
async = s->async;
if(!s->busy
- || !(s->subdev_flags&SDF_RUNNING)
+ || !(comedi_get_subdevice_runflags(s) & SRF_RUNNING)
|| comedi_buf_write_n_available(async) > 0){
mask |= POLLOUT | POLLWRNORM;
}
retval=-ERESTARTSYS;
break;
}
- if(!(s->subdev_flags&SDF_RUNNING)){
- if(s->runflags & SRF_ERROR){
+ if(!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)){
+ if(comedi_get_subdevice_runflags(s) & SRF_ERROR){
retval = -EPIPE;
}else{
retval = 0;
if(m<n)n=m;
if(n==0){
- if(!(s->subdev_flags&SDF_RUNNING)){
+ if(!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)){
do_become_nonbusy(dev,s);
- if(s->runflags & SRF_ERROR){
+ if(comedi_get_subdevice_runflags(s) & SRF_ERROR){
retval = -EPIPE;
}else{
retval = 0;
buf+=n;
break; /* makes device work like a pipe */
}
- if(!(s->subdev_flags&SDF_RUNNING) &&
- !(s->runflags & SRF_ERROR) &&
+ if(!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
async->buf_read_count - async->buf_write_count == 0)
{
do_become_nonbusy(dev,s);
{
comedi_async *async = s->async;
- s->subdev_flags &= ~SDF_RUNNING;
+ comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
#ifdef CONFIG_COMEDI_RT
- if(s->runflags&SRF_RT){
+ if(comedi_get_subdevice_runflags(s) & SRF_RT){
comedi_switch_to_non_rt(dev);
- s->runflags &= ~SRF_RT;
+ comedi_set_subdevice_runflags(s, SRF_RT, 0);
}
#endif
if(async){
rt_printk("comedi%d: %s: %s\n", dev->minor, dev->driver->driver_name, s);
}
-void comedi_event(comedi_device *dev,comedi_subdevice *s, unsigned int mask)
+void comedi_event(comedi_device *dev, comedi_subdevice *s, unsigned useless)
{
comedi_async *async = s->async;
- mask = s->async->events;
- s->async->events = 0;
-
//DPRINTK("comedi_event 0x%x\n",mask);
- if( (s->subdev_flags & SDF_RUNNING) == 0)
+ if((comedi_get_subdevice_runflags(s) & SRF_RUNNING) == 0)
return;
- if(mask&(COMEDI_CB_EOA|COMEDI_CB_ERROR|COMEDI_CB_OVERFLOW)){
- s->subdev_flags &= ~SDF_RUNNING;
+ if(s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)){
+ comedi_set_subdevice_runflags(s, SRF_RUNNING, 0);
}
/* remember if an error event has occured, so an error
* can be returned the next time the user does a read() */
- if(mask & (COMEDI_CB_ERROR|COMEDI_CB_OVERFLOW)){
- s->runflags |= SRF_ERROR;
+ if(s->async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)){
+ comedi_set_subdevice_runflags(s, SRF_ERROR, SRF_ERROR);
}
- if(async->cb_mask&mask){
- if(s->runflags&SRF_USER){
+ if(async->cb_mask & s->async->events){
+ if(comedi_get_subdevice_runflags(s) & SRF_USER){
if(dev->rt){
#ifdef CONFIG_COMEDI_RT
}
}
}else{
- if(async->cb_func)async->cb_func(mask,async->cb_arg);
+ if(async->cb_func)async->cb_func(s->async->events, async->cb_arg);
/* XXX bug here. If subdevice A is rt, and
* subdevice B tries to callback to a normal
* linux kernel function, it will be at the
* common, I'm not going to worry about it. */
}
}
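+ /* clear the events only after the wakeups/callbacks above have consumed them */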
+ s->async->events = 0;
+}
+
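+/* Set the bits selected by 'mask' to the values given in 'bits' in the
+ * subdevice's runflags, under runflags_lock, so readers using
+ * comedi_get_subdevice_runflags() always see a consistent value. */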
+void comedi_set_subdevice_runflags(comedi_subdevice *s, unsigned mask, unsigned bits)
+{
+ unsigned long flags;
+
+ comedi_spin_lock_irqsave(&s->runflags_lock, flags);
+ s->runflags &= ~mask;
+ s->runflags |= (bits & mask);
+ comedi_spin_unlock_irqrestore(&s->runflags_lock, flags);
+}
+
+unsigned comedi_get_subdevice_runflags(comedi_subdevice *s)
+{
+ unsigned long flags;
+ unsigned runflags;
+
+ comedi_spin_lock_irqsave(&s->runflags_lock, flags);
+ runflags = s->runflags;
+ comedi_spin_unlock_irqrestore(&s->runflags_lock, flags);
+ return runflags;
}
//EXPORT_SYMBOL(comedi_eobuf);
//EXPORT_SYMBOL(comedi_eos);
EXPORT_SYMBOL(comedi_event);
+EXPORT_SYMBOL(comedi_get_subdevice_runflags);
+EXPORT_SYMBOL(comedi_set_subdevice_runflags);
EXPORT_SYMBOL(range_bipolar10);
EXPORT_SYMBOL(range_bipolar5);
EXPORT_SYMBOL(range_bipolar2_5);
// spin lock to prevent races with mite_request_channel
comedi_spin_lock_irqsave(&mite->lock, flags);
- mite->channel_allocated[mite_chan->channel] = 0;
- mite_chan->ring = NULL;
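+ // disarm and reset the channel on release, so callers no longer need to do it themselves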
+ if(mite->channel_allocated[mite_chan->channel])
+ {
+ mite_dma_disarm(mite_chan);
+ mite_dma_reset(mite_chan);
+ mite->channel_allocated[mite_chan->channel] = 0;
+ mite_chan->ring = NULL;
+ mmiowb();
+ }
comedi_spin_unlock_irqrestore(&mite->lock, flags);
}
DAQ 6601/6602 User Manual (NI 322137B-01)
Things to do:
-- Add DMA support (see mite.c and ni_pcidio.c for examples)
-- Add commands (copy from ni_pcidio.c ?)
-- Add interrupts
+- Add commands (see ni_tio.c and ni_mio_common.c)
*/
#include <linux/comedidev.h>
case NITIO_G3_DMA_Status_Reg:
ni_660x_register = G3DMAStatusRegister;
break;
+ case NITIO_G0_Interrupt_Acknowledge_Reg:
+ ni_660x_register = G0InterruptAcknowledge;
+ break;
+ case NITIO_G1_Interrupt_Acknowledge_Reg:
+ ni_660x_register = G1InterruptAcknowledge;
+ break;
+ case NITIO_G2_Interrupt_Acknowledge_Reg:
+ ni_660x_register = G2InterruptAcknowledge;
+ break;
+ case NITIO_G3_Interrupt_Acknowledge_Reg:
+ ni_660x_register = G3InterruptAcknowledge;
+ break;
+ case NITIO_G0_Status_Reg:
+ ni_660x_register = G0StatusRegister;
+ break;
+ case NITIO_G1_Status_Reg:
+ ni_660x_register = G1StatusRegister;
+ break;
+ case NITIO_G2_Status_Reg:
+ ni_660x_register = G2StatusRegister;
+ break;
+ case NITIO_G3_Status_Reg:
+ ni_660x_register = G3StatusRegister;
+ break;
default:
rt_printk("%s: unhandled register 0x%x in switch.\n", __FUNCTION__, reg);
BUG();
devpriv->counter_dev->counters[i].chip_index = i / CTRS_PER_CHIP;
devpriv->counter_dev->counters[i].counter_index = i % CTRS_PER_CHIP;
- devpriv->counter_dev->counters[i].clock_period_ps = 0;
- devpriv->counter_dev->counters[i].mite_chan = NULL;
}else
{
s->type = COMEDI_SUBD_UNUSED;
static int ni_gpct_cmd(comedi_device *dev,comedi_subdevice *s);
static int ni_gpct_cmdtest(comedi_device *dev, comedi_subdevice *s, comedi_cmd *cmd);
static int ni_gpct_cancel(comedi_device *dev,comedi_subdevice *s);
-static void handle_gpct_interrupt(comedi_device *dev, unsigned short counter_index,
- unsigned short is_terminal_count);
+static void handle_gpct_interrupt(comedi_device *dev, unsigned short counter_index);
static int init_cs5529(comedi_device *dev);
static int cs5529_do_conversion(comedi_device *dev, unsigned short *data);
comedi_insn *insn, lsampl_t *data);
static int ni_set_master_clock(comedi_device *dev, unsigned source, unsigned period_ns);
-static void ack_a_interrupt(comedi_device *dev, unsigned short a_status, unsigned short g_status);
-static void ack_b_interrupt(comedi_device *dev, unsigned short b_status, unsigned short g_status);
+static void ack_a_interrupt(comedi_device *dev, unsigned short a_status);
+static void ack_b_interrupt(comedi_device *dev, unsigned short b_status);
enum aimodes
{
static int ni_request_gpct_mite_channel(comedi_device *dev, unsigned gpct_index)
{
unsigned long flags;
+ struct mite_channel *mite_chan;
BUG_ON(gpct_index >= NUM_GPCT);
comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
- devpriv->counter_dev->counters[gpct_index].mite_chan = mite_request_channel(devpriv->mite, devpriv->gpct_mite_ring[gpct_index]);
- if(devpriv->counter_dev->counters[gpct_index].mite_chan == NULL)
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->gpct_mite_ring[gpct_index]);
+ if(mite_chan == NULL)
{
comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
comedi_error(dev, "failed to reserve mite dma channel for counter.");
return -EBUSY;
}
- ni_set_gpct_dma_channel(dev, gpct_index, devpriv->counter_dev->counters[gpct_index].mite_chan->channel);
+ ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index], mite_chan);
+ ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
if(devpriv->ai_mite_chan)
{
ni_set_ai_dma_channel(dev, -1);
- mite_dma_disarm(devpriv->ai_mite_chan);
- mite_dma_reset(devpriv->ai_mite_chan);
mite_release_channel(devpriv->ai_mite_chan);
devpriv->ai_mite_chan = NULL;
}
if(devpriv->ao_mite_chan)
{
ni_set_ao_dma_channel(dev, -1);
- mite_dma_disarm(devpriv->ao_mite_chan);
- mite_dma_reset(devpriv->ao_mite_chan);
mite_release_channel(devpriv->ao_mite_chan);
devpriv->ao_mite_chan = NULL;
}
if(devpriv->counter_dev->counters[gpct_index].mite_chan)
{
ni_set_gpct_dma_channel(dev, gpct_index, -1);
- mite_dma_disarm(devpriv->counter_dev->counters[gpct_index].mite_chan);
- mite_dma_reset(devpriv->counter_dev->counters[gpct_index].mite_chan);
mite_release_channel(devpriv->counter_dev->counters[gpct_index].mite_chan);
- devpriv->counter_dev->counters[gpct_index].mite_chan = NULL;
+ ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index], NULL);
}
comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#endif // PCIDMA
unsigned short b_status;
unsigned int ai_mite_status = 0;
unsigned int ao_mite_status = 0;
- unsigned g_status;
unsigned long flags;
struct mite_struct *mite = devpriv->mite;
comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
if(devpriv->ai_mite_chan)
+ {
ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
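+ /* acknowledge the mite link-complete interrupt here, while mite_channel_lock is held */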
+ if(ai_mite_status & CHSR_LINKC)
+ writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(devpriv->ai_mite_chan->channel));
+ }
if(devpriv->ao_mite_chan)
+ {
ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
+ if(ao_mite_status & CHSR_LINKC)
+ writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(devpriv->ao_mite_chan->channel));
+ }
comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
}
- g_status = devpriv->stc_readw(dev, G_Status_Register);
- ack_a_interrupt(dev, a_status, g_status);
- ack_b_interrupt(dev, b_status, g_status);
+ ack_a_interrupt(dev, a_status);
+ ack_b_interrupt(dev, b_status);
if((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT))
handle_a_interrupt(dev, a_status, ai_mite_status);
if((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT))
handle_b_interrupt(dev, b_status, ao_mite_status);
- handle_gpct_interrupt(dev, 0, (a_status & G0_TC_St));
- handle_gpct_interrupt(dev, 1, (b_status & G1_TC_St));
+ handle_gpct_interrupt(dev, 0);
+ handle_gpct_interrupt(dev, 1);
comedi_spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
static void mite_handle_b_linkc(struct mite_struct *mite, comedi_device *dev)
{
comedi_subdevice *s = dev->subdevices + NI_AO_SUBDEV;
+ unsigned long flags;
- if(devpriv->ao_mite_chan == NULL) return;
- writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(devpriv->ao_mite_chan->channel));
-
- if(mite_sync_output_dma(devpriv->ao_mite_chan, s->async) < 0)
+ comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
+ if(devpriv->ao_mite_chan)
{
- s->async->events |= COMEDI_CB_ERROR;
- return;
+ if(mite_sync_output_dma(devpriv->ao_mite_chan, s->async) < 0)
+ {
+ /* note the error; don't return early here, the spinlock below must still be released */
+ s->async->events |= COMEDI_CB_ERROR;
+ }
}
+ comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-// #define DEBUG_DMA_TIMING
static int ni_ao_wait_for_dma_load( comedi_device *dev )
{
static const int timeout = 10000;
int i;
-#ifdef DEBUG_DMA_TIMING
- struct timeval start;
- do_gettimeofday(&start);
-#endif
for(i = 0; i < timeout; i++)
{
unsigned short b_status;
to slow the dma transfer down */
comedi_udelay(10);
}
-#ifdef DEBUG_DMA_TIMING
- rt_printk("looped %i times waiting for ao fifo load.\n", i);
- struct timeval now;
- do_gettimeofday(&now);
- unsigned elapsed_usec = 1000000 * (now.tv_sec - start.tv_sec) + now.tv_usec - start.tv_usec;
- rt_printk("total elapsed usec=%i\n", elapsed_usec);
- do_gettimeofday(&start);
- unsigned b_status;
- for(i = 0; i < 100; ++i)
- {
-// devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
- b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
- }
- do_gettimeofday(&now);
- elapsed_usec = 1000000 * (now.tv_sec - start.tv_sec) + now.tv_usec - start.tv_usec;
- rt_printk("usec to do 100 word xfers=%i\n", elapsed_usec);
-#endif
if( i == timeout )
{
comedi_error(dev, "timed out waiting for dma load");
comedi_event(dev, s, events);
}
-static void handle_gpct_interrupt(comedi_device *dev, unsigned short counter_index, unsigned short is_terminal_count)
+static void handle_gpct_interrupt(comedi_device *dev, unsigned short counter_index)
{
- unsigned gpct_mite_status;
- unsigned long flags;
- struct mite_channel *mite_chan;
comedi_subdevice *s = dev->subdevices + NI_GPCT_SUBDEV(counter_index);
- comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- mite_chan = devpriv->counter_dev->counters[counter_index].mite_chan;
- if(mite_chan == NULL)
- {
- comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
- return;
- }
- gpct_mite_status = mite_get_status(mite_chan);
- if(gpct_mite_status & CHSR_LINKC)
- {
- writel(CHOR_CLRLC, devpriv->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
- }
- mite_sync_input_dma(mite_chan, s->async);
-
- comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
+ ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index], s);
if(s->async->events)
ni_event(dev, s, s->async->events);
}
- /* During buffered input counter operation for e-series, the gate interrupt is acked
- automatically by the dma controller, due to the Gi_Read/Write_Acknowledges_IRQ bits
- in the input select register. */
-int should_ack_gate(comedi_device *dev, unsigned counter_index)
-{
- unsigned long flags;
- int retval = 0;
-
- if(boardtype.reg_type & ni_reg_m_series_mask) return 1;
-
- comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- {
- struct mite_channel *mite_chan = devpriv->counter_dev->counters[counter_index].mite_chan;
-
- if(mite_chan == NULL ||
- mite_chan->dir != COMEDI_INPUT ||
- (mite_done(devpriv->counter_dev->counters[counter_index].mite_chan)))
- {
- retval = 1;
- }
- }
- comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
- return retval;
-}
-
-static void ack_a_interrupt(comedi_device *dev, unsigned short a_status, unsigned short g_status)
+static void ack_a_interrupt(comedi_device *dev, unsigned short a_status)
{
unsigned short ack = 0;
/* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */
ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack*/;
}
- if(a_status & G0_TC_St)
- {
- ack |= G0_TC_Interrupt_Ack;
- }
- if(a_status & G0_Gate_Interrupt_St)
- {
- if(should_ack_gate(dev, 0))
- ack |= G0_Gate_Interrupt_Ack;
- }
- if(g_status & G0_Gate_Error_St)
- {
- ack |= G0_Gate_Error_Confirm;
- }
if(ack) devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
}
ni_mio_print_status_a(status);
#endif
#ifdef PCIDMA
- /* Currently, mite.c requires us to handle LINKC */
if(ai_mite_status & CHSR_LINKC){
- writel(CHOR_CLRLC, devpriv->mite->mite_io_addr + MITE_CHOR(devpriv->ai_mite_chan->channel));
ni_sync_ai_dma(dev);
}
rt_printk("ni_mio_common: a_status=0xffff. Card removed?\n");
/* we probably aren't even running a command now,
* so it's a good idea to be careful. */
- if(s->subdev_flags&SDF_RUNNING){
+ if(comedi_get_subdevice_runflags(s) & SRF_RUNNING){
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
ni_event(dev, s, s->async->events);
}
#endif
}
-static void ack_b_interrupt(comedi_device *dev, unsigned short b_status, unsigned short g_status)
+static void ack_b_interrupt(comedi_device *dev, unsigned short b_status)
{
unsigned short ack = 0;
if(b_status & AO_BC_TC_St)
{
ack |= AO_UPDATE_Interrupt_Ack;
}
- if(b_status & G1_Gate_Interrupt_St)
- {
- if(should_ack_gate(dev, 1))
- ack |= G1_Gate_Interrupt_Ack;
- }
- if(g_status & G1_Gate_Error_St)
- {
- ack |= G1_Gate_Error_Confirm;
- }
- if(b_status & G1_TC_St)
- {
- ack |= G1_TC_Interrupt_Ack;
- }
if(ack) devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
}
{
int i;
static const int timeout = 10000;
+ unsigned long flags;
+ int retval = 0;
- if(devpriv->ai_mite_chan == NULL) return 0;
- for( i = 0; i < timeout; i++ )
- {
- if((devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St) &&
- mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
- break;
- comedi_udelay(2);
- }
- if(i == timeout)
+ comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
+ if(devpriv->ai_mite_chan)
{
- rt_printk("ni_mio_common: wait for dma drain timed out\n");
- rt_printk("mite_bytes_in_transit=%i, AI_Status1_Register=0x%x\n",
- mite_bytes_in_transit(devpriv->ai_mite_chan), devpriv->stc_readw(dev, AI_Status_1_Register));
- return -1;
+ for(i = 0; i < timeout; i++)
+ {
+ if((devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St) &&
+ mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
+ break;
+ comedi_udelay(5);
+ }
+ if(i == timeout)
+ {
+ rt_printk("ni_mio_common: wait for dma drain timed out\n");
+ rt_printk("mite_bytes_in_transit=%i, AI_Status1_Register=0x%x\n",
+ mite_bytes_in_transit(devpriv->ai_mite_chan), devpriv->stc_readw(dev, AI_Status_1_Register));
+ retval = -1;
+ }
}
+ comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
ni_sync_ai_dma(dev);
- return 0;
+ return retval;
}
#endif
/*
{
comedi_subdevice *s = dev->subdevices + NI_AO_SUBDEV;
int retval;
+ unsigned long flags;
retval = ni_request_ao_mite_channel(dev);
if(retval) return retval;
- //rt_printk("comedi_debug: using mite channel %i for ao.\n", devpriv->ao_mite_chan->channel);
/* read alloc the entire buffer */
comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz);
+ comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
if(boardtype.reg_type & (ni_reg_611x | ni_reg_6713))
{
makes the mite do 32 bit pci transfers, doubling pci bandwidth. */
mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
}
- /*start the MITE*/
mite_dma_arm(devpriv->ao_mite_chan);
+ comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
+
return 0;
}
case NITIO_G01_Joint_Status2_Reg:
stc_register = Joint_Status_2_Register;
break;
+ case NITIO_G0_Interrupt_Acknowledge_Reg:
+ stc_register = Interrupt_A_Ack_Register;
+ break;
+ case NITIO_G1_Interrupt_Acknowledge_Reg:
+ stc_register = Interrupt_B_Ack_Register;
+ break;
+ case NITIO_G0_Status_Reg:
+ stc_register = AI_Status_1_Register;
+ break;
+ case NITIO_G1_Status_Reg:
+ stc_register = AO_Status_1_Register;
+ break;
default:
rt_printk("%s: unhandled register 0x%x in switch.\n", __FUNCTION__, reg);
BUG();
devpriv->counter_dev->counters[j].chip_index = 0;
devpriv->counter_dev->counters[j].counter_index = j;
- devpriv->counter_dev->counters[j].clock_period_ps = 0;
- devpriv->counter_dev->counters[j].mite_chan = NULL;
ni_tio_init_counter(&devpriv->counter_dev->counters[j]);
}
{
int retval;
#ifdef PCIDMA
- unsigned long flags;
struct ni_gpct *counter = s->private;
const comedi_cmd *cmd = &s->async->cmd;
Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 1);
}
ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
-
- comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
retval = ni_tio_cmd(counter, s->async);
- comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
#else
retval = -ENOTSUPP;
#endif
static int ni_gpct_cancel(comedi_device *dev, comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
- unsigned long flags;
int retval;
- comedi_spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
retval = ni_tio_cancel(counter);
- comedi_spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-
ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
ni_set_bits(dev, Gi_Interrupt_Enable_Register(counter->counter_index),
Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0);
return 0;
}
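+/* map a counter index (0..3) to its interrupt acknowledge register */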
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(int counter_index)
+{
+ switch(counter_index)
+ {
+ case 0:
+ return NITIO_G0_Interrupt_Acknowledge_Reg;
+ break;
+ case 1:
+ return NITIO_G1_Interrupt_Acknowledge_Reg;
+ break;
+ case 2:
+ return NITIO_G2_Interrupt_Acknowledge_Reg;
+ break;
+ case 3:
+ return NITIO_G3_Interrupt_Acknowledge_Reg;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return 0;
+}
+
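+/* map a counter index (0..3) to its status register */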
+static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
+{
+ switch(counter_index)
+ {
+ case 0:
+ return NITIO_G0_Status_Reg;
+ break;
+ case 1:
+ return NITIO_G1_Status_Reg;
+ break;
+ case 2:
+ return NITIO_G2_Status_Reg;
+ break;
+ case 3:
+ return NITIO_G3_Status_Reg;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return 0;
+}
+
enum Gi_Auto_Increment_Reg_Bits
{
Gi_Auto_Increment_Mask = 0xff
G0_No_Load_Between_Gates_Bit = 0x400,
G1_No_Load_Between_Gates_Bit = 0x800,
G0_TC_Error_Bit = 0x1000,
- G1_TC_Error_Bit = 0x2000
+ G1_TC_Error_Bit = 0x2000,
+ G0_Gate_Error_Bit = 0x4000,
+ G1_Gate_Error_Bit = 0x8000
};
static inline unsigned Gi_Counting_Bit(unsigned counter_index)
{
if(counter_index % 2) return G1_Next_Load_Source_Bit;
return G0_Next_Load_Source_Bit;
}
+static inline unsigned Gi_TC_Error_Bit(unsigned counter_index)
+{
+ if(counter_index % 2) return G1_TC_Error_Bit;
+ return G0_TC_Error_Bit;
+}
+static inline unsigned Gi_Gate_Error_Bit(unsigned counter_index)
+{
+ if(counter_index % 2) return G1_Gate_Error_Bit;
+ return G0_Gate_Error_Bit;
+}
/* joint reset register bits */
static inline unsigned Gi_Reset_Bit(unsigned counter_index)
Gi_DMA_Int_Bit = 0x4
};
+enum Gi_DMA_Status_Reg_Bits
+{
+ Gi_DMA_Readbank_Bit = 0x2000,
+ Gi_DRQ_Error_Bit = 0x4000,
+ Gi_DRQ_Status_Bit = 0x8000
+};
+
+enum G02_Interrupt_Acknowledge_Bits
+{
+ G0_Gate_Error_Confirm_Bit = 0x20,
+ G0_TC_Error_Confirm_Bit = 0x40
+};
+enum G13_Interrupt_Acknowledge_Bits
+{
+ G1_Gate_Error_Confirm_Bit = 0x2,
+ G1_TC_Error_Confirm_Bit = 0x4
+};
+static inline unsigned Gi_Gate_Error_Confirm_Bit(unsigned counter_index)
+{
+ if(counter_index % 2)
+ return G1_Gate_Error_Confirm_Bit;
+ return G0_Gate_Error_Confirm_Bit;
+}
+static inline unsigned Gi_TC_Error_Confirm_Bit(unsigned counter_index)
+{
+ if(counter_index % 2)
+ return G1_TC_Error_Confirm_Bit;
+ return G0_TC_Error_Confirm_Bit;
+}
+// bits that are the same in G0/G2 and G1/G3 interrupt acknowledge registers
+enum Gxx_Interrupt_Acknowledge_Bits
+{
+ Gi_TC_Interrupt_Ack_Bit = 0x4000,
+ Gi_Gate_Interrupt_Ack_Bit = 0x8000
+};
+
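+// bits of the individual counter status registers (read from AI/AO Status 1 on e-series)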
+enum Gi_Status_Bits
+{
+ Gi_Gate_Interrupt_Bit = 0x4,
+ Gi_TC_Bit = 0x8,
+ Gi_Interrupt_Bit = 0x8000
+};
+
static const lsampl_t counter_status_mask = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
static int __init ni_tio_init_module(void)
for(i = 0; i < num_counters; ++i)
{
counter_dev->counters[i].counter_dev = counter_dev;
+ spin_lock_init(&counter_dev->counters[i].lock);
}
counter_dev->num_counters = num_counters;
return counter_dev;
{
comedi_cmd *cmd = &async->cmd;
int retval;
+ unsigned long flags;
+ comedi_spin_lock_irqsave(&counter->lock, flags);
if(counter->mite_chan == NULL)
{
rt_printk("ni_tio: commands only supported with DMA. Interrupt-driven commands not yet implemented.\n");
retval = ni_tio_input_cmd(counter, async);
}
}
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
return retval;
}
int ni_tio_cancel(struct ni_gpct *counter)
{
+ unsigned long flags;
+
ni_tio_arm(counter, 0, 0);
+ comedi_spin_lock_irqsave(&counter->lock, flags);
if(counter->mite_chan)
{
mite_dma_disarm(counter->mite_chan);
}
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
ni_tio_configure_dma(counter, 0, 0);
return 0;
}
+ /* During buffered input counter operation for e-series, the gate interrupt is acked
+ automatically by the dma controller, due to the Gi_Read/Write_Acknowledges_IRQ bits
+ in the input select register. */
+static int should_ack_gate(struct ni_gpct *counter)
+{
+ unsigned long flags;
+ int retval = 0;
+
+ switch(counter->counter_dev->variant)
+ {
+ case ni_gpct_variant_m_series:
+ case ni_gpct_variant_660x: // not sure if 660x really supports gate interrupts (the bits are not listed in register-level manual)
+ return 1;
+ break;
+ case ni_gpct_variant_e_series:
+ comedi_spin_lock_irqsave(&counter->lock, flags);
+ {
+ if(counter->mite_chan == NULL ||
+ counter->mite_chan->dir != COMEDI_INPUT ||
+ (mite_done(counter->mite_chan)))
+ {
+ retval = 1;
+ }
+ }
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
+ break;
+ }
+ return retval;
+}
+
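+/* Read the joint (Gxx) and per-counter (Gi) status registers, acknowledge any
+ * pending gate/TC interrupts, and confirm gate/TC errors.  The errors are
+ * reported through the optional gate_error/tc_error out parameters. */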
+static unsigned acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error, int *tc_error)
+{
+ const unsigned short gxx_status = counter->counter_dev->read_register(counter, NITIO_Gxx_Status_Reg(counter->counter_index));
+ const unsigned short gi_status = counter->counter_dev->read_register(counter, NITIO_Gi_Status_Reg(counter->counter_index));
+ unsigned ack = 0;
+
+ if(gate_error) *gate_error = 0;
+ if(tc_error) *tc_error = 0;
+
+ if(gxx_status & Gi_Gate_Error_Bit(counter->counter_index))
+ {
+ ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+ if(gate_error) *gate_error = 1;
+ }
+ if(gxx_status & Gi_TC_Error_Bit(counter->counter_index))
+ {
+ ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+ if(tc_error) *tc_error = 1;
+ }
+ if(gi_status & Gi_TC_Bit)
+ {
+ ack |= Gi_TC_Interrupt_Ack_Bit;
+ }
+ if(gi_status & Gi_Gate_Interrupt_Bit)
+ {
+ if(should_ack_gate(counter))
+ ack |= Gi_Gate_Interrupt_Ack_Bit;
+ }
+ if(ack) counter->counter_dev->write_register(counter, ack, NITIO_Gi_Interrupt_Acknowledge_Reg(counter->counter_index));
+ return gxx_status;
+}
+
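+/* Acknowledge a counter interrupt, flag gate and DRQ errors as
+ * COMEDI_CB_OVERFLOW events, and sync the counter's mite DMA channel
+ * (if any) with the comedi buffer. */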
+void ni_tio_handle_interrupt(struct ni_gpct *counter, comedi_subdevice *s)
+{
+ unsigned gpct_mite_status;
+ unsigned long flags;
+ int gate_error;
+ int tc_error;
+
+ acknowledge_and_confirm(counter, &gate_error, &tc_error);
+ if(gate_error)
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ switch(counter->counter_dev->variant)
+ {
+ case ni_gpct_variant_m_series:
+ case ni_gpct_variant_660x:
+ if(counter->counter_dev->read_register(counter, NITIO_Gi_DMA_Status_Reg(counter->counter_index)) &
+ Gi_DRQ_Error_Bit)
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ break;
+ case ni_gpct_variant_e_series:
+ break;
+ }
+ comedi_spin_lock_irqsave(&counter->lock, flags);
+ if(counter->mite_chan == NULL)
+ {
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
+ return;
+ }
+ gpct_mite_status = mite_get_status(counter->mite_chan);
+ if(gpct_mite_status & CHSR_LINKC)
+ {
+ writel(CHOR_CLRLC, counter->mite_chan->mite->mite_io_addr + MITE_CHOR(counter->mite_chan->channel));
+ }
+ mite_sync_input_dma(counter->mite_chan, s->async);
+
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
+}
+
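+/* set or clear (mite_chan == NULL) the counter's mite channel under counter->lock,
+ * so it is never changed while the interrupt handler is using it */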
+void ni_tio_set_mite_channel(struct ni_gpct *counter, struct mite_channel *mite_chan)
+{
+ unsigned long flags;
+
+ comedi_spin_lock_irqsave(&counter->lock, flags);
+ counter->mite_chan = mite_chan;
+ comedi_spin_unlock_irqrestore(&counter->lock, flags);
+}
+
EXPORT_SYMBOL_GPL(ni_tio_rinsn);
EXPORT_SYMBOL_GPL(ni_tio_winsn);
EXPORT_SYMBOL_GPL(ni_tio_cmd);
EXPORT_SYMBOL_GPL(ni_tio_init_counter);
EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
EXPORT_SYMBOL_GPL(ni_gpct_device_destroy);
+EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
+EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
NITIO_G3_DMA_Status_Reg,
NITIO_G0_ABZ_Reg,
NITIO_G1_ABZ_Reg,
+ NITIO_G0_Interrupt_Acknowledge_Reg,
+ NITIO_G1_Interrupt_Acknowledge_Reg,
+ NITIO_G2_Interrupt_Acknowledge_Reg,
+ NITIO_G3_Interrupt_Acknowledge_Reg,
+ NITIO_G0_Status_Reg,
+ NITIO_G1_Status_Reg,
+ NITIO_G2_Status_Reg,
+ NITIO_G3_Status_Reg,
NITIO_Num_Registers,
};
unsigned chip_index;
uint64_t clock_period_ps; /* clock period in picoseconds */
struct mite_channel *mite_chan;
+ spinlock_t lock;
};
struct ni_gpct_device
extern int ni_tio_cmd(struct ni_gpct *counter, comedi_async *async);
extern int ni_tio_cmdtest(struct ni_gpct *counter);
extern int ni_tio_cancel(struct ni_gpct *counter);
+extern void ni_tio_handle_interrupt(struct ni_gpct *counter, comedi_subdevice *s);
+extern void ni_tio_set_mite_channel(struct ni_gpct *counter, struct mite_channel *mite_chan);
#endif /* _COMEDI_NI_TIO_H */
void *lock;
void *busy;
- volatile unsigned int runflags;
+ unsigned runflags;
+ spinlock_t runflags_lock;
int io_bits;
#endif
/* subdevice runflags */
-#define SRF_USER 0x00000001
-#define SRF_RT 0x00000002
-/* indicates an COMEDI_CB_ERROR event has occurred since the last command was started */
-#define SRF_ERROR 0x00000004
+enum subdevice_runflags
+{
+ SRF_USER = 0x00000001,
+ SRF_RT = 0x00000002,
+ /* indicates a COMEDI_CB_ERROR event has occurred since the last command was started */
+ SRF_ERROR = 0x00000004,
+ SRF_RUNNING = 0x08000000 /* command is running; replaces checks of SDF_RUNNING in subdev_flags */
+};
/*
various internal comedi functions
int do_rangeinfo_ioctl(comedi_device *dev,comedi_rangeinfo *arg);
int check_chanlist(comedi_subdevice *s,int n,unsigned int *chanlist);
+void comedi_set_subdevice_runflags(comedi_subdevice *s, unsigned mask, unsigned bits);
+unsigned comedi_get_subdevice_runflags(comedi_subdevice *s);
/* range stuff */
{
dev->subdevices[i].device = dev;
dev->subdevices[i].async_dma_dir = DMA_NONE;
+ spin_lock_init(&dev->subdevices[i].runflags_lock);
}
return 0;
}