From 257b17ca030387cb17314cd1851507bdd1b4ddd5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:23 -0700 Subject: [PATCH 01/13] dmaengine: fail device registration if channel registration fails Atsushi points out: "If alloc_percpu or kzalloc failed, chan_id does not match with its position in device->channels list. And above "continue" looks buggy anyway. Keeping incomplete channels in device->channels list looks very dangerous..." Also, fix up leakage of idr_ref in the idr_pre_get() and channel init fail cases. Reported-by: Atsushi Nemoto Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 51 +++++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 280a9d263eb..49243d14b89 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -602,6 +602,24 @@ void dmaengine_put(void) } EXPORT_SYMBOL(dmaengine_put); +static int get_dma_id(struct dma_device *device) +{ + int rc; + + idr_retry: + if (!idr_pre_get(&dma_idr, GFP_KERNEL)) + return -ENOMEM; + mutex_lock(&dma_list_mutex); + rc = idr_get_new(&dma_idr, NULL, &device->dev_id); + mutex_unlock(&dma_list_mutex); + if (rc == -EAGAIN) + goto idr_retry; + else if (rc != 0) + return rc; + + return 0; +} + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device @@ -640,27 +658,25 @@ int dma_async_device_register(struct dma_device *device) idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); if (!idr_ref) return -ENOMEM; - atomic_set(idr_ref, 0); - idr_retry: - if (!idr_pre_get(&dma_idr, GFP_KERNEL)) - return -ENOMEM; - mutex_lock(&dma_list_mutex); - rc = idr_get_new(&dma_idr, NULL, &device->dev_id); - mutex_unlock(&dma_list_mutex); - if (rc == -EAGAIN) - goto idr_retry; - else if (rc != 0) + rc = get_dma_id(device); + if (rc != 0) { + kfree(idr_ref); return rc; + } + + atomic_set(idr_ref, 0); /* represent channels in sysfs. Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { + rc = -ENOMEM; chan->local = alloc_percpu(typeof(*chan->local)); if (chan->local == NULL) - continue; + goto err_out; chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); if (chan->dev == NULL) { free_percpu(chan->local); - continue; + chan->local = NULL; + goto err_out; } chan->chan_id = chancnt++; @@ -677,6 +693,8 @@ int dma_async_device_register(struct dma_device *device) if (rc) { free_percpu(chan->local); chan->local = NULL; + kfree(chan->dev); + atomic_dec(idr_ref); goto err_out; } chan->client_count = 0; @@ -707,6 +725,15 @@ int dma_async_device_register(struct dma_device *device) return 0; err_out: + /* if we never registered a channel just release the idr */ + if (atomic_read(idr_ref) == 0) { + mutex_lock(&dma_list_mutex); + idr_remove(&dma_idr, device->dev_id); + mutex_unlock(&dma_list_mutex); + kfree(idr_ref); + return rc; + } + list_for_each_entry(chan, &device->channels, device_node) { if (chan->local == NULL) continue; From 0149f7d5dc66dcffbb044ba005a5378a5864d2a3 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:23 -0700 Subject: [PATCH 02/13] dma: ipu_idmac driver cosmetic clean-up Remove superfluous semicolons, update comments. 
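The superfluous semicolons are stray ';' tokens left after function-body closing braces. As a before/after sketch only (the pattern is taken from the hunks below, e.g. ipu_ch_param_set_rotation(), not new code):

    /* before: spurious ';' after the function definition */
    static void ipu_ch_param_set_rotation(union chan_param_mem *params,
                                          enum ipu_rotate_mode rotate)
    {
            params->pp.bam = rotate;
    };

    /* after: a function definition needs no statement terminator */
    static void ipu_ch_param_set_rotation(union chan_param_mem *params,
                                          enum ipu_rotate_mode rotate)
    {
            params->pp.bam = rotate;
    }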
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index ae50a9d1a4e..a6f7294c859 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -107,7 +107,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt) } } -/* Enable / disable direct write to memory by the Camera Sensor Interface */ +/* Enable direct write to memory by the Camera Sensor Interface */ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -126,6 +126,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) idmac_write_icreg(ipu, ic_conf, IC_CONF); } +/* Called under spin_lock_irqsave(&ipu_data.lock) */ static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -422,7 +423,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, break; default: dev_err(ipu_data.dev, - "mxc ipu: unimplemented pixel format %d\n", pixel_fmt); + "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); break; } @@ -433,20 +434,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params, uint16_t burst_pixels) { params->pp.npb = burst_pixels - 1; -}; +} static void ipu_ch_param_set_buffer(union chan_param_mem *params, dma_addr_t buf0, dma_addr_t buf1) { params->pp.eba0 = buf0; params->pp.eba1 = buf1; -}; +} static void ipu_ch_param_set_rotation(union chan_param_mem *params, enum ipu_rotate_mode rotate) { params->pp.bam = rotate; -}; +} static void ipu_write_param_mem(uint32_t addr, uint32_t *data, uint32_t num_words) @@ -571,7 +572,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch) { /* Channel Parameter Memory */ return 0x10000 | (dma_ch << 4); -}; +} static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, bool prio) @@ -611,7 +612,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) /** * ipu_enable_channel() - enable an IPU channel. - * @channel: channel ID. + * @idmac: IPU DMAC context. + * @ichan: IDMAC channel. * @return: 0 on success or negative error code on failure. */ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) @@ -649,7 +651,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) /** * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. - * @channel: channel ID. + * @ichan: IDMAC channel. * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. * @width: width of buffer in pixels. * @height: height of buffer in pixels. @@ -687,7 +689,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, } /* IC channel's stride must be a multiple of 8 pixels */ - if ((channel <= 13) && (stride % 8)) { + if ((channel <= IDMAC_IC_13) && (stride % 8)) { dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); return -EINVAL; } @@ -752,7 +754,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) /** * ipu_update_channel_buffer() - update physical address of a channel buffer. - * @channel: channel ID. + * @ichan: IDMAC channel. * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. 
@@ -1341,13 +1343,7 @@ static void ipu_gc_tasklet(unsigned long arg) } } -/* - * At the time .device_alloc_chan_resources() method is called, we cannot know, - * whether the client will accept the channel. Thus we must only check, if we - * can satisfy client's request but the only real criterion to verify, whether - * the client has accepted our offer is the client_count. That's why we have to - * perform the rest of our allocation tasks on the first call to this function. - */ +/* Allocate and initialise a transfer descriptor. */ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long tx_flags) @@ -1432,8 +1428,7 @@ static void __idmac_terminate_all(struct dma_chan *chan) struct idmac_tx_desc *desc = ichan->desc + i; if (list_empty(&desc->list)) /* Descriptor was prepared, but not submitted */ - list_add(&desc->list, - &ichan->free_list); + list_add(&desc->list, &ichan->free_list); async_tx_clear_ack(&desc->txd); } From 234f2df56f5b05756c444edc9879145deddf69f4 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: [PATCH 03/13] dma: improve section assignment in i.MX31 IPU DMA driver The i.MX31 IPU DMA driver is a platform driver, but doesn't need hotplug, so we can use __init and __exit function attributes. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 8 ++++---- drivers/dma/ipu/ipu_irq.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index a6f7294c859..b759ae9315b 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1576,7 +1576,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) return dma_async_device_register(&idmac->dma); } -static void ipu_idmac_exit(struct ipu *ipu) +static void __exit ipu_idmac_exit(struct ipu *ipu) { int i; struct idmac *idmac = &ipu->idmac; @@ -1595,7 +1595,7 @@ static void ipu_idmac_exit(struct ipu *ipu) * IPU common probe / remove */ -static int ipu_probe(struct platform_device *pdev) +static int __init ipu_probe(struct platform_device *pdev) { struct ipu_platform_data *pdata = pdev->dev.platform_data; struct resource *mem_ipu, *mem_ic; @@ -1695,7 +1695,7 @@ err_noirq: return ret; } -static int ipu_remove(struct platform_device *pdev) +static int __exit ipu_remove(struct platform_device *pdev) { struct ipu *ipu = platform_get_drvdata(pdev); @@ -1720,7 +1720,7 @@ static struct platform_driver ipu_platform_driver = { .name = "ipu-core", .owner = THIS_MODULE, }, - .remove = ipu_remove, + .remove = __exit_p(ipu_remove), }; static int __init ipu_init(void) diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 83f532cc767..dd8ebc75b66 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = { }; /* Install the IRQ handler */ -int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) +int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) { struct ipu_platform_data *pdata = dev->dev.platform_data; unsigned int irq, irq_base, i; From 8d47bae004f062630f69f7f83d098424252e232d Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: [PATCH 04/13] dma: i.MX31 IPU DMA robustness improvements Add DMA error handling to the ISR, move common code fragments to functions, fix scatter-gather 
element queuing in the ISR, survive channel freeing and re-allocation in a quick succession. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 265 ++++++++++++++++++++++++------------ 1 file changed, 181 insertions(+), 84 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index b759ae9315b..0a525c450f2 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -28,6 +28,9 @@ #define FS_VF_IN_VALID 0x00000002 #define FS_ENC_IN_VALID 0x00000001 +static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, + bool wait_for_stop); + /* * There can be only one, we could allocate it dynamically, but then we'd have * to add an extra parameter to some functions, and use something as ugly as @@ -762,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * function will fail if the buffer is set to ready. */ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(enum ipu_channel channel, +static int ipu_update_channel_buffer(struct idmac_channel *ichan, int buffer_n, dma_addr_t phyaddr) { + enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; unsigned long flags; @@ -773,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, if (buffer_n == 0) { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); if (reg & (1UL << channel)) { - spin_unlock_irqrestore(&ipu_data.lock, flags); - return -EACCES; + ipu_ic_disable_task(&ipu_data, channel); + ichan->status = IPU_CHANNEL_READY; } /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ @@ -784,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, } else { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); if (reg & (1UL << channel)) { - spin_unlock_irqrestore(&ipu_data.lock, flags); - return -EACCES; + ipu_ic_disable_task(&ipu_data, channel); + ichan->status = IPU_CHANNEL_READY; } /* Check if double-buffering is already enabled */ @@ -806,6 +810,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, return 0; } +/* Called under spin_lock_irqsave(&ichan->lock) */ +static int ipu_submit_buffer(struct idmac_channel *ichan, + struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) +{ + unsigned int chan_id = ichan->dma_chan.chan_id; + struct device *dev = &ichan->dma_chan.dev->device; + int ret; + + if (async_tx_test_ack(&desc->txd)) + return -EINTR; + + /* + * On first invocation this shouldn't be necessary, the call to + * ipu_init_channel_buffer() above will set addresses for us, so we + * could make it conditional on status >= IPU_CHANNEL_ENABLED, but + * doing it again shouldn't hurt either. 
+ */ + ret = ipu_update_channel_buffer(ichan, buf_idx, + sg_dma_address(sg)); + + if (ret < 0) { + dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", + sg, chan_id, buf_idx); + return ret; + } + + ipu_select_buffer(chan_id, buf_idx); + dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", + sg, chan_id, buf_idx); + + return 0; +} + /* Called under spin_lock_irqsave(&ichan->lock) */ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, struct idmac_tx_desc *desc) @@ -817,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, if (!ichan->sg[i]) { ichan->sg[i] = sg; - /* - * On first invocation this shouldn't be necessary, the - * call to ipu_init_channel_buffer() above will set - * addresses for us, so we could make it conditional - * on status >= IPU_CHANNEL_ENABLED, but doing it again - * shouldn't hurt either. - */ - ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i, - sg_dma_address(sg)); + ret = ipu_submit_buffer(ichan, desc, sg, i); if (ret < 0) return ret; - ipu_select_buffer(ichan->dma_chan.chan_id, i); - sg = sg_next(sg); } } @@ -844,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) struct idmac_channel *ichan = to_idmac_chan(tx->chan); struct idmac *idmac = to_idmac(tx->chan->device); struct ipu *ipu = to_ipu(idmac); + struct device *dev = &ichan->dma_chan.dev->device; dma_cookie_t cookie; unsigned long flags; + int ret; /* Sanity check */ if (!list_empty(&desc->list)) { /* The descriptor doesn't belong to client */ - dev_err(&ichan->dma_chan.dev->device, - "Descriptor %p not prepared!\n", tx); + dev_err(dev, "Descriptor %p not prepared!\n", tx); return -EBUSY; } mutex_lock(&ichan->chan_mutex); + async_tx_clear_ack(tx); + if (ichan->status < IPU_CHANNEL_READY) { struct idmac_video_param *video = &ichan->params.video; /* @@ -880,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) goto out; } - /* ipu->lock can be taken under ichan->lock, but not v.v. */ - spin_lock_irqsave(&ichan->lock, flags); - - /* submit_buffers() atomically verifies and fills empty sg slots */ - cookie = ipu_submit_channel_buffers(ichan, desc); - - spin_unlock_irqrestore(&ichan->lock, flags); - - if (cookie < 0) - goto out; + dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); cookie = ichan->dma_chan.cookie; @@ -899,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) /* from dmaengine.h: "last cookie value returned to client" */ ichan->dma_chan.cookie = cookie; tx->cookie = cookie; + + /* ipu->lock can be taken under ichan->lock, but not v.v. 
*/ spin_lock_irqsave(&ichan->lock, flags); + list_add_tail(&desc->list, &ichan->queue); + /* submit_buffers() atomically verifies and fills empty sg slots */ + ret = ipu_submit_channel_buffers(ichan, desc); + spin_unlock_irqrestore(&ichan->lock, flags); + if (ret < 0) { + cookie = ret; + goto dequeue; + } + if (ichan->status < IPU_CHANNEL_ENABLED) { - int ret = ipu_enable_channel(idmac, ichan); + ret = ipu_enable_channel(idmac, ichan); if (ret < 0) { cookie = ret; - spin_lock_irqsave(&ichan->lock, flags); - list_del_init(&desc->list); - spin_unlock_irqrestore(&ichan->lock, flags); - tx->cookie = cookie; - ichan->dma_chan.cookie = cookie; + goto dequeue; } } dump_idmac_reg(ipu); +dequeue: + if (cookie < 0) { + spin_lock_irqsave(&ichan->lock, flags); + list_del_init(&desc->list); + spin_unlock_irqrestore(&ichan->lock, flags); + tx->cookie = cookie; + ichan->dma_chan.cookie = cookie; + } + out: mutex_unlock(&ichan->chan_mutex); @@ -1163,6 +1200,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, return 0; } +static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, + struct idmac_tx_desc **desc, struct scatterlist *sg) +{ + struct scatterlist *sgnew = sg ? sg_next(sg) : NULL; + + if (sgnew) + /* next sg-element in this list */ + return sgnew; + + if ((*desc)->list.next == &ichan->queue) + /* No more descriptors on the queue */ + return NULL; + + /* Fetch next descriptor */ + *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); + return (*desc)->sg; +} + /* * We have several possibilities here: * current BUF next BUF @@ -1178,23 +1233,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, static irqreturn_t idmac_interrupt(int irq, void *dev_id) { struct idmac_channel *ichan = dev_id; + struct device *dev = &ichan->dma_chan.dev->device; unsigned int chan_id = ichan->dma_chan.chan_id; struct scatterlist **sg, *sgnext, *sgnew = NULL; /* Next transfer descriptor */ - struct idmac_tx_desc *desc = NULL, *descnew; + struct idmac_tx_desc *desc, *descnew; dma_async_tx_callback callback; void *callback_param; bool done = false; - u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), - ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), - curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); + u32 ready0, ready1, curbuf, err; + unsigned long flags; /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ - pr_debug("IDMAC irq %d\n", irq); + dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); + + spin_lock_irqsave(&ipu_data.lock, flags); + + ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); + ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); + curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); + err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); + + if (err & (1 << chan_id)) { + idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); + spin_unlock_irqrestore(&ipu_data.lock, flags); + /* + * Doing this + * ichan->sg[0] = ichan->sg[1] = NULL; + * you can force channel re-enable on the next tx_submit(), but + * this is dirty - think about descriptors with multiple + * sg elements. 
+ */ + dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", + chan_id, ready0, ready1, curbuf); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&ipu_data.lock, flags); + /* Other interrupts do not interfere with this channel */ spin_lock(&ichan->lock); - if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { int i = 100; @@ -1209,19 +1287,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (!i) { spin_unlock(&ichan->lock); - dev_dbg(ichan->dma_chan.device->dev, + dev_dbg(dev, "IRQ on active buffer on channel %x, active " "%d, ready %x, %x, current %x!\n", chan_id, ichan->active_buffer, ready0, ready1, curbuf); return IRQ_NONE; - } + } else + dev_dbg(dev, + "Buffer deactivated on channel %x, active " + "%d, ready %x, %x, current %x, rest %d!\n", chan_id, + ichan->active_buffer, ready0, ready1, curbuf, i); } if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || (!ichan->active_buffer && (ready0 >> chan_id) & 1) )) { spin_unlock(&ichan->lock); - dev_dbg(ichan->dma_chan.device->dev, + dev_dbg(dev, "IRQ with active buffer still ready on channel %x, " "active %d, ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1229,8 +1311,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) } if (unlikely(list_empty(&ichan->queue))) { + ichan->sg[ichan->active_buffer] = NULL; spin_unlock(&ichan->lock); - dev_err(ichan->dma_chan.device->dev, + dev_err(dev, "IRQ without queued buffers on channel %x, active %d, " "ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1245,40 +1328,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) sg = &ichan->sg[ichan->active_buffer]; sgnext = ichan->sg[!ichan->active_buffer]; + if (!*sg) { + spin_unlock(&ichan->lock); + return IRQ_HANDLED; + } + + desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); + descnew = desc; + + dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", + irq, sg_dma_address(*sg), sgnext ? 
sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + + /* Find the descriptor of sgnext */ + sgnew = idmac_sg_next(ichan, &descnew, *sg); + if (sgnext != sgnew) + dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); + /* * if sgnext == NULL sg must be the last element in a scatterlist and * queue must be empty */ if (unlikely(!sgnext)) { - if (unlikely(sg_next(*sg))) { - dev_err(ichan->dma_chan.device->dev, - "Broken buffer-update locking on channel %x!\n", - chan_id); - /* We'll let the user catch up */ + if (!WARN_ON(sg_next(*sg))) + dev_dbg(dev, "Underrun on channel %x\n", chan_id); + ichan->sg[!ichan->active_buffer] = sgnew; + + if (unlikely(sgnew)) { + ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); } else { - /* Underrun */ + spin_lock_irqsave(&ipu_data.lock, flags); ipu_ic_disable_task(&ipu_data, chan_id); - dev_dbg(ichan->dma_chan.device->dev, - "Underrun on channel %x\n", chan_id); + spin_unlock_irqrestore(&ipu_data.lock, flags); ichan->status = IPU_CHANNEL_READY; /* Continue to check for complete descriptor */ } } - desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); - - /* First calculate and submit the next sg element */ - if (likely(sgnext)) - sgnew = sg_next(sgnext); - - if (unlikely(!sgnew)) { - /* Start a new scatterlist, if any queued */ - if (likely(desc->list.next != &ichan->queue)) { - descnew = list_entry(desc->list.next, - struct idmac_tx_desc, list); - sgnew = &descnew->sg[0]; - } - } + /* Calculate and submit the next sg element */ + sgnew = idmac_sg_next(ichan, &descnew, sgnew); if (unlikely(!sg_next(*sg)) || !sgnext) { /* @@ -1291,17 +1378,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) *sg = sgnew; - if (likely(sgnew)) { - int ret; - - ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, - sg_dma_address(*sg)); - if (ret < 0) - dev_err(ichan->dma_chan.device->dev, - "Failed to update buffer on channel %x buffer %d!\n", - chan_id, ichan->active_buffer); - else - ipu_select_buffer(chan_id, ichan->active_buffer); + if (likely(sgnew) && + ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; + spin_unlock(&ichan->lock); + callback(callback_param); + spin_lock(&ichan->lock); } /* Flip the active buffer - even if update above failed */ @@ -1329,13 +1412,20 @@ static void ipu_gc_tasklet(unsigned long arg) struct idmac_channel *ichan = ipu->channel + i; struct idmac_tx_desc *desc; unsigned long flags; - int j; + struct scatterlist *sg; + int j, k; for (j = 0; j < ichan->n_tx_desc; j++) { desc = ichan->desc + j; spin_lock_irqsave(&ichan->lock, flags); if (async_tx_test_ack(&desc->txd)) { list_move(&desc->list, &ichan->free_list); + for_each_sg(desc->sg, sg, desc->sg_len, k) { + if (ichan->sg[0] == sg) + ichan->sg[0] = NULL; + else if (ichan->sg[1] == sg) + ichan->sg[1] = NULL; + } async_tx_clear_ack(&desc->txd); } spin_unlock_irqrestore(&ichan->lock, flags); @@ -1471,15 +1561,22 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) goto eimap; ichan->eof_irq = ret; - ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, - ichan->eof_name, ichan); - if (ret < 0) - goto erirq; + + /* + * Important to first disable the channel, because maybe someone + * used it before us, e.g., the bootloader + */ + ipu_disable_channel(idmac, ichan, true); ret = ipu_init_channel(idmac, ichan); if (ret < 0) goto eichan; + ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, + ichan->eof_name, ichan); + if 
(ret < 0) + goto erirq; + ichan->status = IPU_CHANNEL_INITIALIZED; dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", @@ -1487,9 +1584,9 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) return ret; -eichan: - free_irq(ichan->eof_irq, ichan); erirq: + ipu_uninit_channel(idmac, ichan); +eichan: ipu_irq_unmap(ichan->dma_chan.chan_id); eimap: return ret; From ccccce229c633a92c42cd1a40c0738d7b0d12644 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: [PATCH 05/13] dmaengine: initialize tx_list in dma_async_tx_descriptor_init Centralize this common initialization (and one case where ipu_idmac is duplicating ->chan initialization). Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 1 + drivers/dma/dw_dmac.c | 1 - drivers/dma/fsldma.c | 1 - drivers/dma/ioat_dma.c | 1 - drivers/dma/iop-adma.c | 1 - drivers/dma/ipu/ipu_idmac.c | 2 -- drivers/dma/mv_xor.c | 1 - 7 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 49243d14b89..a41d1ea10fa 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -920,6 +920,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, { tx->chan = chan; spin_lock_init(&tx->lock); + INIT_LIST_HEAD(&tx->tx_list); } EXPORT_SYMBOL(dma_async_tx_descriptor_init); diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index a97c07eef7e..862fc9ce9d8 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -826,7 +826,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; - INIT_LIST_HEAD(&desc->txd.tx_list); desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, sizeof(desc->lli), DMA_TO_DEVICE); dwc_desc_put(dwc, desc); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 86d6da47f55..da8a8ed9e41 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( dma_async_tx_descriptor_init(&desc_sw->async_tx, &fsl_chan->common); desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; - INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->async_tx.phys = pdesc; } diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 5905cd36bcd..e4fc33c1c32 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( desc_sw->async_tx.tx_submit = ioat2_tx_submit; break; } - INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->hw = desc; desc_sw->async_tx.phys = phys; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 16adbe61cfb..2f052265122 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = iop_adma_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); - INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) iop_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0a525c450f2..7ce0e25266d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -983,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n) memset(txd, 0, sizeof(*txd)); dma_async_tx_descriptor_init(txd, &ichan->dma_chan); 
txd->tx_submit = idmac_tx_submit; - txd->chan = &ichan->dma_chan; - INIT_LIST_HEAD(&txd->tx_list); list_add(&desc->list, &ichan->free_list); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index cb7f26fb9f1..ddab94f5122 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); - INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; From 54aee6a5f560d0e1bf3f39987c6ebe06daeb0ce1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: [PATCH 06/13] dmaengine: kill some unused headers The dmaengine redux left some unneeded headers in include/linux/dmaengine.h, clean them up. Signed-off-by: Dan Williams --- include/linux/dmaengine.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1956c8d46d3..96e676e5bf9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -23,9 +23,6 @@ #include #include -#include -#include -#include #include /** From 06164f3194e01ea4c76941ac60f541d656c8975f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: [PATCH 07/13] async_tx: provide __async_inline for HAS_DMA=n archs To allow an async_tx routine to be compiled away on HAS_DMA=n arch it needs to be declared __always_inline otherwise the compiler may emit code and cause a link error. Signed-off-by: Dan Williams --- crypto/async_tx/async_xor.c | 7 ++----- include/linux/async_tx.h | 9 +++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 595b78672b3..95fe2c8d6c5 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -30,11 +30,8 @@ #include #include -/* do_async_xor - dma map the pages and perform the xor with an engine. - * This routine is marked __always_inline so it can be compiled away - * when CONFIG_DMA_ENGINE=n - */ -static __always_inline struct dma_async_tx_descriptor * +/* do_async_xor - dma map the pages and perform the xor with an engine */ +static __async_inline struct dma_async_tx_descriptor * do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum async_tx_flags flags, diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 45f6297821b..5fc2ef8d97f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -21,6 +21,15 @@ #include #include +/* on architectures without dma-mapping capabilities we need to ensure + * that the asynchronous path compiles away + */ +#ifdef CONFIG_HAS_DMA +#define __async_inline +#else +#define __async_inline __always_inline +#endif + /** * dma_chan_ref - object used to manage dma channels received from the * dmaengine core. From 729b5d1b8ec72c28e99840b3f300ba67726e3ab9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: [PATCH 08/13] dmaengine: allow dma support for async_tx to be toggled Provide a config option for blocking the allocation of dma channels to the async_tx api. 
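With CONFIG_ASYNC_TX_DMA=n the new async_dmaengine_get()/async_dmaengine_put() and async_dma_find_channel() wrappers compile down to empty stubs, so async_tx is never handed a channel and always takes the synchronous software path. Roughly (a sketch of the =n stub added below, not additional code):

    static inline struct dma_chan *
    async_dma_find_channel(enum dma_transaction_type type)
    {
            /* no offload channel is offered to async_tx; callers
             * fall back to the synchronous implementation */
            return NULL;
    }

With CONFIG_ASYNC_TX_DMA=y the wrappers simply map to dmaengine_get(), dmaengine_put() and dma_find_channel(), restoring the current behaviour.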
Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 6 +++--- drivers/dma/Kconfig | 11 +++++++++++ include/linux/dmaengine.h | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index f21147f3626..06eb6cc09fe 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -30,7 +30,7 @@ #ifdef CONFIG_DMA_ENGINE static int __init async_tx_init(void) { - dmaengine_get(); + async_dmaengine_get(); printk(KERN_INFO "async_tx: api initialized (async)\n"); @@ -39,7 +39,7 @@ static int __init async_tx_init(void) static void __exit async_tx_exit(void) { - dmaengine_put(); + async_dmaengine_put(); } /** @@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, if (depend_tx && dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) return depend_tx->chan; - return dma_find_channel(tx_type); + return async_dma_find_channel(tx_type); } EXPORT_SYMBOL_GPL(__async_tx_find_channel); #else diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 48ea59e7967..3b3c01b6f1e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -98,6 +98,17 @@ config NET_DMA Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise say N. +config ASYNC_TX_DMA + bool "Async_tx: Offload support for the async_tx api" + depends on DMA_ENGINE + help + This allows the async_tx api to take advantage of offload engines for + memcpy, memset, xor, and raid6 p+q operations. If your platform has + a dma engine that can perform raid operations and you have enabled + MD_RAID456 say Y. + + If unsure, say N. + config DMATEST tristate "DMA Test client" depends on DMA_ENGINE diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 96e676e5bf9..2afc2c95e42 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -288,6 +288,24 @@ static inline void net_dmaengine_put(void) } #endif +#ifdef CONFIG_ASYNC_TX_DMA +#define async_dmaengine_get() dmaengine_get() +#define async_dmaengine_put() dmaengine_put() +#define async_dma_find_channel(type) dma_find_channel(type) +#else +static inline void async_dmaengine_get(void) +{ +} +static inline void async_dmaengine_put(void) +{ +} +static inline struct dma_chan * +async_dma_find_channel(enum dma_transaction_type type) +{ + return NULL; +} +#endif + dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, From b54d5cb9156868fb4f27ccd46a3afb0bf3ef8e0c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: [PATCH 09/13] dmatest: add xor test Extend dmatest to launch a thread per supported operation type and add an xor test. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 278 +++++++++++++++++++++++++++++------------- 1 file changed, 191 insertions(+), 87 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index e190d8b3070..224acf478fe 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); +static unsigned int xor_sources = 3; +module_param(xor_sources, uint, S_IRUGO); +MODULE_PARM_DESC(xor_sources, + "Number of xor source buffers (default: 3)"); + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
@@ -59,8 +64,9 @@ struct dmatest_thread { struct list_head node; struct task_struct *task; struct dma_chan *chan; - u8 *srcbuf; - u8 *dstbuf; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; }; struct dmatest_chan { @@ -98,30 +104,37 @@ static unsigned long dmatest_random(void) return buf; } -static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) +static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; + u8 *buf; - for (i = 0; i < start; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK);; - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + for (; (buf = *bufs); bufs++) { + for (i = 0; i < start; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_SRC | PATTERN_COPY + | (~i & PATTERN_COUNT_MASK);; + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + buf++; + } } -static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) +static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; + u8 *buf; - for (i = 0; i < start; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_DST | PATTERN_OVERWRITE - | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + for (; (buf = *bufs); bufs++) { + for (i = 0; i < start; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_DST | PATTERN_OVERWRITE + | (~i & PATTERN_COUNT_MASK); + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + } } static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, @@ -150,23 +163,30 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, thread_name, index, expected, actual); } -static unsigned int dmatest_verify(u8 *buf, unsigned int start, +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; u8 actual; + u8 expected; + u8 *buf; + unsigned int counter_orig = counter; - for (i = start; i < end; i++) { - actual = buf[i]; - if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { - if (error_count < 32) - dmatest_mismatch(actual, pattern, i, counter, - is_srcbuf); - error_count++; + for (; (buf = *bufs); bufs++) { + counter = counter_orig; + for (i = start; i < end; i++) { + actual = buf[i]; + expected = pattern | (~counter & PATTERN_COUNT_MASK); + if (actual != expected) { + if (error_count < 32) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); + error_count++; + } + counter++; } - counter++; } if (error_count > 32) @@ -178,10 +198,10 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start, /* * This function repeatedly tests DMA transfers of various lengths and - * offsets until it is told to exit by kthread_stop(). There may be - * multiple threads running this function in parallel for a single - * channel, and there may be multiple channels being tested in - * parallel. + * offsets for a given operation type until it is told to exit by + * kthread_stop(). 
There may be multiple threads running this function + * in parallel for a single channel, and there may be multiple channels + * being tested in parallel. * * Before each test, the source and destination buffer is initialized * with a known pattern. This pattern is different depending on @@ -201,25 +221,53 @@ static int dmatest_func(void *data) unsigned int total_tests = 0; dma_cookie_t cookie; enum dma_status status; + enum dma_ctrl_flags flags; int ret; + int src_cnt; + int dst_cnt; + int i; thread_name = current->comm; ret = -ENOMEM; - thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->srcbuf) - goto err_srcbuf; - thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->dstbuf) - goto err_dstbuf; smp_rmb(); chan = thread->chan; + if (thread->type == DMA_MEMCPY) + src_cnt = dst_cnt = 1; + else if (thread->type == DMA_XOR) { + src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ + dst_cnt = 1; + } else + goto err_srcs; + + thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); + if (!thread->srcs) + goto err_srcs; + for (i = 0; i < src_cnt; i++) { + thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->srcs[i]) + goto err_srcbuf; + } + thread->srcs[i] = NULL; + + thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); + if (!thread->dsts) + goto err_dsts; + for (i = 0; i < dst_cnt; i++) { + thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->dsts[i]) + goto err_dstbuf; + } + thread->dsts[i] = NULL; + + flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP; while (!kthread_should_stop()) { struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_src, dma_dest; + struct dma_async_tx_descriptor *tx = NULL; + dma_addr_t dma_srcs[src_cnt]; + dma_addr_t dma_dsts[dst_cnt]; total_tests++; @@ -227,22 +275,41 @@ static int dmatest_func(void *data) src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); - dmatest_init_srcbuf(thread->srcbuf, src_off, len); - dmatest_init_dstbuf(thread->dstbuf, dst_off, len); + dmatest_init_srcs(thread->srcs, src_off, len); + dmatest_init_dsts(thread->dsts, dst_off, len); - dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off, - len, DMA_TO_DEVICE); + for (i = 0; i < src_cnt; i++) { + u8 *buf = thread->srcs[i] + src_off; + + dma_srcs[i] = dma_map_single(dev->dev, buf, len, + DMA_TO_DEVICE); + } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ - dma_dest = dma_map_single(dev->dev, thread->dstbuf, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < dst_cnt; i++) { + dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], + test_buf_size, + DMA_BIDIRECTIONAL); + } + + if (thread->type == DMA_MEMCPY) + tx = dev->device_prep_dma_memcpy(chan, + dma_dsts[0] + dst_off, + dma_srcs[0], len, + flags); + else if (thread->type == DMA_XOR) + tx = dev->device_prep_dma_xor(chan, + dma_dsts[0] + dst_off, + dma_srcs, xor_sources, + len, flags); - tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off, - dma_src, len, - DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < src_cnt; i++) + dma_unmap_single(dev->dev, dma_srcs[i], len, + DMA_TO_DEVICE); + for (i = 0; i < dst_cnt; i++) + dma_unmap_single(dev->dev, dma_dsts[i], + test_buf_size, + DMA_BIDIRECTIONAL); pr_warning("%s: #%u: prep error with src_off=0x%x " "dst_off=0x%x len=0x%x\n", 
thread_name, total_tests - 1, @@ -263,11 +330,11 @@ static int dmatest_func(void *data) failed_tests++; continue; } - dma_async_memcpy_issue_pending(chan); + dma_async_issue_pending(chan); do { msleep(1); - status = dma_async_memcpy_complete( + status = dma_async_is_tx_complete( chan, cookie, NULL, NULL); } while (status == DMA_IN_PROGRESS); @@ -278,29 +345,30 @@ static int dmatest_func(void *data) continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - dma_unmap_single(dev->dev, dma_dest, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < dst_cnt; i++) + dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, + DMA_BIDIRECTIONAL); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += dmatest_verify(thread->srcbuf, 0, src_off, + error_count += dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += dmatest_verify(thread->srcbuf, src_off, + error_count += dmatest_verify(thread->srcs, src_off, src_off + len, src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += dmatest_verify(thread->srcbuf, src_off + len, + error_count += dmatest_verify(thread->srcs, src_off + len, test_buf_size, src_off + len, PATTERN_SRC, true); pr_debug("%s: verifying dest buffer...\n", thread->task->comm); - error_count += dmatest_verify(thread->dstbuf, 0, dst_off, + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += dmatest_verify(thread->dstbuf, dst_off, + error_count += dmatest_verify(thread->dsts, dst_off, dst_off + len, src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += dmatest_verify(thread->dstbuf, dst_off + len, + error_count += dmatest_verify(thread->dsts, dst_off + len, test_buf_size, dst_off + len, PATTERN_DST, false); @@ -319,10 +387,16 @@ static int dmatest_func(void *data) } ret = 0; - kfree(thread->dstbuf); + for (i = 0; thread->dsts[i]; i++) + kfree(thread->dsts[i]); err_dstbuf: - kfree(thread->srcbuf); + kfree(thread->dsts); +err_dsts: + for (i = 0; thread->srcs[i]; i++) + kfree(thread->srcs[i]); err_srcbuf: + kfree(thread->srcs); +err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); return ret; @@ -344,11 +418,54 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) kfree(dtc); } +static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) +{ + struct dmatest_thread *thread; + struct dma_chan *chan = dtc->chan; + char *op; + unsigned int i; + + if (type == DMA_MEMCPY) + op = "copy"; + else if (type == DMA_XOR) + op = "xor"; + else + return -EINVAL; + + for (i = 0; i < threads_per_chan; i++) { + thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); + if (!thread) { + pr_warning("dmatest: No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); + + break; + } + thread->chan = dtc->chan; + thread->type = type; + smp_wmb(); + thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + dma_chan_name(chan), op, i); + if (IS_ERR(thread->task)) { + pr_warning("dmatest: Failed to run thread %s-%s%u\n", + dma_chan_name(chan), op, i); + kfree(thread); + break; + } + + /* srcbuf and dstbuf are allocated by the thread itself */ + + list_add_tail(&thread->node, &dtc->threads); + } + + return i; +} + static int dmatest_add_channel(struct dma_chan *chan) { struct dmatest_chan *dtc; - struct dmatest_thread *thread; - unsigned int i; + struct dma_device *dma_dev = chan->device; + unsigned int thread_count = 0; + unsigned int cnt; dtc = 
kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { @@ -359,30 +476,17 @@ static int dmatest_add_channel(struct dma_chan *chan) dtc->chan = chan; INIT_LIST_HEAD(&dtc->threads); - for (i = 0; i < threads_per_chan; i++) { - thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); - if (!thread) { - pr_warning("dmatest: No memory for %s-test%u\n", - dma_chan_name(chan), i); - break; - } - thread->chan = dtc->chan; - smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-test%u", - dma_chan_name(chan), i); - if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-test%u\n", - dma_chan_name(chan), i); - kfree(thread); - break; - } - - /* srcbuf and dstbuf are allocated by the thread itself */ - - list_add_tail(&thread->node, &dtc->threads); + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_MEMCPY); + thread_count += cnt > 0 ?: 0; + } + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_XOR); + thread_count += cnt > 0 ?: 0; } - pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); + pr_info("dmatest: Started %u threads using %s\n", + thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &dmatest_channels); nr_channels++; From e44e0aa3cfa97cddff01704751a4b25151830c72 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: [PATCH 10/13] dmatest: add dma interrupts and callbacks Use the callback infrastructure to report driver/hardware hangs or missed interrupts. Since this makes the test threads much more aggressive (from: explicit 1ms sleep to: wait_for_completion) we set the nice value to 10 so as to not swamp legitimate tasks. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 224acf478fe..a27c0fb1bc1 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -196,6 +196,11 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } +static void dmatest_callback(void *completion) +{ + complete(completion); +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -261,13 +266,17 @@ static int dmatest_func(void *data) } thread->dsts[i] = NULL; - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP; + set_user_nice(current, 10); + + flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; while (!kthread_should_stop()) { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; + struct completion cmp; + unsigned long tmo = msecs_to_jiffies(3000); total_tests++; @@ -318,7 +327,10 @@ static int dmatest_func(void *data) failed_tests++; continue; } - tx->callback = NULL; + + init_completion(&cmp); + tx->callback = dmatest_callback; + tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -332,18 +344,23 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - do { - msleep(1); - status = dma_async_is_tx_complete( - chan, cookie, NULL, NULL); - } while (status == DMA_IN_PROGRESS); + tmo = wait_for_completion_timeout(&cmp, tmo); + status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (status == DMA_ERROR) { - pr_warning("%s: #%u: error during copy\n", - thread_name, total_tests - 1); + if 
(tmo == 0) { + pr_warning("%s: #%u: test timed out\n", + thread_name, total_tests - 1); + failed_tests++; + continue; + } else if (status != DMA_SUCCESS) { + pr_warning("%s: #%u: got completion callback," + " but status is \'%s\'\n", + thread_name, total_tests - 1, + status == DMA_ERROR ? "error" : "in progress"); failed_tests++; continue; } + /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ for (i = 0; i < dst_cnt; i++) dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, From 0f571515c332e00b3515dbe0859ceaa30ab66e00 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Fri, 6 Mar 2009 20:07:14 +0900 Subject: [PATCH 11/13] dmaengine: Add privatecnt to revert DMA_PRIVATE property Currently dma_request_channel() set DMA_PRIVATE capability but never clear it. So if a public channel was once grabbed by dma_request_channel(), the device stay PRIVATE forever. Add privatecnt member to dma_device to correctly revert it. [lg@denx.de: fix bad usage of 'chan' in dma_async_device_register] Signed-off-by: Atsushi Nemoto Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 8 ++++++++ include/linux/dmaengine.h | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a41d1ea10fa..92438e9dacc 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v * published in the general-purpose allocator */ dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); if (err == -ENODEV) { @@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v dma_chan_name(chan), err); else break; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); chan->private = NULL; chan = NULL; } @@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan) WARN_ONCE(chan->client_count != 1, "chan reference count %d != 1\n", chan->client_count); dma_chan_put(chan); + /* drop PRIVATE cap enabled by __dma_request_channel() */ + if (--chan->device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); chan->private = NULL; mutex_unlock(&dma_list_mutex); } @@ -719,6 +725,8 @@ int dma_async_device_register(struct dma_device *device) } } list_add_tail_rcu(&device->global_node, &dma_device_list); + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + device->privatecnt++; /* Always private */ dma_channel_rebalance(); mutex_unlock(&dma_list_mutex); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2afc2c95e42..2e2aa3df170 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -202,6 +202,7 @@ struct dma_async_tx_descriptor { /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported + * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags @@ -224,6 +225,7 @@ struct dma_async_tx_descriptor { struct dma_device { unsigned int chancnt; + unsigned int privatecnt; struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; @@ -352,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) set_bit(tx_type, dstp->bits); } +#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) +static inline void +__dma_cap_clear(enum 
dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + clear_bit(tx_type, dstp->bits); +} + #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) static inline void __dma_cap_zero(dma_cap_mask_t *dstp) { From d9de451989a88a2003ca06e524aca4665c0c7f06 Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Wed, 1 Apr 2009 15:47:02 +0200 Subject: [PATCH 12/13] dw_dmac: add cyclic API to DW DMA driver This patch adds a cyclic DMA interface to the DW DMA driver. This is very useful if you want to use the DMA controller in combination with a sound device which uses cyclic buffers. Using a DMA channel for cyclic DMA will disable the possibility to use it as a normal DMA engine until the user calls the cyclic free function on the DMA channel. Also a cyclic DMA list can not be prepared if the channel is already active. Signed-off-by: Hans-Christian Egtvedt Acked-by: Haavard Skinnemoen Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 332 ++++++++++++++++++++++++++++++++++++- drivers/dma/dw_dmac_regs.h | 7 +- include/linux/dw_dmac.h | 19 +++ 3 files changed, 356 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 862fc9ce9d8..0b8aada08aa 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_descriptor_complete(dwc, bad_desc); } +/* --------------------- Cyclic DMA API extensions -------------------- */ + +inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, SAR); +} +EXPORT_SYMBOL(dw_dma_get_src_addr); + +inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, DAR); +} +EXPORT_SYMBOL(dw_dma_get_dst_addr); + +/* called with dwc->lock held and all DMAC interrupts disabled */ +static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, + u32 status_block, u32 status_err, u32 status_xfer) +{ + if (status_block & dwc->mask) { + void (*callback)(void *param); + void *callback_param; + + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", + channel_readl(dwc, LLP)); + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + + callback = dwc->cdesc->period_callback; + callback_param = dwc->cdesc->period_callback_param; + if (callback) { + spin_unlock(&dwc->lock); + callback(callback_param); + spin_lock(&dwc->lock); + } + } + + /* + * Error and transfer complete are highly unlikely, and will most + * likely be due to a configuration error by the user. + */ + if (unlikely(status_err & dwc->mask) || + unlikely(status_xfer & dwc->mask)) { + int i; + + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " + "interrupt, stopping DMA transfer\n", + status_xfer ? 
"xfer" : "error"); + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + /* make sure DMA does not restart by loading a new list */ + channel_writel(dwc, LLP, 0); + channel_writel(dwc, CTL_LO, 0); + channel_writel(dwc, CTL_HI, 0); + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + for (i = 0; i < dwc->cdesc->periods; i++) + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); + } +} + +/* ------------------------------------------------------------------------- */ + static void dw_dma_tasklet(unsigned long data) { struct dw_dma *dw = (struct dw_dma *)data; @@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data) for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; spin_lock(&dwc->lock); - if (status_err & (1 << i)) + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) + dwc_handle_cyclic(dw, dwc, status_block, status_err, + status_xfer); + else if (status_err & (1 << i)) dwc_handle_error(dw, dwc); else if ((status_block | status_xfer) & (1 << i)) dwc_scan_descriptors(dw, dwc); @@ -883,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); } +/* --------------------- Cyclic DMA API extensions -------------------- */ + +/** + * dw_dma_cyclic_start - start the cyclic DMA transfer + * @chan: the DMA channel to start + * + * Must be called with soft interrupts disabled. Returns zero on success or + * -errno on failure. + */ +int dw_dma_cyclic_start(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); + return -ENODEV; + } + + spin_lock(&dwc->lock); + + /* assert channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); + spin_unlock(&dwc->lock); + return -EBUSY; + } + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + /* setup DMAC channel registers */ + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + + channel_set_bit(dw, CH_EN, dwc->mask); + + spin_unlock(&dwc->lock); + + return 0; +} +EXPORT_SYMBOL(dw_dma_cyclic_start); + +/** + * dw_dma_cyclic_stop - stop the cyclic DMA transfer + * @chan: the DMA channel to stop + * + * Must be called with soft interrupts disabled. 
+ */ +void dw_dma_cyclic_stop(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + spin_lock(&dwc->lock); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + spin_unlock(&dwc->lock); +} +EXPORT_SYMBOL(dw_dma_cyclic_stop); + +/** + * dw_dma_cyclic_prep - prepare the cyclic DMA transfer + * @chan: the DMA channel to prepare + * @buf_addr: physical DMA address where the buffer starts + * @buf_len: total number of bytes for the entire buffer + * @period_len: number of bytes for each period + * @direction: transfer direction, to or from device + * + * Must be called before trying to start the transfer. Returns a valid struct + * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. + */ +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_data_direction direction) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_cyclic_desc *cdesc; + struct dw_cyclic_desc *retval = NULL; + struct dw_desc *desc; + struct dw_desc *last = NULL; + struct dw_dma_slave *dws = chan->private; + unsigned long was_cyclic; + unsigned int reg_width; + unsigned int periods; + unsigned int i; + + spin_lock_bh(&dwc->lock); + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { + spin_unlock_bh(&dwc->lock); + dev_dbg(chan2dev(&dwc->chan), + "queue and/or active list are not empty\n"); + return ERR_PTR(-EBUSY); + } + + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + spin_unlock_bh(&dwc->lock); + if (was_cyclic) { + dev_dbg(chan2dev(&dwc->chan), + "channel already prepared for cyclic DMA\n"); + return ERR_PTR(-EBUSY); + } + + retval = ERR_PTR(-EINVAL); + reg_width = dws->reg_width; + periods = buf_len / period_len; + + /* Check for too big/unaligned periods and unaligned DMA buffer. 
*/ + if (period_len > (DWC_MAX_COUNT << reg_width)) + goto out_err; + if (unlikely(period_len & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(buf_addr & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) + goto out_err; + + retval = ERR_PTR(-ENOMEM); + + if (periods > NR_DESCS_PER_CHANNEL) + goto out_err; + + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); + if (!cdesc) + goto out_err; + + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); + if (!cdesc->desc) + goto out_err_alloc; + + for (i = 0; i < periods; i++) { + desc = dwc_desc_get(dwc); + if (!desc) + goto out_err_desc_get; + + switch (direction) { + case DMA_TO_DEVICE: + desc->lli.dar = dws->tx_reg; + desc->lli.sar = buf_addr + (period_len * i); + desc->lli.ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2P + | DWC_CTLL_INT_EN); + break; + case DMA_FROM_DEVICE: + desc->lli.dar = buf_addr + (period_len * i); + desc->lli.sar = dws->rx_reg; + desc->lli.ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX + | DWC_CTLL_FC_P2M + | DWC_CTLL_INT_EN); + break; + default: + break; + } + + desc->lli.ctlhi = (period_len >> reg_width); + cdesc->desc[i] = desc; + + if (last) { + last->lli.llp = desc->txd.phys; + dma_sync_single_for_device(chan2parent(chan), + last->txd.phys, sizeof(last->lli), + DMA_TO_DEVICE); + } + + last = desc; + } + + /* lets make a cyclic list */ + last->lli.llp = cdesc->desc[0]->txd.phys; + dma_sync_single_for_device(chan2parent(chan), last->txd.phys, + sizeof(last->lli), DMA_TO_DEVICE); + + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " + "period %zu periods %d\n", buf_addr, buf_len, + period_len, periods); + + cdesc->periods = periods; + dwc->cdesc = cdesc; + + return cdesc; + +out_err_desc_get: + while (i--) + dwc_desc_put(dwc, cdesc->desc[i]); +out_err_alloc: + kfree(cdesc); +out_err: + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + return (struct dw_cyclic_desc *)retval; +} +EXPORT_SYMBOL(dw_dma_cyclic_prep); + +/** + * dw_dma_cyclic_free - free a prepared cyclic DMA transfer + * @chan: the DMA channel to free + */ +void dw_dma_cyclic_free(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_cyclic_desc *cdesc = dwc->cdesc; + int i; + + dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); + + if (!cdesc) + return; + + spin_lock_bh(&dwc->lock); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + spin_unlock_bh(&dwc->lock); + + for (i = 0; i < cdesc->periods; i++) + dwc_desc_put(dwc, cdesc->desc[i]); + + kfree(cdesc->desc); + kfree(cdesc); + + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_free); + /*----------------------------------------------------------------------*/ static void dw_dma_off(struct dw_dma *dw) diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index b252b202c5c..13a58076703 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -126,6 +126,10 @@ struct dw_dma_regs { #define DW_REGLEN 0x400 +enum dw_dmac_flags { + DW_DMA_IS_CYCLIC = 0, +}; + struct dw_dma_chan { struct dma_chan 
chan; void __iomem *ch_regs; @@ -134,10 +138,12 @@ struct dw_dma_chan { spinlock_t lock; /* these other elements are all protected by lock */ + unsigned long flags; dma_cookie_t completed; struct list_head active_list; struct list_head queue; struct list_head free_list; + struct dw_cyclic_desc *cdesc; unsigned int descs_allocated; }; @@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) return container_of(chan, struct dw_dma_chan, chan); } - struct dw_dma { struct dma_device dma; void __iomem *regs; diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index d797dde247f..c8aad713a04 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -74,4 +74,23 @@ struct dw_dma_slave { #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ +/* DMA API extensions */ +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_data_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + #endif /* DW_DMAC_H */ From 8c6db1bbf80123839ec87bdd6cb364aea384623d Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 2 Apr 2009 11:36:58 +0200 Subject: [PATCH 13/13] dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup Add Start-of-Frame and End-of-Frame debugging to ipu_idmac.c, in the future it might also be needed for the actual video processing in mx3-camera, at which point, the ISRs will have to be transferred to mx3_camera.c, for which ipu_irq_map() and ipu_irq_unmap() functions will have to be exported. Also simplify a couple of pointer-dereferences. 
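A rough, hypothetical sketch of how mx3_camera.c could take over these frame
ISRs once ipu_irq_map()/ipu_irq_unmap() are exported; the function names,
error handling and the header providing the prototypes are illustrative only,
and 69/70 are the IC_7 SoF/EoF interrupt sources used below in the patch:

#include <linux/interrupt.h>
/* assumes the ipu_irq_map()/ipu_irq_unmap() prototypes are made visible
 * to mx3_camera.c, e.g. via a shared header -- not part of this patch */

static irqreturn_t mx3_cam_frame_irq(int irq, void *dev_id)
{
	struct idmac_channel *ichan = dev_id;

	/* frame boundary: kick buffer handling for this channel */
	dev_dbg(&ichan->dma_chan.dev->device, "frame IRQ %d\n", irq);
	return IRQ_HANDLED;
}

static int mx3_cam_request_frame_irqs(struct idmac_channel *ichan)
{
	int sof = ipu_irq_map(69);	/* IC_7 Start-of-Frame */
	int eof = ipu_irq_map(70);	/* IC_7 End-of-Frame */

	if (sof < 0 || eof < 0)
		return -EINVAL;

	if (request_irq(sof, mx3_cam_frame_irq, 0, "mx3-camera SOF", ichan) ||
	    request_irq(eof, mx3_cam_frame_irq, 0, "mx3-camera EOF", ichan))
		return -EBUSY;

	return 0;
}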
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 65 ++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 7ce0e25266d..90773844cc8 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1442,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan unsigned long flags; /* We only can handle these three channels so far */ - if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && - ichan->dma_chan.chan_id != IDMAC_IC_7) + if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && + chan->chan_id != IDMAC_IC_7) return NULL; if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { @@ -1484,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan) /* This is not always needed, but doesn't hurt either */ spin_lock_irqsave(&ipu->lock, flags); - ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); + ipu_select_buffer(chan->chan_id, ichan->active_buffer); spin_unlock_irqrestore(&ipu->lock, flags); /* @@ -1541,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan) mutex_unlock(&ichan->chan_mutex); } +#ifdef DEBUG +static irqreturn_t ic_sof_irq(int irq, void *dev_id) +{ + struct idmac_channel *ichan = dev_id; + printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", + irq, ichan->dma_chan.chan_id); + disable_irq(irq); + return IRQ_HANDLED; +} + +static irqreturn_t ic_eof_irq(int irq, void *dev_id) +{ + struct idmac_channel *ichan = dev_id; + printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", + irq, ichan->dma_chan.chan_id); + disable_irq(irq); + return IRQ_HANDLED; +} + +static int ic_sof = -EINVAL, ic_eof = -EINVAL; +#endif + static int idmac_alloc_chan_resources(struct dma_chan *chan) { struct idmac_channel *ichan = to_idmac_chan(chan); @@ -1554,7 +1576,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) chan->cookie = 1; ichan->completed = -ENXIO; - ret = ipu_irq_map(ichan->dma_chan.chan_id); + ret = ipu_irq_map(chan->chan_id); if (ret < 0) goto eimap; @@ -1575,17 +1597,28 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) if (ret < 0) goto erirq; +#ifdef DEBUG + if (chan->chan_id == IDMAC_IC_7) { + ic_sof = ipu_irq_map(69); + if (ic_sof > 0) + request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + ic_eof = ipu_irq_map(70); + if (ic_eof > 0) + request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + } +#endif + ichan->status = IPU_CHANNEL_INITIALIZED; - dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", - ichan->dma_chan.chan_id, ichan->eof_irq); + dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", + chan->chan_id, ichan->eof_irq); return ret; erirq: ipu_uninit_channel(idmac, ichan); eichan: - ipu_irq_unmap(ichan->dma_chan.chan_id); + ipu_irq_unmap(chan->chan_id); eimap: return ret; } @@ -1600,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan) __idmac_terminate_all(chan); if (ichan->status > IPU_CHANNEL_FREE) { +#ifdef DEBUG + if (chan->chan_id == IDMAC_IC_7) { + if (ic_sof > 0) { + free_irq(ic_sof, ichan); + ipu_irq_unmap(69); + ic_sof = -EINVAL; + } + if (ic_eof > 0) { + free_irq(ic_eof, ichan); + ipu_irq_unmap(70); + ic_eof = -EINVAL; + } + } +#endif free_irq(ichan->eof_irq, ichan); - ipu_irq_unmap(ichan->dma_chan.chan_id); + ipu_irq_unmap(chan->chan_id); } ichan->status = IPU_CHANNEL_FREE; @@ -1663,7 +1710,7 @@ static 
int __init ipu_idmac_init(struct ipu *ipu) dma_chan->device = &idmac->dma; dma_chan->cookie = 1; dma_chan->chan_id = i; - list_add_tail(&ichan->dma_chan.device_node, &dma->channels); + list_add_tail(&dma_chan->device_node, &dma->channels); } idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
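For reference, a minimal usage sketch of the cyclic API introduced in patch 12,
written from the point of view of a hypothetical audio client driver;
start_audio_dma(), period_done() and the abbreviated error handling are
illustrative only and not part of the patch:

#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dw_dmac.h>

static void period_done(void *param)
{
	/* called from the DMAC tasklet after each completed period */
}

static int start_audio_dma(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;
	int ret;

	/* build the circular descriptor ring (memory -> device) */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = period_done;
	cdesc->period_callback_param = NULL;

	/* start/stop must run with soft interrupts disabled */
	local_bh_disable();
	ret = dw_dma_cyclic_start(chan);
	local_bh_enable();

	return ret;
}

static void stop_audio_dma(struct dma_chan *chan)
{
	local_bh_disable();
	dw_dma_cyclic_stop(chan);
	local_bh_enable();

	/* after this the channel can be used as a normal DMA engine again */
	dw_dma_cyclic_free(chan);
}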