From: Dan Williams
Date: Wed, 9 Sep 2009 00:55:21 +0000 (-0700)
Subject: Merge branch 'dmaengine' into async-tx-next
X-Git-Tag: v2.6.32-rc1~60^2^2~13
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=bbb20089a3275a19e475dbc21320c3742e3ca423;p=linux-2.6-block.git

Merge branch 'dmaengine' into async-tx-next

Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
---

bbb20089a3275a19e475dbc21320c3742e3ca423
diff --cc drivers/dma/dmatest.c
index a3722a7384b5,d93017fc7872..a32a4cf7b1e0
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@@ -535,12 -512,8 +548,12 @@@ static int dmatest_add_channel(struct d
  	}
  	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  		cnt = dmatest_add_threads(dtc, DMA_XOR);
- 		thread_count += cnt > 0 ?: 0;
+ 		thread_count += cnt > 0 ? cnt : 0;
  	}
 +	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 +		cnt = dmatest_add_threads(dtc, DMA_PQ);
 +		thread_count += cnt > 0 ?: 0;
 +	}
  
  	pr_info("dmatest: Started %u threads using %s\n",
  		thread_count, dma_chan_name(chan));
diff --cc drivers/dma/ioat/dma.h
index 0e37e426c729,8966fa5453a7..6a675a2a2d1c
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@@ -169,9 -155,9 +169,9 @@@ ioat_is_complete(struct dma_chan *c, dm
  
  /**
   * struct ioat_desc_sw - wrapper around hardware descriptor
-  * @hw: hardware DMA descriptor
+  * @hw: hardware DMA descriptor (for memcpy)
   * @node: this descriptor will either be on the free list,
-  *	or attached to a transaction list (async_tx.tx_list)
+  *	or attached to a transaction list (tx_list)
   * @txd: the generic software descriptor for all engines
   * @id: identifier for debug
   */
diff --cc drivers/dma/ioat/dma_v2.h
index 246e646b1904,ac00adc81974..1d849ef74d5f
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@@ -115,36 -114,10 +115,36 @@@ static inline u16 ioat2_xferlen_to_desc
  	return num_descs;
  }
  
 +/**
 + * struct ioat_ring_ent - wrapper around hardware descriptor
 + * @hw: hardware DMA descriptor (for memcpy)
 + * @fill: hardware fill descriptor
 + * @xor: hardware xor descriptor
 + * @xor_ex: hardware xor extension descriptor
 + * @pq: hardware pq descriptor
 + * @pq_ex: hardware pq extension descriptor
 + * @pqu: hardware pq update descriptor
 + * @raw: hardware raw (un-typed) descriptor
 + * @txd: the generic software descriptor for all engines
 + * @len: total transaction length for unmap
 + * @result: asynchronous result of validate operations
 + * @id: identifier for debug
 + */
  struct ioat_ring_ent {
- 	struct ioat_dma_descriptor *hw;
 +	union {
 +		struct ioat_dma_descriptor *hw;
 +		struct ioat_fill_descriptor *fill;
 +		struct ioat_xor_descriptor *xor;
 +		struct ioat_xor_ext_descriptor *xor_ex;
 +		struct ioat_pq_descriptor *pq;
 +		struct ioat_pq_ext_descriptor *pq_ex;
 +		struct ioat_pq_update_descriptor *pqu;
 +		struct ioat_raw_descriptor *raw;
 +	};
- 	struct dma_async_tx_descriptor txd;
  	size_t len;
 +	struct dma_async_tx_descriptor txd;
 +	enum sum_check_flags *result;
  #ifdef DEBUG
  	int id;
  #endif
@@@ -170,20 -143,5 +170,21 @@@ int __devinit ioat2_dma_probe(struct io
  int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
  struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
  struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 +int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
 +int ioat2_enumerate_channels(struct ioatdma_device *device);
 +struct dma_async_tx_descriptor *
 +ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 +			   dma_addr_t dma_src, size_t len, unsigned long flags);
 +void ioat2_issue_pending(struct dma_chan *chan);
 +int ioat2_alloc_chan_resources(struct dma_chan *c);
 +void ioat2_free_chan_resources(struct dma_chan *c);
 +enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 +				  dma_cookie_t *done, dma_cookie_t *used);
 +void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 +bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 +void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
 +void ioat2_cleanup_tasklet(unsigned long data);
 +void ioat2_timer_event(unsigned long data);
 +extern struct kobj_type ioat2_ktype;
+ extern struct kmem_cache *ioat2_cache;
  #endif /* IOATDMA_V2_H */
diff --cc drivers/dma/ioat/pci.c
index b77d3a2864ad,61086c6bbf42..c788fa266470
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@@ -182,10 -170,18 +184,21 @@@ static void __devexit ioat_remove(struc
  
  static int __init ioat_init_module(void)
  {
+ 	int err;
+ 
 +	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
 +		DRV_NAME, IOAT_DMA_VERSION);
 +
- 	return pci_register_driver(&ioat_pci_driver);
+ 	ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
+ 					0, SLAB_HWCACHE_ALIGN, NULL);
+ 	if (!ioat2_cache)
+ 		return -ENOMEM;
+ 
+ 	err = pci_register_driver(&ioat_pci_driver);
+ 	if (err)
+ 		kmem_cache_destroy(ioat2_cache);
+ 
+ 	return err;
  }
  module_init(ioat_init_module);
diff --cc drivers/md/raid5.c
index 54ef8d75541d,f9f991e6e138..cac6f4d3a143
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@@ -4371,9 -4129,8 +4371,9 @@@ static void synchronize_stripe_processi
  static void raid5d(mddev_t *mddev)
  {
  	struct stripe_head *sh;
- 	raid5_conf_t *conf = mddev_to_conf(mddev);
+ 	raid5_conf_t *conf = mddev->private;
  	int handled;
 +	LIST_HEAD(raid_domain);
  
  	pr_debug("+++ raid5d active\n");
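
A note on the ioat_init_module() hunk above: it follows the usual kernel init/unwind idiom of creating the descriptor slab cache first, registering the PCI driver second, and destroying the cache again if registration fails. The sketch below is a minimal, hypothetical module illustrating the same pattern; all names (example_desc, example_cache, example_pci_driver) are placeholders and not part of the ioat driver, and example_pci_driver is assumed to be filled in elsewhere.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical per-descriptor object, standing in for struct ioat_ring_ent. */
struct example_desc {
	u64 payload;
};

static struct kmem_cache *example_cache;

/* Assumed to be fully initialized (name, id_table, probe, remove) elsewhere. */
static struct pci_driver example_pci_driver;

static int __init example_init_module(void)
{
	int err;

	/* Create the slab cache before anything can allocate from it. */
	example_cache = kmem_cache_create("example_desc",
					  sizeof(struct example_desc),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cache)
		return -ENOMEM;

	err = pci_register_driver(&example_pci_driver);
	if (err)
		kmem_cache_destroy(example_cache);	/* unwind on failure */

	return err;
}
module_init(example_init_module);

static void __exit example_exit_module(void)
{
	pci_unregister_driver(&example_pci_driver);
	kmem_cache_destroy(example_cache);
}
module_exit(example_exit_module);

MODULE_LICENSE("GPL");

Unregistering the driver before destroying the cache in module exit mirrors the init order in reverse, so no descriptor can be allocated from a cache that has already been torn down.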