mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 19:26:25 +00:00
ioat2,3: cacheline align software descriptor allocations
All the necessary fields for handling an ioat2,3 ring entry can fit into one cacheline. Move ->len prior to ->txd in struct ioat_ring_ent, and move allocation of these entries to a hw-cache-aligned kmem cache to reduce the number of cachelines dirtied for descriptor management. Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
parent
0803172778
commit
162b96e63e
3 changed files with 20 additions and 4 deletions
|
@@ -399,11 +399,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
|
|||
return NULL;
|
||||
memset(hw, 0, sizeof(*hw));
|
||||
|
||||
desc = kzalloc(sizeof(*desc), flags);
|
||||
desc = kmem_cache_alloc(ioat2_cache, flags);
|
||||
if (!desc) {
|
||||
pci_pool_free(dma->dma_pool, hw, phys);
|
||||
return NULL;
|
||||
}
|
||||
memset(desc, 0, sizeof(*desc));
|
||||
|
||||
dma_async_tx_descriptor_init(&desc->txd, chan);
|
||||
desc->txd.tx_submit = ioat2_tx_submit_unlock;
|
||||
|
@@ -418,7 +419,7 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
|
|||
|
||||
dma = to_ioatdma_device(chan->device);
|
||||
pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
|
||||
kfree(desc);
|
||||
kmem_cache_free(ioat2_cache, desc);
|
||||
}
|
||||
|
||||
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
|
||||
|
|
|
@@ -116,8 +116,8 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
|
|||
|
||||
struct ioat_ring_ent {
|
||||
struct ioat_dma_descriptor *hw;
|
||||
struct dma_async_tx_descriptor txd;
|
||||
size_t len;
|
||||
struct dma_async_tx_descriptor txd;
|
||||
#ifdef DEBUG
|
||||
int id;
|
||||
#endif
|
||||
|
@@ -143,4 +143,5 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
|
|||
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
|
||||
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
|
||||
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
|
||||
extern struct kmem_cache *ioat2_cache;
|
||||
#endif /* IOATDMA_V2_H */
|
||||
|
|
|
@@ -69,6 +69,8 @@ static int ioat_dca_enabled = 1;
|
|||
module_param(ioat_dca_enabled, int, 0644);
|
||||
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
|
||||
|
||||
struct kmem_cache *ioat2_cache;
|
||||
|
||||
#define DRV_NAME "ioatdma"
|
||||
|
||||
static struct pci_driver ioat_pci_driver = {
|
||||
|
@@ -168,12 +170,24 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
|
|||
|
||||
static int __init ioat_init_module(void)
|
||||
{
|
||||
return pci_register_driver(&ioat_pci_driver);
|
||||
int err;
|
||||
|
||||
ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
|
||||
0, SLAB_HWCACHE_ALIGN, NULL);
|
||||
if (!ioat2_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
err = pci_register_driver(&ioat_pci_driver);
|
||||
if (err)
|
||||
kmem_cache_destroy(ioat2_cache);
|
||||
|
||||
return err;
|
||||
}
|
||||
module_init(ioat_init_module);
|
||||
|
||||
static void __exit ioat_exit_module(void)
|
||||
{
|
||||
pci_unregister_driver(&ioat_pci_driver);
|
||||
kmem_cache_destroy(ioat2_cache);
|
||||
}
|
||||
module_exit(ioat_exit_module);
|
||||
|
|
Loading…
Reference in a new issue