Merge branch 'sglist-arch' into for-linus
commit 3eed13fd93
110 changed files with 870 additions and 597 deletions
@@ -514,7 +514,7 @@ With scatterlists, you map a region gathered from several regions by:

 	int i, count = pci_map_sg(dev, sglist, nents, direction);
 	struct scatterlist *sg;

-	for (i = 0, sg = sglist; i < count; i++, sg++) {
+	for_each_sg(sglist, sg, count, i) {
 		hw_address[i] = sg_dma_address(sg);
 		hw_len[i] = sg_dma_len(sg);
 	}
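The documented pattern above, written out as a self-contained helper. This is a sketch rather than code from the commit: the hw_address[]/hw_len[] arrays stand in for whatever descriptor format the device actually consumes.

#include <linux/pci.h>
#include <linux/scatterlist.h>

static void program_sg_descriptors(struct pci_dev *dev,
				   struct scatterlist *sglist, int nents,
				   int direction, dma_addr_t *hw_address,
				   unsigned int *hw_len)
{
	struct scatterlist *sg;
	int i, count;

	/* count may be smaller than nents if the IOMMU merged entries */
	count = pci_map_sg(dev, sglist, nents, direction);

	/* for_each_sg() follows chain links that a bare sg++ would miss */
	for_each_sg(sglist, sg, count, i) {
		hw_address[i] = sg_dma_address(sg);
		hw_len[i] = sg_dma_len(sg);
	}
}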
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
 		       startsg->dma_address, startsg->dma_length,
 		       sba_sg_address(startsg));
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 }
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 	while (the_nents-- > 0) {
 		if (sba_sg_address(the_sg) == 0x0UL)
 			sba_dump_sg(NULL, startsg, nents);
-		the_sg++;
+		the_sg = sg_next(the_sg);
 	}
 }
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
 			u32 pide = startsg->dma_address & ~PIDE_FLAG;
 			dma_offset = (unsigned long) pide & ~iovp_mask;
 			startsg->dma_address = 0;
-			dma_sg++;
+			dma_sg = sg_next(dma_sg);
 			dma_sg->dma_address = pide | ioc->ibase;
 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
 			n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
 				pdirp++;
 			} while (cnt > 0);
 		}
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 	/* force pdir update */
 	wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 	while (--nents > 0) {
 		unsigned long vaddr;	/* tmp */

-		startsg++;
+		startsg = sg_next(startsg);

 		/* PARANOID */
 		startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
 	ASSERT(to_pci_dev(dev)->dma_mask);
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-		for (sg = sglist ; filled < nents ; filled++, sg++){
+		for_each_sg(sglist, sg, nents, filled) {
 			sg->dma_length = sg->length;
 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
 		}
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	while (nents && sglist->dma_length) {

 		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-		sglist++;
+		sglist = sg_next(sglist);
 		nents--;
 	}
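Every hunk in this file replaces pointer arithmetic (startsg++, dma_sg++, sglist++) with sg_next(). A simplified sketch of why, assuming the chain encoding this series introduces, where the low bit of the page pointer marks a link entry; the real helper lives in <linux/scatterlist.h> and this is not a verbatim copy:

/*
 * Sketch only: a chained scatterlist ends each segment with a link
 * entry whose page field, low bit set, points at the next segment.
 * Plain sg++ would step onto that marker and read garbage.
 */
static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	sg++;
	if ((unsigned long)sg->page & 0x01)	/* chain marker? */
		sg = (struct scatterlist *)((unsigned long)sg->page & ~0x01);
	return sg;
}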
@@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 1024,
 	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };

 static int __init
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		     int nhwentries, int direction)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	struct scatterlist *sg;

 	BUG_ON(dev->bus != &pci_bus_type);

-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		provider->dma_unmap(pdev, sg->dma_address, direction);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 		  int direction)
 {
 	unsigned long phys_addr;
-	struct scatterlist *saved_sg = sg;
+	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	/*
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
 						    phys_addr, sg->length,
@@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
 {
 }

-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;

-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
 			dma_direct_offset;
 		sg->dma_length = sg->length;
@@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
 }

 static int ibmebus_map_sg(struct device *dev,
-			  struct scatterlist *sg,
+			  struct scatterlist *sgl,
 			  int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;

-	for (i = 0; i < nents; i++) {
-		sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
-			+ sg[i].offset;
-		sg[i].dma_length = sg[i].length;
+	for_each_sg(sgl, sg, nents, i) {
+		sg->dma_address = (dma_addr_t)page_address(sg->page)
+			+ sg->offset;
+		sg->dma_length = sg->length;
 	}

 	return nents;
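The conversions above remove both sg[i] indexing and the twin i++/sg++ bookkeeping. for_each_sg() keeps an integer index for callers that still want one while advancing the cursor safely; it expands to roughly this (a sketch of the header macro, not a verbatim copy):

#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))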
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount;
+	int outcount, incount, i;
 	unsigned long handle;

 	BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,

 	spin_lock_irqsave(&(tbl->it_lock), flags);

-	for (s = outs; nelems; nelems--, s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		unsigned long vaddr, npages, entry, slen;

 		slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (novmerge || (dma_addr != dma_next)) {
 			/* Can't merge: create a new segment */
 			segstart = s;
-			outcount++; outs++;
+			outcount++;
+			outs = sg_next(outs);
 			DBG(" can't merge, new segment.\n");
 		} else {
 			outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	 * next entry of the sglist if we didn't fill the list completely
 	 */
 	if (outcount < incount) {
-		outs++;
+		outs = sg_next(outs);
 		outs->dma_address = DMA_ERROR_CODE;
 		outs->dma_length = 0;
 	}
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	return outcount;

 failure:
-	for (s = &sglist[0]; s <= outs; s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;

@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
+		if (s == outs)
+			break;
 	}
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 	return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		    int nelems, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	unsigned long flags;

 	BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,

 	spin_lock_irqsave(&(tbl->it_lock), flags);

+	sg = sglist;
 	while (nelems--) {
 		unsigned int npages;
-		dma_addr_t dma_handle = sglist->dma_address;
+		dma_addr_t dma_handle = sg->dma_address;

-		if (sglist->dma_length == 0)
+		if (sg->dma_length == 0)
 			break;
-		npages = iommu_num_pages(dma_handle,sglist->dma_length);
+		npages = iommu_num_pages(dma_handle, sg->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
-		sglist++;
+		sg = sg_next(sg);
 	}

 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
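One subtlety in the failure path above: the old cleanup loop bounded itself with the pointer comparison s <= outs, which is only meaningful inside one contiguous array. With chaining, the walk has to run entry by entry and stop on pointer identity. A sketch of that shape, with undo_one_mapping() as a hypothetical stand-in for the real per-entry teardown:

static void cleanup_partial_map(struct iommu_table *tbl,
				struct scatterlist *sglist, int nelems,
				struct scatterlist *outs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0)
			undo_one_mapping(tbl, s);	/* hypothetical */
		if (s == outs)		/* last entry actually produced */
			break;
	}
}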
@@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
 	}
 }

-static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
+static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
+	int nents, enum dma_data_direction direction)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
 	BUG_ON("do");
 	return -EPERM;
 #else
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+	struct scatterlist *sg;
 	int i;

-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		int result = ps3_dma_map(dev->d_region,
 			page_to_phys(sg->page) + sg->offset, sg->length,
 			&sg->dma_address, 0);
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>		/* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/scatterlist.h>

 #include <asm/io.h>
 #include <asm/vaddrs.h>
@@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+	struct scatterlist *sg;
 	int n;

 	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
-	for (n = 0; n < nents; n++) {
+	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg->page) == NULL);
 		sg->dvma_address =
 			virt_to_phys(page_address(sg->page)) + sg->offset;
 		sg->dvma_length = sg->length;
-		sg++;
 	}
 	return nents;
 }
@@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+	struct scatterlist *sg;
 	int n;

 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }
@@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+	struct scatterlist *sg;
 	int n;

 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }

-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+	struct scatterlist *sg;
 	int n;

 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }
@@ -11,8 +11,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 #include <linux/bitops.h>
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
 	spin_lock_irqsave(&iounit->lock, flags);
 	while (sz != 0) {
 		--sz;
-		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
-		sg[sz].dvma_length = sg[sz].length;
+		sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+		sg->dvma_length = sg->length;
+		sg = sg_next(sg);
 	}
 	spin_unlock_irqrestore(&iounit->lock, flags);
 }
@@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
 	spin_lock_irqsave(&iounit->lock, flags);
 	while (sz != 0) {
 		--sz;
-		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+		len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+		vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
 		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
 		for (len += vaddr; vaddr < len; vaddr++)
 			clear_bit(vaddr, iounit->bmap);
+		sg = sg_next(sg);
 	}
 	spin_unlock_irqrestore(&iounit->lock, flags);
 }
@@ -12,8 +12,8 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
-		sg++;
+		sg = sg_next(sg);
 	}
 }
@@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
-		sg++;
+		sg = sg_next(sg);
 	}
 }
@@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu

 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
-		sg++;
+		sg = sg_next(sg);
 	}
 }
@@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
 		iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
 		sg->dvma_address = 0x21212121;
-		sg++;
+		sg = sg_next(sg);
 	}
 }
@@ -17,8 +17,8 @@
 #include <linux/highmem.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
 {
 	while (sz != 0) {
 		--sz;
-		sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
-		sg[sz].dvma_length = sg[sz].length;
+		sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+		sg->dvma_length = sg->length;
+		sg = sg_next(sg);
 	}
 }
@@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
 {
 	while (sz != 0) {
 		--sz;
-		sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+		sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+		sg = sg_next(sg);
 	}
 }
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>

 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 			   unsigned long iopte_protection)
 {
 	struct scatterlist *dma_sg = sg;
-	struct scatterlist *sg_end = sg + nelems;
+	struct scatterlist *sg_end = sg_last(sg, nelems);
 	int i;

 	for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
 					break;
 				}
-				sg++;
+				sg = sg_next(sg);
 			}

 			pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 			}

 			pteval = (pteval & IOPTE_PAGE) + len;
-			sg++;
+			sg = sg_next(sg);

 			/* Skip over any tail mappings we've fully mapped,
 			 * adjusting pteval along the way.  Stop when we
 			 * detect a page crossing event.
 			 */
-			while (sg < sg_end &&
+			while (sg != sg_end &&
 			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
 			       ((pteval ^
 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
 				pteval += sg->length;
-				sg++;
+				sg = sg_next(sg);
 			}
 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
 				pteval = ~0UL;
 		} while (dma_npages != 0);
-		dma_sg++;
+		dma_sg = sg_next(dma_sg);
 	}
 }
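fill_sg() used to compute the end of the list as sg + nelems, again pure pointer arithmetic. sg_last() has to walk instead; a sketch of the chain-safe version (the header can also keep an array-indexing fast path for unchained builds):

static inline struct scatterlist *sg_last_sketch(struct scatterlist *sgl,
						 unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;	/* remember the most recent entry */
	return ret;
}

Note that sg_end now points at the last valid entry rather than one past it, which is why the loop condition above changes from sg < sg_end to sg != sg_end.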
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	sgtmp = sglist;
 	while (used && sgtmp->dma_length) {
 		sgtmp->dma_address += dma_base;
-		sgtmp++;
+		sgtmp = sg_next(sgtmp);
 		used--;
 	}
 	used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, ctx, i, npages;
+	struct scatterlist *sg, *sgprv;
 	u32 bus_addr;

 	if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,

 	bus_addr = sglist->dma_address & IO_PAGE_MASK;

-	for (i = 1; i < nelems; i++)
-		if (sglist[i].dma_length == 0)
+	sgprv = NULL;
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg->dma_length == 0)
 			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		sgprv = sg;
+	}
+
+	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
 		  bus_addr) >> IO_PAGE_SHIFT;

 	base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages, i;
+	struct scatterlist *sg, *sgprv;
 	u32 bus_addr;

 	iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,

 	/* Step 2: Kick data out of streaming buffers. */
 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-	for(i = 1; i < nelems; i++)
-		if (!sglist[i].dma_length)
+	sgprv = NULL;
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg->dma_length == 0)
 			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		sgprv = sg;
+	}
+
+	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
 	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>

 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
 			   int nused, int nelems, unsigned long prot)
 {
 	struct scatterlist *dma_sg = sg;
-	struct scatterlist *sg_end = sg + nelems;
+	struct scatterlist *sg_end = sg_last(sg, nelems);
 	unsigned long flags;
 	int i;

@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
 					break;
 				}
-				sg++;
+				sg = sg_next(sg);
 			}

 			pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
 			}

 			pteval = (pteval & IOPTE_PAGE) + len;
-			sg++;
+			sg = sg_next(sg);

 			/* Skip over any tail mappings we've fully mapped,
 			 * adjusting pteval along the way.  Stop when we
 			 * detect a page crossing event.
 			 */
-			while (sg < sg_end &&
-			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
 			       ((pteval ^
 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
 				pteval += sg->length;
-				sg++;
+				if (sg == sg_end)
+					break;
+				sg = sg_next(sg);
 			}
 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
 				pteval = ~0UL;
 		} while (dma_npages != 0);
-		dma_sg++;
+		dma_sg = sg_next(dma_sg);
 	}

 	if (unlikely(iommu_batch_end() < 0L))
|
||||||
sgtmp = sglist;
|
sgtmp = sglist;
|
||||||
while (used && sgtmp->dma_length) {
|
while (used && sgtmp->dma_length) {
|
||||||
sgtmp->dma_address += dma_base;
|
sgtmp->dma_address += dma_base;
|
||||||
sgtmp++;
|
sgtmp = sg_next(sgtmp);
|
||||||
used--;
|
used--;
|
||||||
}
|
}
|
||||||
used = nelems - used;
|
used = nelems - used;
|
||||||
|
@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
struct pci_pbm_info *pbm;
|
struct pci_pbm_info *pbm;
|
||||||
struct iommu *iommu;
|
struct iommu *iommu;
|
||||||
unsigned long flags, i, npages;
|
unsigned long flags, i, npages;
|
||||||
|
struct scatterlist *sg, *sgprv;
|
||||||
long entry;
|
long entry;
|
||||||
u32 devhandle, bus_addr;
|
u32 devhandle, bus_addr;
|
||||||
|
|
||||||
|
@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||||
devhandle = pbm->devhandle;
|
devhandle = pbm->devhandle;
|
||||||
|
|
||||||
bus_addr = sglist->dma_address & IO_PAGE_MASK;
|
bus_addr = sglist->dma_address & IO_PAGE_MASK;
|
||||||
|
sgprv = NULL;
|
||||||
for (i = 1; i < nelems; i++)
|
for_each_sg(sglist, sg, nelems, i) {
|
||||||
if (sglist[i].dma_length == 0)
|
if (sg->dma_length == 0)
|
||||||
break;
|
break;
|
||||||
i--;
|
|
||||||
npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
|
sgprv = sg;
|
||||||
|
}
|
||||||
|
|
||||||
|
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
|
||||||
bus_addr) >> IO_PAGE_SHIFT;
|
bus_addr) >> IO_PAGE_SHIFT;
|
||||||
|
|
||||||
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
|
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
|
||||||
|
|
|
@@ -35,6 +35,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/scatterlist.h>
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev,
 	struct scatterlist *sglist, int nelems, int direction)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
+	struct scatterlist *s;
+	int i;

 	if (!translate_phb(to_pci_dev(dev)))
 		return;

-	while (nelems--) {
+	for_each_sg(sglist, s, nelems, i) {
 		unsigned int npages;
-		dma_addr_t dma = sglist->dma_address;
-		unsigned int dmalen = sglist->dma_length;
+		dma_addr_t dma = s->dma_address;
+		unsigned int dmalen = s->dma_length;

 		if (dmalen == 0)
 			break;

 		npages = num_dma_pages(dma, dmalen);
 		iommu_free(tbl, dma, npages);
-		sglist++;
 	}
 }

 static int calgary_nontranslate_map_sg(struct device* dev,
 	struct scatterlist *sg, int nelems, int direction)
 {
+	struct scatterlist *s;
 	int i;

-	for (i = 0; i < nelems; i++ ) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(sg, s, nelems, i) {
 		BUG_ON(!s->page);
 		s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
 		s->dma_length = s->length;
@@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	int nelems, int direction)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
+	struct scatterlist *s;
 	unsigned long vaddr;
 	unsigned int npages;
 	unsigned long entry;
@@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!translate_phb(to_pci_dev(dev)))
 		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);

-	for (i = 0; i < nelems; i++ ) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(sg, s, nelems, i) {
 		BUG_ON(!s->page);

 		vaddr = (unsigned long)page_address(s->page) + s->offset;
@@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	return nelems;
 error:
 	calgary_unmap_sg(dev, sg, nelems, direction);
-	for (i = 0; i < nelems; i++) {
-		sg[i].dma_address = bad_dma_address;
-		sg[i].dma_length = 0;
+	for_each_sg(sg, s, nelems, i) {
+		sg->dma_address = bad_dma_address;
+		sg->dma_length = 0;
 	}
 	return 0;
 }
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
+#include <linux/scatterlist.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
  */
 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 {
+	struct scatterlist *s;
 	int i;

-	for (i = 0; i < nents; i++) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
 		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
@@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 			       int nents, int dir)
 {
+	struct scatterlist *s;
 	int i;

 #ifdef CONFIG_IOMMU_DEBUG
 	printk(KERN_DEBUG "dma_map_sg overflow\n");
 #endif

-	for (i = 0; i < nents; i++ ) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(sg, s, nents, i) {
 		unsigned long addr = page_to_phys(s->page) + s->offset;
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir);
@@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }

 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static int __dma_map_cont(struct scatterlist *start, int nelems,
 			  struct scatterlist *sout, unsigned long pages)
 {
 	unsigned long iommu_start = alloc_iommu(pages);
 	unsigned long iommu_page = iommu_start;
+	struct scatterlist *s;
 	int i;

 	if (iommu_start == -1)
 		return -1;

-	for (i = start; i < stopat; i++) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(start, s, nelems, i) {
 		unsigned long pages, addr;
 		unsigned long phys_addr = s->dma_address;

-		BUG_ON(i > start && s->offset);
-		if (i == start) {
+		BUG_ON(s != start && s->offset);
+		if (s == start) {
 			*sout = *s;
 			sout->dma_address = iommu_bus_base;
 			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
@@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
 	return 0;
 }

-static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static inline int dma_map_cont(struct scatterlist *start, int nelems,
 			       struct scatterlist *sout,
 			       unsigned long pages, int need)
 {
 	if (!need) {
-		BUG_ON(stopat - start != 1);
-		*sout = sg[start];
-		sout->dma_length = sg[start].length;
+		BUG_ON(nelems != 1);
+		*sout = *start;
+		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(sg, start, stopat, sout, pages);
+	return __dma_map_cont(start, nelems, sout, pages);
 }

 /*
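The signature change is the interesting part here: a run of entries used to be described as (base array, start index, stop index) and is now (pointer to first entry, count), since index arithmetic on the base no longer lands on the right entry once segments are chained. Any subrange walk then takes this shape, sketched with process_entry() as a hypothetical stand-in:

static void walk_subrange(struct scatterlist *start, int nelems)
{
	struct scatterlist *s;
	int i;

	for_each_sg(start, s, nelems, i)
		process_entry(s);	/* hypothetical per-entry work */
}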
@@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	int start;
 	unsigned long pages = 0;
 	int need = 0, nextneed;
+	struct scatterlist *s, *ps, *start_sg, *sgmap;

 	if (nents == 0)
 		return 0;
@@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)

 	out = 0;
 	start = 0;
-	for (i = 0; i < nents; i++) {
-		struct scatterlist *s = &sg[i];
+	start_sg = sgmap = sg;
+	ps = NULL; /* shut up gcc */
+	for_each_sg(sg, s, nents, i) {
 		dma_addr_t addr = page_to_phys(s->page) + s->offset;
 		s->dma_address = addr;
 		BUG_ON(s->length == 0);
@@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)

 		/* Handle the previous not yet processed entries */
 		if (i > start) {
-			struct scatterlist *ps = &sg[i-1];
 			/* Can only merge when the last chunk ends on a page
 			   boundary and the new one doesn't have an offset. */
 			if (!iommu_merge || !nextneed || !need || s->offset ||
 			    (ps->offset + ps->length) % PAGE_SIZE) {
-				if (dma_map_cont(sg, start, i, sg+out, pages,
-						 need) < 0)
+				if (dma_map_cont(start_sg, i - start, sgmap,
+						 pages, need) < 0)
 					goto error;
 				out++;
+				sgmap = sg_next(sgmap);
 				pages = 0;
 				start = i;
+				start_sg = s;
 			}
 		}

 		need = nextneed;
 		pages += to_pages(s->offset, s->length);
+		ps = s;
 	}
-	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
+	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
-	if (out < nents)
-		sg[out].dma_length = 0;
+	if (out < nents) {
+		sgmap = sg_next(sgmap);
+		sgmap->dma_length = 0;
+	}
 	return out;

 error:
@@ -437,8 +444,8 @@ error:
 	if (panic_on_overflow)
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
-	for (i = 0; i < nents; i++)
-		sg[i].dma_address = bad_dma_address;
+	for_each_sg(sg, s, nents, i)
+		s->dma_address = bad_dma_address;
 	return 0;
 }
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>

 #include <asm/iommu.h>
 #include <asm/processor.h>
@@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 			int nents, int direction)
 {
+	struct scatterlist *s;
 	int i;

-	for (i = 0; i < nents; i++ ) {
-		struct scatterlist *s = &sg[i];
+	for_each_sg(sg, s, nents, i) {
 		BUG_ON(!s->page);
 		s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
 		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>

 /*
  * for max sense size
@@ -1318,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		  struct scatterlist *sg)
+		  struct scatterlist *sglist)
 {
 	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *next_sg, *sg;
 	struct req_iterator iter;
 	int nsegs, cluster;

@@ -1331,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
+	sg = next_sg = &sglist[0];
 	rq_for_each_segment(bvec, rq, iter) {
 		int nbytes = bvec->bv_len;

 		if (bvprv && cluster) {
-			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+			if (sg->length + nbytes > q->max_segment_size)
 				goto new_segment;

 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1343,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 				goto new_segment;

-			sg[nsegs - 1].length += nbytes;
+			sg->length += nbytes;
 		} else {
 new_segment:
-			memset(&sg[nsegs],0,sizeof(struct scatterlist));
-			sg[nsegs].page = bvec->bv_page;
-			sg[nsegs].length = nbytes;
-			sg[nsegs].offset = bvec->bv_offset;
+			sg = next_sg;
+			next_sg = sg_next(sg);
+
+			sg->page = bvec->bv_page;
+			sg->length = nbytes;
+			sg->offset = bvec->bv_offset;
 			nsegs++;
 		}
 		bvprv = bvec;
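A typical caller is unaffected by this internal rewrite: it still sizes its table by rq->nr_phys_segments and hands it to blk_rq_map_sg(). A hypothetical driver helper, sketched against the interfaces of this era:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int prepare_request_sg(struct device *dma_dev,
			      struct request_queue *q, struct request *rq,
			      struct scatterlist *sglist,
			      enum dma_data_direction dir)
{
	int nsegs, nmapped;

	/* sglist must hold at least rq->nr_phys_segments entries */
	nsegs = blk_rq_map_sg(q, rq, sglist);

	nmapped = dma_map_sg(dma_dev, sglist, nsegs, dir);
	if (nmapped == 0)
		return -ENOMEM;

	return nmapped;	/* entries to program into the controller */
}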
@@ -4068,7 +4072,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }

+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+					const char *page, size_t count)
+{
+	unsigned long segments;
+	ssize_t ret = queue_var_store(&segments, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	q->max_phys_segments = segments;
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -4092,6 +4112,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };

+static struct queue_sysfs_entry queue_max_segments_entry = {
+	.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_segments_show,
+	.store = queue_max_segments_store,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -4103,6 +4129,7 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
 	&queue_iosched_entry.attr,
 	NULL,
 };
@@ -77,7 +77,7 @@ static int update2(struct hash_desc *desc,

 		if (!nbytes)
 			break;
-		sg = sg_next(sg);
+		sg = scatterwalk_sg_next(sg);
 	}

 	return 0;
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 		walk->offset += PAGE_SIZE - 1;
 		walk->offset &= PAGE_MASK;
 		if (walk->offset >= walk->sg->offset + walk->sg->length)
-			scatterwalk_start(walk, sg_next(walk->sg));
+			scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
 	}
 }
@@ -20,7 +20,7 @@

 #include "internal.h"

-static inline struct scatterlist *sg_next(struct scatterlist *sg)
+static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
 	return (++sg)->length ? sg : (void *)sg->page;
 }
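The rename matters because the crypto layer has its own, older chain convention, visible in the function body above: an entry with length == 0 stores the pointer to the next list directly in its page field, with no low-bit tagging. Keeping the name sg_next would now shadow the generic helper, which decodes a different encoding. A sketch of how such a crypto-style link entry is formed, inferred from the decode above rather than taken from this commit:

static void chain_crypto_sg(struct scatterlist *link_entry,
			    struct scatterlist *next)
{
	link_entry->page = (struct page *)next;	/* pointer smuggled in page */
	link_entry->length = 0;			/* zero length marks the link */
}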
@ -1410,7 +1410,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
|
||||||
*/
|
*/
|
||||||
unsigned ata_exec_internal_sg(struct ata_device *dev,
|
unsigned ata_exec_internal_sg(struct ata_device *dev,
|
||||||
struct ata_taskfile *tf, const u8 *cdb,
|
struct ata_taskfile *tf, const u8 *cdb,
|
||||||
int dma_dir, struct scatterlist *sg,
|
int dma_dir, struct scatterlist *sgl,
|
||||||
unsigned int n_elem, unsigned long timeout)
|
unsigned int n_elem, unsigned long timeout)
|
||||||
{
|
{
|
||||||
struct ata_link *link = dev->link;
|
struct ata_link *link = dev->link;
|
||||||
|
@ -1472,11 +1472,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
|
||||||
qc->dma_dir = dma_dir;
|
qc->dma_dir = dma_dir;
|
||||||
if (dma_dir != DMA_NONE) {
|
if (dma_dir != DMA_NONE) {
|
||||||
unsigned int i, buflen = 0;
|
unsigned int i, buflen = 0;
|
||||||
|
struct scatterlist *sg;
|
||||||
|
|
||||||
for (i = 0; i < n_elem; i++)
|
for_each_sg(sgl, sg, n_elem, i)
|
||||||
buflen += sg[i].length;
|
buflen += sg->length;
|
||||||
|
|
||||||
ata_sg_init(qc, sg, n_elem);
|
ata_sg_init(qc, sgl, n_elem);
|
||||||
qc->nbytes = buflen;
|
qc->nbytes = buflen;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -4292,7 +4293,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	if (qc->n_elem)
 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
 	/* restore last sg */
-	sg[qc->orig_n_elem - 1].length += qc->pad_len;
+	sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
 	if (pad_buf) {
 		struct scatterlist *psg = &qc->pad_sgent;
 		void *addr = kmap_atomic(psg->page, KM_IRQ0);

@@ -4547,6 +4548,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 	qc->nbytes = buflen;
+	qc->cursg = qc->__sg;
 
 	sg_init_one(&qc->sgent, buf, buflen);
 }

@@ -4572,6 +4574,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 	qc->__sg = sg;
 	qc->n_elem = n_elem;
 	qc->orig_n_elem = n_elem;
+	qc->cursg = qc->__sg;
 }
 
 /**

@@ -4661,7 +4664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
-	struct scatterlist *lsg = &sg[qc->n_elem - 1];
+	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
 	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->print_id);
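`&sg[n_elem - 1]` assumes one flat array; sg_last() must walk instead, because a chained list has no back pointers and its final element may live in a different block. A sketch under the same illustrative types as above:

	static struct sg_sketch *sg_last_sketch(struct sg_sketch *sgl,
						unsigned int nents)
	{
		struct sg_sketch *sg, *last = sgl;
		unsigned int i;

		for_each_sg_sketch(sgl, sg, nents, i)
			last = sg;	/* O(n): no way to jump straight to the end */
		return last;
	}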
@@ -4825,7 +4828,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;

@@ -4834,8 +4836,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
-	page = sg[qc->cursg].page;
-	offset = sg[qc->cursg].offset + qc->cursg_ofs;
+	page = qc->cursg->page;
+	offset = qc->cursg->offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
 	page = nth_page(page, (offset >> PAGE_SHIFT));

@@ -4863,8 +4865,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	qc->curbytes += qc->sect_size;
 	qc->cursg_ofs += qc->sect_size;
 
-	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
-		qc->cursg++;
+	if (qc->cursg_ofs == qc->cursg->length) {
+		qc->cursg = sg_next(qc->cursg);
 		qc->cursg_ofs = 0;
 	}
 }

@@ -4950,16 +4952,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned char *buf;
 	unsigned int offset, count;
+	int no_more_sg = 0;
 
 	if (qc->curbytes + bytes >= qc->nbytes)
 		ap->hsm_task_state = HSM_ST_LAST;
 
 next_sg:
-	if (unlikely(qc->cursg >= qc->n_elem)) {
+	if (unlikely(no_more_sg)) {
 		/*
 		 * The end of qc->sg is reached and the device expects
 		 * more data to transfer. In order not to overrun qc->sg

@@ -4982,7 +4986,7 @@ next_sg:
 		return;
 	}
 
-	sg = &qc->__sg[qc->cursg];
+	sg = qc->cursg;
 
 	page = sg->page;
 	offset = sg->offset + qc->cursg_ofs;

@@ -5021,7 +5025,10 @@ next_sg:
 	qc->cursg_ofs += count;
 
 	if (qc->cursg_ofs == sg->length) {
-		qc->cursg++;
+		if (qc->cursg == lsg)
+			no_more_sg = 1;
+
+		qc->cursg = sg_next(qc->cursg);
 		qc->cursg_ofs = 0;
 	}
 

@@ -801,8 +801,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 
 	ata_scsi_sdev_config(sdev);
 
-	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
-
 	sdev->manage_start_stop = 1;
 
 	if (dev)

@@ -2569,6 +2569,7 @@ static void do_cciss_request(struct request_queue *q)
 	       (int)creq->nr_sectors);
 #endif				/* CCISS_DEBUG */
 
+	memset(tmp_sg, 0, sizeof(tmp_sg));
 	seg = blk_rq_map_sg(q, creq, tmp_sg);
 
 	/* get the DMA records for the setup */

@@ -939,7 +939,8 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
 		/* group sequential buffers into one large buffer */
 		addr = page_to_phys(sg->page) + sg->offset;
 		size = sg_dma_len(sg);
-		while (sg++, --i) {
+		while (--i) {
+			sg = sg_next(sg);
 			if ((addr + size) != page_to_phys(sg->page) + sg->offset)
 				break;
 			size += sg_dma_len(sg);
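The IDE DMA-table builders show the other recurring shape in this merge: a manual walk where the `sg++` side effect moves out of the loop condition and becomes an explicit sg_next() step. A sketch of the coalescing walk above, with phys_of() standing in for `page_to_phys(sg->page) + sg->offset`:

	/* merge physically adjacent entries into one span; illustrative only */
	static unsigned int coalesced_size(struct sg_sketch *sg, int i,
					   unsigned long (*phys_of)(struct sg_sketch *))
	{
		unsigned long addr = phys_of(sg);
		unsigned int size = sg->length;

		while (--i) {
			sg = sg_next_sketch(sg);	/* was: while (sg++, --i) */
			if (addr + size != phys_of(sg))
				break;
			size += sg->length;
		}
		return size;
	}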
@@ -280,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
 			}
 		}
 
-		sg++;
+		sg = sg_next(sg);
 		i--;
 	}
 

@@ -846,7 +846,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
 	ide_hwif_t *hwif = drive->hwif;
 
 	hwif->nsect = hwif->nleft = rq->nr_sectors;
-	hwif->cursg = hwif->cursg_ofs = 0;
+	hwif->cursg_ofs = 0;
+	hwif->cursg = NULL;
 }
 
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

@@ -1349,7 +1349,7 @@ static int hwif_init(ide_hwif_t *hwif)
 	if (!hwif->sg_max_nents)
 		hwif->sg_max_nents = PRD_ENTRIES;
 
-	hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+	hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
 				 GFP_KERNEL);
 	if (!hwif->sg_table) {
 		printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
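kmalloc() becomes kzalloc() here (and for the MMC queue later in this merge) so the scatterlist table starts out zeroed: the chained-sg helpers read link bits out of the entries, and stale heap contents must not be mistaken for chain markers. kzalloc is just the zeroing variant of kmalloc, as this sketch of its effect shows:

	/* what kzalloc(size, flags) provides over a bare kmalloc(size, flags) */
	static void *kzalloc_sketch(size_t size, gfp_t flags)
	{
		void *p = kmalloc(size, flags);

		if (p)
			memset(p, 0, size);	/* entries begin with no stray link bits */
		return p;
	}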
@@ -45,6 +45,7 @@
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/bitops.h>
+#include <linux/scatterlist.h>
 
 #include <asm/byteorder.h>
 #include <asm/irq.h>

@@ -263,6 +264,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
+	struct scatterlist *cursg = hwif->cursg;
 	struct page *page;
 #ifdef CONFIG_HIGHMEM
 	unsigned long flags;

@@ -270,8 +272,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 	unsigned int offset;
 	u8 *buf;
 
-	page = sg[hwif->cursg].page;
-	offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;
+	cursg = hwif->cursg;
+	if (!cursg) {
+		cursg = sg;
+		hwif->cursg = sg;
+	}
+
+	page = cursg->page;
+	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
 
 	/* get the current page and offset */
 	page = nth_page(page, (offset >> PAGE_SHIFT));

@@ -285,8 +293,8 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 	hwif->nleft--;
 	hwif->cursg_ofs++;
 
-	if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
-		hwif->cursg++;
+	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+		hwif->cursg = sg_next(hwif->cursg);
 		hwif->cursg_ofs = 0;
 	}
 }

@@ -367,6 +375,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
 
 static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
 {
+	HWIF(drive)->cursg = NULL;
+
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = rq->special;
 

@@ -296,7 +296,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
 				cur_addr += tc;
 				cur_len -= tc;
 			}
-			sg++;
+			sg = sg_next(sg);
 			i--;
 		}
 

@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 #include <linux/ioc4.h>
 #include <asm/io.h>
 

@@ -537,7 +538,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
 			}
 		}
 
-		sg++;
+		sg = sg_next(sg);
 		i--;
 	}
 

@@ -1539,7 +1539,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 			cur_len -= tc;
 			++table;
 		}
-		sg++;
+		sg = sg_next(sg);
 		i--;
 	}
 

@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "ipath_verbs.h"

@@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction direction)
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+			int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	u64 addr;
 	int i;
 	int ret = nents;
 
 	BUG_ON(!valid_dma_direction(direction));
 
-	for (i = 0; i < nents; i++) {
-		addr = (u64) page_address(sg[i].page);
+	for_each_sg(sgl, sg, nents, i) {
+		addr = (u64) page_address(sg->page);
 		/* TODO: handle highmem pages */
 		if (!addr) {
 			ret = 0;

@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sg = (struct scatterlist *)data->buf;
+		struct scatterlist *sgl = (struct scatterlist *)data->buf;
+		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
-		for (p = mem, i = 0; i < data->size; i++) {
-			from = kmap_atomic(sg[i].page, KM_USER0);
+		p = mem;
+		for_each_sg(sgl, sg, data->size, i) {
+			from = kmap_atomic(sg->page, KM_USER0);
 			memcpy(p,
-			       from + sg[i].offset,
-			       sg[i].length);
+			       from + sg->offset,
+			       sg->length);
 			kunmap_atomic(from, KM_USER0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
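The iSER copy loop above is the same indexed-to-iterator rewrite applied to page-by-page copying. A condensed sketch of the flattening direction, using this era's real APIs (for_each_sg, kmap_atomic with a KM slot, the sg->page field) but with a hypothetical helper name and the assumption that buf is large enough:

	/* linearize a scatterlist into buf */
	static void sg_flatten_sketch(struct scatterlist *sgl, int nents, char *buf)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			char *from = kmap_atomic(sg->page, KM_USER0);

			memcpy(buf, from + sg->offset, sg->length);
			kunmap_atomic(from, KM_USER0);
			buf += sg->length;
		}
	}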
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
-		struct scatterlist *sg;
+		struct scatterlist *sgl, *sg;
 		unsigned char *p, *to;
 		unsigned int sg_size;
 		int i;

@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
 		sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-		for (p = mem, i = 0; i < sg_size; i++){
-			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-			memcpy(to + sg[i].offset,
+		p = mem;
+		for_each_sg(sgl, sg, sg_size, i) {
+			to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+			memcpy(to + sg->offset,
 			       p,
-			       sg[i].length);
+			       sg->length);
 			kunmap_atomic(to, KM_SOFTIRQ0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 

@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;

@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	for (i = 0; i < data->dma_nents; i++) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+	for_each_sg(sgl, sg, data->dma_nents, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, sg);
 		last_addr = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
 			i++;
-			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, sg);
 			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 

@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 					      struct ib_device *ibdev)
 {
-	struct scatterlist *sg;
+	struct scatterlist *sgl, *sg;
 	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
-	sg = (struct scatterlist *)data->buf;
+	sgl = (struct scatterlist *)data->buf;
 
-	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+	cnt = 0;
+	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg[i].page),
-		   (unsigned long)sg[i].offset,
-		   (unsigned long)sg[i].length); */
-		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-			   ib_sg_dma_len(ibdev, &sg[i]);
+		   (unsigned long)page_to_phys(sg->page),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
 		/* iser_dbg("Checking sg iobuf end address "
 		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;

@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < data->dma_nents; i++)
+	for_each_sg(sgl, sg, data->dma_nents, i)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-			 sg[i].page, sg[i].offset,
-			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+			 sg->page, sg->offset,
+			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -293,7 +293,7 @@ nextSGEset:
 	for (ii=0; ii < (numSgeThisFrame-1); ii++) {
 		thisxfer = sg_dma_len(sg);
 		if (thisxfer == 0) {
-			sg ++; /* Get next SG element from the OS */
+			sg = sg_next(sg); /* Get next SG element from the OS */
 			sg_done++;
 			continue;
 		}

@@ -301,7 +301,7 @@ nextSGEset:
 		v2 = sg_dma_address(sg);
 		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
 
-		sg++;		/* Get next SG element from the OS */
+		sg = sg_next(sg); /* Get next SG element from the OS */
 		psge += (sizeof(u32) + sizeof(dma_addr_t));
 		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
 		sg_done++;

@@ -322,7 +322,7 @@ nextSGEset:
 		v2 = sg_dma_address(sg);
 		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
 		/*
-		sg++;
+		sg = sg_next(sg);
 		psge += (sizeof(u32) + sizeof(dma_addr_t));
 		*/
 		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));

@@ -153,14 +153,14 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mq->sg = kmalloc(sizeof(struct scatterlist),
+			mq->sg = kzalloc(sizeof(struct scatterlist),
 				GFP_KERNEL);
 			if (!mq->sg) {
 				ret = -ENOMEM;
 				goto cleanup_queue;
 			}
 
-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
+			mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
 				bouncesz / 512, GFP_KERNEL);
 			if (!mq->bounce_sg) {
 				ret = -ENOMEM;

@@ -177,7 +177,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-		mq->sg = kmalloc(sizeof(struct scatterlist) *
+		mq->sg = kzalloc(sizeof(struct scatterlist) *
 			host->max_phys_segs, GFP_KERNEL);
 		if (!mq->sg) {
 			ret = -ENOMEM;

@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/mempool.h>
 #include <linux/syscalls.h>
+#include <linux/scatterlist.h>
 #include <linux/ioctl.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_tcq.h>

@@ -590,7 +590,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
  */
 int
 zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-			struct scatterlist *sg, int sg_count, int max_sbals)
+			struct scatterlist *sgl, int sg_count, int max_sbals)
 {
 	int sg_index;
 	struct scatterlist *sg_segment;

@@ -606,9 +606,7 @@ zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
 	sbale->flags |= sbtype;
 
 	/* process all segements of scatter-gather list */
-	for (sg_index = 0, sg_segment = sg, bytes = 0;
-	     sg_index < sg_count;
-	     sg_index++, sg_segment++) {
+	for_each_sg(sgl, sg_segment, sg_count, sg_index) {
 		retval = zfcp_qdio_sbals_from_segment(
 				fsf_req,
 				sbtype,
@@ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors = TW_MAX_SECTORS,
 	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = twa_host_attrs,
 	.emulated = 1
 };

@@ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors = TW_MAX_SECTORS,
 	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = tw_host_attrs,
 	.emulated = 1
 };

@@ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = {
 	.unchecked_isa_dma = 1,
 	.max_sectors = 128,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /*

@@ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template =
 	.sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/,
 	.cmd_per_lun = 1 /* commands per lun */,
 	.unchecked_isa_dma = 1 /* unchecked_isa_dma */,
-	.use_clustering = ENABLE_CLUSTERING
+	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"

@@ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int __devinit inia100_probe_one(struct pci_dev *pdev,

@@ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = {
 	.cmd_per_lun = AAC_NUM_IO_FIB,
 #endif
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.emulated = 1,
 };
 

@@ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length)
 }
 
 static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
-		       struct scatterlist *sgpnt,
+		       struct scatterlist *sgp,
 		       int nseg,
 		       int badseg)
 {
 	printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
 	       badseg, nseg,
-	       page_address(sgpnt[badseg].page) + sgpnt[badseg].offset,
-	       (unsigned long long)SCSI_SG_PA(&sgpnt[badseg]),
-	       sgpnt[badseg].length);
+	       page_address(sgp->page) + sgp->offset,
+	       (unsigned long long)SCSI_SG_PA(sgp),
+	       sgp->length);
 
 	/*
 	 * Not safe to continue.

@@ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 	memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
 
 	if (SCpnt->use_sg) {
-		struct scatterlist *sgpnt;
+		struct scatterlist *sg;
 		struct chain *cptr;
 #ifdef DEBUG
 		unsigned char *ptr;

@@ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 		int i;
 		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
 		SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
-		sgpnt = (struct scatterlist *) SCpnt->request_buffer;
 		cptr = (struct chain *) SCpnt->host_scribble;
 		if (cptr == NULL) {
 			/* free the claimed mailbox slot */
 			HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
-		for (i = 0; i < SCpnt->use_sg; i++) {
-			if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
-			    (((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) {
+		scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
+			if (sg->length == 0 || SCpnt->use_sg > 16 ||
+			    (((int) sg->offset) & 1) || (sg->length & 1)) {
 				unsigned char *ptr;
 				printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
-				for (i = 0; i < SCpnt->use_sg; i++) {
+				scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
 					printk(KERN_CRIT "%d: %p %d\n", i,
-					       (page_address(sgpnt[i].page) +
-					       sgpnt[i].offset),
-					       sgpnt[i].length);
+					       (page_address(sg->page) +
+					       sg->offset), sg->length);
 				};
 				printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
 				ptr = (unsigned char *) &cptr[i];

@@ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 					printk("%02x ", ptr[i]);
 				panic("Foooooooood fight!");
 			};
-			any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i]));
-			if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD)
-				BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
-			any2scsi(cptr[i].datalen, sgpnt[i].length);
+			any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
+			if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
+				BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
+			any2scsi(cptr[i].datalen, sg->length);
 		};
 		any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
 		any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));

@@ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = {
 	.sg_tablesize = AHA1740_SCATTER,
 	.cmd_per_lun = AHA1740_CMDLUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.eh_abort_handler = aha1740_eh_abort_handler,
 };
 

@@ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = {
 	.max_sectors = 8192,
 	.cmd_per_lun = 2,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.slave_alloc = ahd_linux_slave_alloc,
 	.slave_configure = ahd_linux_slave_configure,
 	.target_alloc = ahd_linux_target_alloc,

@@ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = {
 	.max_sectors = 8192,
 	.cmd_per_lun = 2,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.slave_alloc = ahc_linux_slave_alloc,
 	.slave_configure = ahc_linux_slave_configure,
 	.target_alloc = ahc_linux_target_alloc,

@@ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors = 2048,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"

@@ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 		res = -ENOMEM;
 		goto err_unmap;
 	}
-	for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+	for_each_sg(task->scatter, sc, num_sg, i) {
 		struct sg_el *sg =
 			&((struct sg_el *)ascb->sg_arr->vaddr)[i];
 		sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));

@@ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 			sg->flags |= ASD_SG_EL_LIST_EOL;
 	}
 
-	for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
+	for_each_sg(task->scatter, sc, 2, i) {
 		sg_arr[i].bus_addr =
 			cpu_to_le64((u64)sg_dma_address(sc));
 		sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));

@@ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
 	} else {
 		int i;
-		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+		for_each_sg(task->scatter, sc, num_sg, i) {
 			sg_arr[i].bus_addr =
 				cpu_to_le64((u64)sg_dma_address(sc));
 			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));

@@ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.max_sectors = ARCMSR_MAX_XFER_SECTORS,
 	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = arcmsr_host_attrs,
 };
 #ifdef CONFIG_SCSI_ARCMSR_AER

@@ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = {
 	.eh_bus_reset_handler = dc395x_eh_bus_reset,
 	.unchecked_isa_dma = 0,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 

@@ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = {
 	.this_id = 7,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static s32 adpt_scsi_register(adpt_hba* pHba)

@@ -523,7 +523,8 @@ static struct scsi_host_template driver_template = {
 	.slave_configure = eata2x_slave_configure,
 	.this_id = 7,
 	.unchecked_isa_dma = 1,
-	.use_clustering = ENABLE_CLUSTERING
+	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)

@@ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
 	shost->active_mode = sht->supported_mode;
+	shost->use_sg_chaining = sht->use_sg_chaining;
 
 	if (sht->max_host_blocked)
 		shost->max_host_blocked = sht->max_host_blocked;
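Every host-template hunk in this stretch adds the same line, and scsi_host_alloc() (just above) copies the flag into the Scsi_Host so the midlayer only hands chained scatterlist tables to drivers whose S/G walkers use sg_next()/for_each_sg(). The shape of an opted-in template — the example_* names are placeholders, not a real driver:

	static struct scsi_host_template example_template = {
		.module = THIS_MODULE,
		.name = "example",
		.queuecommand = example_queuecommand,	/* hypothetical */
		.sg_tablesize = SG_ALL,
		.use_clustering = ENABLE_CLUSTERING,
		.use_sg_chaining = ENABLE_SG_CHAINING,	/* S/G walker is chain-safe */
	};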
@@ -655,6 +655,7 @@ static struct scsi_host_template driver_template = {
 	.unchecked_isa_dma = 0,
 	.emulated = 0,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.proc_name = driver_name,
 	.shost_attrs = hptiop_attrs,
 	.this_id = -1,

@@ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = {
 	.sg_tablesize = 16,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int ibmmca_probe(struct device *dev)

@@ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = {
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = ibmvscsi_attrs,
 };
 

@@ -70,6 +70,7 @@ typedef struct idescsi_pc_s {
 	u8 *buffer;				/* Data buffer */
 	u8 *current_position;			/* Pointer into the above buffer */
 	struct scatterlist *sg;			/* Scatter gather table */
+	struct scatterlist *last_sg;		/* Last sg element */
 	int b_count;				/* Bytes transferred from current entry */
 	struct scsi_cmnd *scsi_cmd;		/* SCSI command */
 	void (*done)(struct scsi_cmnd *);	/* Scsi completion routine */

@@ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
 	char *buf;
 
 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-		                                 scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-			idescsi_discard_data (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;

@@ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
+		idescsi_discard_data (drive, bcount);
+	}
 }
 
 static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)

@@ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
 	char *buf;
 
 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-		                                 scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-			idescsi_output_zeros (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;

@@ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
+		idescsi_output_zeros (drive, bcount);
+	}
 }
 
 static void hexdump(u8 *x, int len)

@@ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
 	memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
 	pc->buffer = NULL;
 	pc->sg = scsi_sglist(cmd);
+	pc->last_sg = sg_last(pc->sg, cmd->use_sg);
 	pc->b_count = 0;
 	pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
@@ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int initio_probe_one(struct pci_dev *pdev,

@@ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
 		struct scatterlist *sg;
-		int sg_dma_index, ips_sg_index = 0;
+		int i, sg_dma_index, ips_sg_index = 0;
 
 		/* we had a data breakup */
 		scb->data_len = 0;

@@ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 
 		/* Spin forward to last dma chunk */
 		sg_dma_index = scb->breakup;
+		for (i = 0; i < scb->breakup; i++)
+			sg = sg_next(sg);
 
 		/* Take care of possible partial on last chunk */
 		ips_fill_scb_sg_single(ha,
-				       sg_dma_address(&sg[sg_dma_index]),
+				       sg_dma_address(sg),
 				       scb, ips_sg_index++,
-				       sg_dma_len(&sg[sg_dma_index]));
+				       sg_dma_len(sg));
 
 		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
-		     sg_dma_index++) {
+		     sg_dma_index++, sg = sg_next(sg)) {
 			if (ips_fill_scb_sg_single
 			    (ha,
-			     sg_dma_address(&sg[sg_dma_index]),
+			     sg_dma_address(sg),
 			     scb, ips_sg_index++,
-			     sg_dma_len(&sg[sg_dma_index])) < 0)
+			     sg_dma_len(sg)) < 0)
 				break;
 		}
 
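ips can no longer address its resume point as `sg[scb->breakup]`, so it steps forward element by element before continuing. That skip loop is the general replacement for random indexing into a possibly chained list, as in this sketch:

	/* advance a cursor k elements; with chaining there is no sg[k] */
	static struct scatterlist *sg_skip_sketch(struct scatterlist *sg, int k)
	{
		while (k--)
			sg = sg_next(sg);
		return sg;
	}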
@@ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = {
 	.scan_finished = lpfc_scan_finished,
 	.this_id = -1,
 	.sg_tablesize = LPFC_SG_SEG_CNT,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.cmd_per_lun = LPFC_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = lpfc_hba_attrs,

@@ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = {
 	.sg_tablesize = LPFC_SG_SEG_CNT,
 	.cmd_per_lun = LPFC_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = lpfc_vport_attrs,
 	.max_sectors = 0xFFFF,
 };

@@ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 1,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)

@@ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = {
 	.sg_tablesize = MAX_SGLIST,
 	.cmd_per_lun = DEF_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.eh_abort_handler = megaraid_abort,
 	.eh_device_reset_handler = megaraid_reset,
 	.eh_bus_reset_handler = megaraid_reset,

@@ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_host_reset_handler = megaraid_reset_handler,
 	.change_queue_depth = megaraid_change_queue_depth,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.sdev_attrs = megaraid_sdev_attrs,
 	.shost_attrs = megaraid_shost_attrs,
 };

@@ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = {
 	.eh_timed_out = megasas_reset_timer,
 	.bios_param = megasas_bios_param,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /**

@@ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = {
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = 2,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)

@@ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = {
 	.cmd_per_lun = 1,
 	.this_id = NSP32_HOST_SCSIID,
 	.use_clustering = DISABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.eh_abort_handler = nsp32_eh_abort,
 	.eh_bus_reset_handler = nsp32_eh_bus_reset,
 	.eh_host_reset_handler = nsp32_eh_host_reset,

@@ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = {
 	.sg_tablesize = 32,
 	.cmd_per_lun = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = SYM53C500_shost_attrs
 };
@@ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	cmd_a64_entry_t *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	dma_addr_t dma_handle;
 	int status = 0;

@@ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) {	/* If data transfer. */
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = (u32 *)&pkt->dseg_0_address;
 
 		if (cmd->use_sg) {	/* If scatter gather */
 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) {
-				dma_handle = sg_dma_address(sg);
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 2)
+					break;
+				dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 				if (ha->flags.use_pci_vchannel)
 					sn_pci_set_vchan(ha->pdev,

@@ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					cpu_to_le32(pci_dma_lo32(dma_handle));
 				*dword_ptr++ =
 					cpu_to_le32(pci_dma_hi32(dma_handle));
-				*dword_ptr++ = cpu_to_le32(sg_dma_len(sg));
-				sg++;
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
 					cpu_to_le32(pci_dma_hi32(dma_handle)),
 					cpu_to_le32(pci_dma_lo32(dma_handle)),
-					cpu_to_le32(sg_dma_len(sg)));
+					cpu_to_le32(sg_dma_len(sg_next(s))));
+				remseg--;
 			}
 			dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
 				"command packet data - b %i, t %i, l %i \n",
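qla1280 now fills fixed-size request packets from a single list: an inner for_each_sg() that breaks after N entries, with remseg and the saved cursor (`sg = s`, in the continuation hunks below) carrying the walk across packets. A sketch of that chunked pattern, with emit_one() as a hypothetical consumer:

	static void emit_one(struct scatterlist *s)
	{
		/* fill one DMA descriptor from s (placeholder) */
	}

	/* hand out a (possibly chained) list at most `chunk` entries at a time */
	static void emit_in_chunks(struct scatterlist *sgl, int nents, int chunk)
	{
		struct scatterlist *sg = sgl, *s;
		int remseg = nents, cnt;

		while (remseg > 0) {
			for_each_sg(sg, s, remseg, cnt) {
				if (cnt == chunk)
					break;
				emit_one(s);
			}
			sg = s;		/* resume where the inner loop stopped */
			remseg -= cnt;
		}
	}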
@@ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
 				"remains\n", seg_cnt);
 
-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Update sg start */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {

@@ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 				    (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
 
 				/* Load continuation entry data segments. */
-				for (cnt = 0; cnt < 5 && seg_cnt;
-				     cnt++, seg_cnt--) {
-					dma_handle = sg_dma_address(sg);
+				for_each_sg(sg, s, remseg, cnt) {
+					if (cnt == 5)
+						break;
+					dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 					if (ha->flags.use_pci_vchannel)
 						sn_pci_set_vchan(ha->pdev,

@@ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					*dword_ptr++ =
 						cpu_to_le32(pci_dma_hi32(dma_handle));
 					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(sg));
+						cpu_to_le32(sg_dma_len(s));
 					dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
 						cpu_to_le32(pci_dma_hi32(dma_handle)),
 						cpu_to_le32(pci_dma_lo32(dma_handle)),
-						cpu_to_le32(sg_dma_len(sg)));
-					sg++;
+						cpu_to_le32(sg_dma_len(s)));
 				}
+				remseg -= cnt;
 				dprintk(5, "qla1280_64bit_start_scsi: "
 					"continuation packet data - b %i, t "
 					"%i, l %i \n", SCSI_BUS_32(cmd),

@@ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	struct cmd_entry *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	int status = 0;
 	int cnt;

@@ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) {
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = &pkt->dseg_0_address;
 

@@ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			qla1280_dump_buffer(1, (char *)sg, 4 * 16);
 
 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) {
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 4)
+					break;
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
-				*dword_ptr++ =
-					cpu_to_le32(sg_dma_len(sg));
+					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-					(pci_dma_lo32(sg_dma_address(sg))),
-					(sg_dma_len(sg)));
-				sg++;
+					(pci_dma_lo32(sg_dma_address(s))),
+					(sg_dma_len(s)));
+				remseg--;
 			}
 			/*
 			 * Build continuation packets.
 			 */
 			dprintk(3, "S/G Building Continuation"
 				"...seg_cnt=0x%x remains\n", seg_cnt);
-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Continue from end point */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {

@@ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
|
||||||
&((struct cont_entry *) pkt)->dseg_0_address;
|
&((struct cont_entry *) pkt)->dseg_0_address;
|
||||||
|
|
||||||
/* Load continuation entry data segments. */
|
/* Load continuation entry data segments. */
|
||||||
for (cnt = 0; cnt < 7 && seg_cnt;
|
for_each_sg(sg, s, remseg, cnt) {
|
||||||
cnt++, seg_cnt--) {
|
if (cnt == 7)
|
||||||
|
break;
|
||||||
*dword_ptr++ =
|
*dword_ptr++ =
|
||||||
cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
|
cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
|
||||||
*dword_ptr++ =
|
*dword_ptr++ =
|
||||||
cpu_to_le32(sg_dma_len(sg));
|
cpu_to_le32(sg_dma_len(s));
|
||||||
dprintk(1,
|
dprintk(1,
|
||||||
"S/G Segment Cont. phys_addr=0x%x, "
|
"S/G Segment Cont. phys_addr=0x%x, "
|
||||||
"len=0x%x\n",
|
"len=0x%x\n",
|
||||||
cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))),
|
cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
|
||||||
cpu_to_le32(sg_dma_len(sg)));
|
cpu_to_le32(sg_dma_len(s)));
|
||||||
sg++;
|
|
||||||
}
|
}
|
||||||
|
remseg -= cnt;
|
||||||
dprintk(5, "qla1280_32bit_start_scsi: "
|
dprintk(5, "qla1280_32bit_start_scsi: "
|
||||||
"continuation packet data - "
|
"continuation packet data - "
|
||||||
"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
|
"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
|
||||||
|
@ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = {
|
||||||
.sg_tablesize = SG_ALL,
|
.sg_tablesize = SG_ALL,
|
||||||
.cmd_per_lun = 1,
|
.cmd_per_lun = 1,
|
||||||
.use_clustering = ENABLE_CLUSTERING,
|
.use_clustering = ENABLE_CLUSTERING,
|
||||||
|
.use_sg_chaining = ENABLE_SG_CHAINING,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
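
The qla1280 conversion above is the template for most of this merge: drivers stop advancing scatterlist entries with pointer arithmetic (sg++) and let for_each_sg() follow the list, which transparently hops the chain entries linking one sg block to the next. A minimal sketch of the pattern, not the driver code itself; load_segment() is a hypothetical consumer:

    /* Walk a possibly chained scatterlist entry by entry. */
    static void fill_segments(struct scatterlist *sglist, int nents)
    {
    	struct scatterlist *s;
    	int i;

    	for_each_sg(sglist, s, nents, i)
    		load_segment(i, sg_dma_address(s), sg_dma_len(s));
    }
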
@@ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,

 	/*
@@ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,

 	.max_sectors		= 0xFFFF,
@@ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,

 	.max_sectors		= 0xFFFF,
@@ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };

 static __init int qlogicfas_init(void)
@@ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
 {
 	struct dataseg *ds;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *s;
 	int i, n;

 	if (Cmnd->use_sg) {
@@ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 		n = sg_count;
 		if (n > 4)
 			n = 4;
-		for (i = 0; i < n; i++, sg++) {
-			ds[i].d_base = sg_dma_address(sg);
-			ds[i].d_count = sg_dma_len(sg);
+		for_each_sg(sg, s, n, i) {
+			ds[i].d_base = sg_dma_address(s);
+			ds[i].d_count = sg_dma_len(s);
 		}
 		sg_count -= 4;
+		sg = s;
 		while (sg_count > 0) {
 			struct Continuation_Entry *cont;

@@ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			n = sg_count;
 			if (n > 7)
 				n = 7;
-			for (i = 0; i < n; i++, sg++) {
-				ds[i].d_base = sg_dma_address(sg);
-				ds[i].d_count = sg_dma_len(sg);
+			for_each_sg(sg, s, n, i) {
+				ds[i].d_base = sg_dma_address(s);
+				ds[i].d_count = sg_dma_len(s);
 			}
 			sg_count -= n;
 		}
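
qlogicpti shows the other half of the idiom: when a hardware entry only has room for a fixed number of data segments, the loop cursor (s here) is kept and the next pass resumes from it (sg = s) instead of from sg + 4. A hedged sketch of that resume pattern, with emit_segment() assumed; note the cursor is only meaningful when segments remain:

    /* Fill at most 'max' segments, return where the next pass resumes. */
    static struct scatterlist *fill_bounded(struct scatterlist *sg,
    					    int remseg, int max)
    {
    	struct scatterlist *s = sg;
    	int i;

    	for_each_sg(sg, s, remseg, i) {
    		if (i == max)
    			break;			/* this packet is full */
    		emit_segment(s);		/* hypothetical consumer */
    	}
    	return s;				/* caller does: sg = s; */
    }
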
@@ -38,6 +38,7 @@
 #include <linux/proc_fs.h>
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
+#include <linux/scatterlist.h>

 #include <linux/blkdev.h>
 #include "scsi.h"
@@ -600,7 +601,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	int k, req_len, act_len, len, active;
 	void * kaddr;
 	void * kaddr_off;
-	struct scatterlist * sgpnt;
+	struct scatterlist * sg;

 	if (0 == scp->request_bufflen)
 		return 0;
@@ -619,16 +620,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		scp->resid = req_len - act_len;
 		return 0;
 	}
-	sgpnt = (struct scatterlist *)scp->request_buffer;
 	active = 1;
-	for (k = 0, req_len = 0, act_len = 0; k < scp->use_sg; ++k, ++sgpnt) {
+	req_len = act_len = 0;
+	scsi_for_each_sg(scp, sg, scp->use_sg, k) {
 		if (active) {
 			kaddr = (unsigned char *)
-				kmap_atomic(sgpnt->page, KM_USER0);
+				kmap_atomic(sg->page, KM_USER0);
 			if (NULL == kaddr)
 				return (DID_ERROR << 16);
-			kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-			len = sgpnt->length;
+			kaddr_off = (unsigned char *)kaddr + sg->offset;
+			len = sg->length;
 			if ((req_len + len) > arr_len) {
 				active = 0;
 				len = arr_len - req_len;
@@ -637,7 +638,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 			kunmap_atomic(kaddr, KM_USER0);
 			act_len += len;
 		}
-		req_len += sgpnt->length;
+		req_len += sg->length;
 	}
 	if (scp->resid)
 		scp->resid -= act_len;
@@ -653,7 +654,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	int k, req_len, len, fin;
 	void * kaddr;
 	void * kaddr_off;
-	struct scatterlist * sgpnt;
+	struct scatterlist * sg;

 	if (0 == scp->request_bufflen)
 		return 0;
@@ -668,13 +669,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		memcpy(arr, scp->request_buffer, len);
 		return len;
 	}
-	sgpnt = (struct scatterlist *)scp->request_buffer;
-	for (k = 0, req_len = 0, fin = 0; k < scp->use_sg; ++k, ++sgpnt) {
-		kaddr = (unsigned char *)kmap_atomic(sgpnt->page, KM_USER0);
+	sg = scsi_sglist(scp);
+	req_len = fin = 0;
+	for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
+		kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
 		if (NULL == kaddr)
 			return -1;
-		kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-		len = sgpnt->length;
+		kaddr_off = (unsigned char *)kaddr + sg->offset;
+		len = sg->length;
 		if ((req_len + len) > max_arr_len) {
 			len = max_arr_len - req_len;
 			fin = 1;
@@ -683,7 +685,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		kunmap_atomic(kaddr, KM_USER0);
 		if (fin)
 			return req_len + len;
-		req_len += sgpnt->length;
+		req_len += sg->length;
 	}
 	return req_len;
 }
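
For code that touches the data through the CPU, as scsi_debug does, the same rule applies via the command-level wrapper: scsi_for_each_sg() iterates the command's table with sg_next() underneath. Roughly, assuming kmap-able pages and a hypothetical consume():

    struct scatterlist *sg;
    void *kaddr;
    int k;

    scsi_for_each_sg(scp, sg, scp->use_sg, k) {
    	kaddr = kmap_atomic(sg->page, KM_USER0);
    	consume(kaddr + sg->offset, sg->length);	/* hypothetical */
    	kunmap_atomic(kaddr, KM_USER0);
    }
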
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/scatterlist.h>

 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -33,35 +34,34 @@
 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		2

+/*
+ * The maximum number of SG segments that we will put inside a scatterlist
+ * (unless chaining is used). Should ideally fit inside a single page, to
+ * avoid a higher order allocation.
+ */
+#define SCSI_MAX_SG_SEGMENTS	128
+
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name;
 	struct kmem_cache	*slab;
 	mempool_t	*pool;
 };

-#if (SCSI_MAX_PHYS_SEGMENTS < 32)
-#error SCSI_MAX_PHYS_SEGMENTS is too small
-#endif
+#define SP(x) { x, "sgpool-" #x }

-#define SP(x) { x, "sgpool-" #x }
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
 	SP(8),
 	SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 16)
 	SP(32),
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
 	SP(64),
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
 	SP(128),
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-	SP(256),
-#if (SCSI_MAX_PHYS_SEGMENTS > 256)
-#error SCSI_MAX_PHYS_SEGMENTS is too large
 #endif
 #endif
 #endif
-#endif
 };
 #undef SP

 static void scsi_run_queue(struct request_queue *q);
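
The pool ladder above rounds every table up to the next power-of-two bucket between 8 and SCSI_MAX_SG_SEGMENTS. A userspace-style sketch of the rounding, only to make the arithmetic concrete:

    /* e.g. size_for(40) == 64, drawn from the "sgpool-64" mempool. */
    static unsigned int size_for(unsigned int nents)
    {
    	unsigned int size = 8;

    	while (size < nents)
    		size <<= 1;		/* 8, 16, 32, 64, 128 */
    	return size;
    }
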
@@ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 	struct request_queue *q = rq->q;
 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned int data_len = bufflen, len, bytes, off;
+	struct scatterlist *sg;
 	struct page *page;
 	struct bio *bio = NULL;
 	int i, err, nr_vecs = 0;

-	for (i = 0; i < nsegs; i++) {
-		page = sgl[i].page;
-		off = sgl[i].offset;
-		len = sgl[i].length;
+	for_each_sg(sgl, sg, nsegs, i) {
+		page = sg->page;
+		off = sg->offset;
+		len = sg->length;
+		data_len += len;

 		while (len > 0 && data_len > 0) {
 			/*
@@ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	return NULL;
 }

-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+ */
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
+
+static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
-	struct scsi_host_sg_pool *sgp;
-	struct scatterlist *sgl;
+	unsigned int index;

-	BUG_ON(!cmd->use_sg);
-
-	switch (cmd->use_sg) {
+	switch (nents) {
 	case 1 ... 8:
-		cmd->sglist_len = 0;
+		index = 0;
 		break;
 	case 9 ... 16:
-		cmd->sglist_len = 1;
+		index = 1;
 		break;
+#if (SCSI_MAX_SG_SEGMENTS > 16)
 	case 17 ... 32:
-		cmd->sglist_len = 2;
+		index = 2;
 		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
 	case 33 ... 64:
-		cmd->sglist_len = 3;
+		index = 3;
 		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
 	case 65 ... 128:
-		cmd->sglist_len = 4;
-		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-	case 129 ... 256:
-		cmd->sglist_len = 5;
+		index = 4;
 		break;
 #endif
 #endif
 #endif
 	default:
-		return NULL;
+		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
+		BUG();
 	}

-	sgp = scsi_sg_pools + cmd->sglist_len;
-	sgl = mempool_alloc(sgp->pool, gfp_mask);
-	return sgl;
+	return index;
+}
+
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	struct scsi_host_sg_pool *sgp;
+	struct scatterlist *sgl, *prev, *ret;
+	unsigned int index;
+	int this, left;
+
+	BUG_ON(!cmd->use_sg);
+
+	left = cmd->use_sg;
+	ret = prev = NULL;
+	do {
+		this = left;
+		if (this > SCSI_MAX_SG_SEGMENTS) {
+			this = SCSI_MAX_SG_SEGMENTS - 1;
+			index = SG_MEMPOOL_NR - 1;
+		} else
+			index = scsi_sgtable_index(this);
+
+		left -= this;
+
+		sgp = scsi_sg_pools + index;
+
+		sgl = mempool_alloc(sgp->pool, gfp_mask);
+		if (unlikely(!sgl))
+			goto enomem;
+
+		memset(sgl, 0, sizeof(*sgl) * sgp->size);
+
+		/*
+		 * first loop through, set initial index and return value
+		 */
+		if (!ret)
+			ret = sgl;
+
+		/*
+		 * chain previous sglist, if any. we know the previous
+		 * sglist must be the biggest one, or we would not have
+		 * ended up doing another loop.
+		 */
+		if (prev)
+			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+
+		/*
+		 * don't allow subsequent mempool allocs to sleep, it would
+		 * violate the mempool principle.
+		 */
+		gfp_mask &= ~__GFP_WAIT;
+		gfp_mask |= __GFP_HIGH;
+		prev = sgl;
+	} while (left);
+
+	/*
+	 * ->use_sg may get modified after dma mapping has potentially
+	 * shrunk the number of segments, so keep a copy of it for free.
+	 */
+	cmd->__use_sg = cmd->use_sg;
+	return ret;
+enomem:
+	if (ret) {
+		/*
+		 * Free entries chained off ret. Since we were trying to
+		 * allocate another sglist, we know that all entries are of
+		 * the max size.
+		 */
+		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+		prev = ret;
+		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
+
+		while ((sgl = sg_chain_ptr(ret)) != NULL) {
+			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
+			mempool_free(sgl, sgp->pool);
+		}
+
+		mempool_free(prev, sgp->pool);
+	}
+	return NULL;
 }

 EXPORT_SYMBOL(scsi_alloc_sgtable);
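
The allocation loop's arithmetic is worth spelling out: every block except the last contributes SCSI_MAX_SG_SEGMENTS - 1 usable entries, because sg_chain() converts the final slot into a link. A sketch of the same chunking, assuming the default limit of 128:

    /* count_blocks(300) == 3: blocks of 127 + 127 + 46 entries. */
    static int count_blocks(int use_sg)
    {
    	int left = use_sg, blocks = 0;

    	while (left > 0) {
    		int this = left;

    		if (this > 128)			/* SCSI_MAX_SG_SEGMENTS */
    			this = 128 - 1;		/* last slot -> chain link */
    		left -= this;
    		blocks++;
    	}
    	return blocks;
    }
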
-void scsi_free_sgtable(struct scatterlist *sgl, int index)
+void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
+	struct scatterlist *sgl = cmd->request_buffer;
 	struct scsi_host_sg_pool *sgp;

-	BUG_ON(index >= SG_MEMPOOL_NR);
+	/*
+	 * if this is the biggest size sglist, check if we have
+	 * chained parts we need to free
+	 */
+	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
+		unsigned short this, left;
+		struct scatterlist *next;
+		unsigned int index;
+
+		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
+		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
+		while (left && next) {
+			sgl = next;
+			this = left;
+			if (this > SCSI_MAX_SG_SEGMENTS) {
+				this = SCSI_MAX_SG_SEGMENTS - 1;
+				index = SG_MEMPOOL_NR - 1;
+			} else
+				index = scsi_sgtable_index(this);
+
+			left -= this;
+
+			sgp = scsi_sg_pools + index;
+
+			if (left)
+				next = sg_chain_ptr(&sgl[sgp->size - 1]);
+
+			mempool_free(sgl, sgp->pool);
+		}
+
+		/*
+		 * Restore original, will be freed below
+		 */
+		sgl = cmd->request_buffer;
+		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+	} else
+		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);

-	sgp = scsi_sg_pools + index;
 	mempool_free(sgl, sgp->pool);
 }
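
Freeing mirrors that layout: the chain pointer lives in the last slot of each full block, and because the tables are memset() to zero at allocation, an unused link reads back NULL. A simplified sketch of the walk, under the assumption that every chained block is the max-size pool entry:

    static void free_chain(struct scatterlist *sgl, mempool_t *pool)
    {
    	while (sgl) {
    		/* zeroed table => NULL when no further block is linked */
    		struct scatterlist *next = sg_chain_ptr(&sgl[128 - 1]);

    		mempool_free(sgl, pool);
    		sgl = next;
    	}
    }
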
@@ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable);
 static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
 	if (cmd->use_sg)
-		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+		scsi_free_sgtable(cmd);

 	/*
 	 * Zero these out.  They now point to freed memory, and it is
@@ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 static int scsi_init_io(struct scsi_cmnd *cmd)
 {
 	struct request     *req = cmd->request;
-	struct scatterlist *sgpnt;
 	int		   count;

 	/*
@@ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!sgpnt)) {
+	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
+	if (unlikely(!cmd->request_buffer)) {
 		scsi_unprep_request(req);
 		return BLKPREP_DEFER;
 	}

 	req->buffer = NULL;
-	cmd->request_buffer = (char *) sgpnt;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
 	else
@@ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	if (!q)
 		return NULL;

+	/*
+	 * this limit is imposed by hardware restrictions
+	 */
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
+
+	/*
+	 * In the future, sg chaining support will be mandatory and this
+	 * ifdef can then go away. Right now we don't have all archs
+	 * converted, so better keep it safe.
+	 */
+#ifdef ARCH_HAS_SG_CHAIN
+	if (shost->use_sg_chaining)
+		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	else
+		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#else
+	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#endif
+
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
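
To see where the "8mb ios" figure in the earlier comment comes from: with 4 KiB pages, 2048 segments x 4096 bytes = 8 MiB per request, against 128 x 4096 = 512 KiB without chaining. The actual ceiling depends on page size and on how far DMA mapping merges adjacent segments.
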
@@ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
  *
  * Returns virtual address of the start of the mapped page
  */
-void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 			  size_t *offset, size_t *len)
 {
 	int i;
 	size_t sg_len = 0, len_complete = 0;
+	struct scatterlist *sg;
 	struct page *page;

 	WARN_ON(!irqs_disabled());

-	for (i = 0; i < sg_count; i++) {
+	for_each_sg(sgl, sg, sg_count, i) {
 		len_complete = sg_len; /* Complete sg-entries */
-		sg_len += sg[i].length;
+		sg_len += sg->length;
 		if (sg_len > *offset)
 			break;
 	}
@@ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 	}

 	/* Offset starting from the beginning of first page in this sg-entry */
-	*offset = *offset - len_complete + sg[i].offset;
+	*offset = *offset - len_complete + sg->offset;

 	/* Assumption: contiguous pages can be accessed as "page + i" */
-	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
+	page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
 	*offset &= ~PAGE_MASK;

 	/* Bytes in this sg-entry from *offset to the end of the page */
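
As a worked example of the search loop above: for entries of 4096, 4096 and 4096 bytes and *offset = 9000, the loop stops at the third entry with len_complete = 8192, so the in-entry offset becomes 9000 - 8192 = 808 and the page within that entry is 808 >> PAGE_SHIFT = 0.
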
@@ -332,7 +332,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
 	scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);

 	if (cmd->request_buffer)
-		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+		scsi_free_sgtable(cmd);

 	queue_work(scsi_tgtd, &tcmd->work);
 }
@@ -373,7 +373,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 	}

 	eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
-	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+	scsi_free_sgtable(cmd);
 	return -EINVAL;
 }
@@ -1165,7 +1165,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
 	sg = rsv_schp->buffer;
 	sa = vma->vm_start;
 	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-	     ++k, ++sg) {
+	     ++k, sg = sg_next(sg)) {
 		len = vma->vm_end - sa;
 		len = (len < sg->length) ? len : sg->length;
 		if (offset < len) {
@@ -1209,7 +1209,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	sa = vma->vm_start;
 	sg = rsv_schp->buffer;
 	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
-	     ++k, ++sg) {
+	     ++k, sg = sg_next(sg)) {
 		len = vma->vm_end - sa;
 		len = (len < sg->length) ? len : sg->length;
 		sa += len;
@@ -1840,7 +1840,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	}
 	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
 	     (rem_sz > 0) && (k < mx_sc_elems);
-	     ++k, rem_sz -= ret_sz, ++sg) {
+	     ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {

 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
@@ -1913,7 +1913,7 @@ sg_write_xfer(Sg_request * srp)
 	if (res)
 		return res;

-	for (; p; ++sg, ksglen = sg->length,
+	for (; p; sg = sg_next(sg), ksglen = sg->length,
 	     p = page_address(sg->page)) {
 		if (usglen <= 0)
 			break;
@@ -1992,7 +1992,7 @@ sg_remove_scat(Sg_scatter_hold * schp)
 		int k;

 		for (k = 0; (k < schp->k_use_sg) && sg->page;
-		     ++k, ++sg) {
+		     ++k, sg = sg_next(sg)) {
 			SCSI_LOG_TIMEOUT(5, printk(
 			    "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
 			    k, sg->page, sg->length));
@@ -2045,7 +2045,7 @@ sg_read_xfer(Sg_request * srp)
 	if (res)
 		return res;

-	for (; p; ++sg, ksglen = sg->length,
+	for (; p; sg = sg_next(sg), ksglen = sg->length,
 	     p = page_address(sg->page)) {
 		if (usglen <= 0)
 			break;
@@ -2092,7 +2092,7 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
 	if ((!outp) || (num_read_xfer <= 0))
 		return 0;

-	for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
+	for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
 		num = sg->length;
 		if (num > num_read_xfer) {
 			if (__copy_to_user(outp, page_address(sg->page),
@@ -2142,7 +2142,7 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
 	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
 	rem = size;

-	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
+	for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
 		num = sg->length;
 		if (rem <= num) {
 			sfp->save_scat_len = num;
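
All of the sg.c loops above are mechanical substitutions of the same form; the only rule is that once a table may be chained, ++sg is no longer meaningful. A minimal sketch:

    /* Total byte count of a (possibly chained) scatterlist. */
    static unsigned int sg_total(struct scatterlist *sg, int nents)
    {
    	unsigned int bytes = 0;
    	int k;

    	for (k = 0; k < nents; ++k, sg = sg_next(sg))
    		bytes += sg->length;
    	return bytes;
    }
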
@@ -1123,6 +1123,7 @@ static struct scsi_host_template driver_template = {
 	.this_id			= -1,
 	.sg_tablesize			= ST_MAX_SG,
 	.cmd_per_lun			= ST_CMD_PER_LUN,
+	.use_sg_chaining		= ENABLE_SG_CHAINING,
 };

 static int stex_set_dma_mask(struct pci_dev * pdev)
@@ -854,5 +854,6 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= 1,
 	.unchecked_isa_dma	= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"
@@ -1808,6 +1808,7 @@ static struct scsi_host_template sym2_template = {
 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
 	.this_id		= 7,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.max_sectors		= 0xFFFF,
 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
 	.proc_info		= sym53c8xx_proc_info,
@@ -450,7 +450,8 @@ static struct scsi_host_template driver_template = {
 	.slave_configure	= u14_34f_slave_configure,
 	.this_id		= 7,
 	.unchecked_isa_dma	= 1,
-	.use_clustering		= ENABLE_CLUSTERING
+	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };

 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
@@ -1197,5 +1197,6 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= ULTRASTOR_MAX_CMDS_PER_LUN,
 	.unchecked_isa_dma	= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"
@@ -1671,6 +1671,7 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= 1,
 	.unchecked_isa_dma	= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };

 #include "scsi_module.c"
@@ -798,12 +798,13 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
 {
 	unsigned char *buffer;
 	u16 lba, max_lba;
-	unsigned int page, len, index, offset;
+	unsigned int page, len, offset;
 	unsigned int blockshift = MEDIA_INFO(us).blockshift;
 	unsigned int pageshift = MEDIA_INFO(us).pageshift;
 	unsigned int blocksize = MEDIA_INFO(us).blocksize;
 	unsigned int pagesize = MEDIA_INFO(us).pagesize;
 	unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
+	struct scatterlist *sg;
 	int result;

 	/*
@@ -827,7 +828,8 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
 	max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift);

 	result = USB_STOR_TRANSPORT_GOOD;
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors > 0) {
 		unsigned int zone = lba / uzonesize; /* integer division */
@@ -873,7 +875,7 @@ static int alauda_read_data(struct us_data *us, unsigned long address,

 		/* Store the data in the transfer buffer */
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, TO_XFER_BUF);
+				&sg, &offset, TO_XFER_BUF);

 		page = 0;
 		lba++;
@@ -891,11 +893,12 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
 		unsigned int sectors)
 {
 	unsigned char *buffer, *blockbuffer;
-	unsigned int page, len, index, offset;
+	unsigned int page, len, offset;
 	unsigned int blockshift = MEDIA_INFO(us).blockshift;
 	unsigned int pageshift = MEDIA_INFO(us).pageshift;
 	unsigned int blocksize = MEDIA_INFO(us).blocksize;
 	unsigned int pagesize = MEDIA_INFO(us).pagesize;
+	struct scatterlist *sg;
 	u16 lba, max_lba;
 	int result;

@@ -929,7 +932,8 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
 	max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift);

 	result = USB_STOR_TRANSPORT_GOOD;
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors > 0) {
 		/* Write as many sectors as possible in this block */
@@ -946,7 +950,7 @@ static int alauda_write_data(struct us_data *us, unsigned long address,

 		/* Get the data from the transfer buffer */
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, FROM_XFER_BUF);
+				&sg, &offset, FROM_XFER_BUF);

 		result = alauda_write_lba(us, lba, page, pages, buffer,
 			blockbuffer);
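
On the usb-storage side the conversion swaps the integer index for a struct scatterlist pointer. Callers like alauda now all follow one pattern, sketched here with hypothetical more_to_do()/fill_bounce() helpers: initialize the cursor to NULL, meaning "start of the command's table", and let each call advance it.

    struct scatterlist *sg = NULL;	/* NULL == start of srb's table */
    unsigned int offset = 0;

    while (more_to_do()) {
    	unsigned int len = fill_bounce(buffer);	/* hypothetical */

    	usb_stor_access_xfer_buf(buffer, len, us->srb,
    			&sg, &offset, TO_XFER_BUF);
    }
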
@@ -98,7 +98,8 @@ static int datafab_read_data(struct us_data *us,
 	unsigned char  thistime;
 	unsigned int totallen, alloclen;
 	int len, result;
-	unsigned int sg_idx = 0, sg_offset = 0;
+	unsigned int sg_offset = 0;
+	struct scatterlist *sg = NULL;

 	// we're working in LBA mode.  according to the ATA spec,
 	// we can support up to 28-bit addressing.  I don't know if Datafab
@@ -155,7 +156,7 @@ static int datafab_read_data(struct us_data *us,

 		// Store the data in the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&sg_idx, &sg_offset, TO_XFER_BUF);
+				&sg, &sg_offset, TO_XFER_BUF);

 		sector += thistime;
 		totallen -= len;
@@ -181,7 +182,8 @@ static int datafab_write_data(struct us_data *us,
 	unsigned char thistime;
 	unsigned int totallen, alloclen;
 	int len, result;
-	unsigned int sg_idx = 0, sg_offset = 0;
+	unsigned int sg_offset = 0;
+	struct scatterlist *sg = NULL;

 	// we're working in LBA mode.  according to the ATA spec,
 	// we can support up to 28-bit addressing.  I don't know if Datafab
@@ -217,7 +219,7 @@ static int datafab_write_data(struct us_data *us,

 		// Get the data from the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&sg_idx, &sg_offset, FROM_XFER_BUF);
+				&sg, &sg_offset, FROM_XFER_BUF);

 		command[0] = 0;
 		command[1] = thistime;
@@ -119,7 +119,8 @@ static int jumpshot_read_data(struct us_data *us,
 	unsigned char  thistime;
 	unsigned int totallen, alloclen;
 	int len, result;
-	unsigned int sg_idx = 0, sg_offset = 0;
+	unsigned int sg_offset = 0;
+	struct scatterlist *sg = NULL;

 	// we're working in LBA mode.  according to the ATA spec,
 	// we can support up to 28-bit addressing.  I don't know if Jumpshot
@@ -170,7 +171,7 @@ static int jumpshot_read_data(struct us_data *us,

 		// Store the data in the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&sg_idx, &sg_offset, TO_XFER_BUF);
+				&sg, &sg_offset, TO_XFER_BUF);

 		sector += thistime;
 		totallen -= len;
@@ -195,7 +196,8 @@ static int jumpshot_write_data(struct us_data *us,
 	unsigned char  thistime;
 	unsigned int totallen, alloclen;
 	int len, result, waitcount;
-	unsigned int sg_idx = 0, sg_offset = 0;
+	unsigned int sg_offset = 0;
+	struct scatterlist *sg = NULL;

 	// we're working in LBA mode.  according to the ATA spec,
 	// we can support up to 28-bit addressing.  I don't know if Jumpshot
@@ -225,7 +227,7 @@ static int jumpshot_write_data(struct us_data *us,

 		// Get the data from the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&sg_idx, &sg_offset, FROM_XFER_BUF);
+				&sg, &sg_offset, FROM_XFER_BUF);

 		command[0] = 0;
 		command[1] = thistime;
@@ -157,7 +157,7 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
  * pick up from where this one left off. */

 unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+	unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
 	unsigned int *offset, enum xfer_buf_dir dir)
 {
 	unsigned int cnt;
@@ -184,16 +184,17 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 	 * located in high memory -- then kmap() will map it to a temporary
 	 * position in the kernel's virtual address space. */
 	} else {
-		struct scatterlist *sg =
-			(struct scatterlist *) srb->request_buffer
-			+ *index;
+		struct scatterlist *sg = *sgptr;
+
+		if (!sg)
+			sg = (struct scatterlist *) srb->request_buffer;

 		/* This loop handles a single s-g list entry, which may
 		 * include multiple pages.  Find the initial page structure
 		 * and the starting offset within the page, and update
 		 * the *offset and *index values for the next loop. */
 		cnt = 0;
-		while (cnt < buflen && *index < srb->use_sg) {
+		while (cnt < buflen) {
 			struct page *page = sg->page +
 					((sg->offset + *offset) >> PAGE_SHIFT);
 			unsigned int poff =
@@ -209,8 +210,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,

 				/* Transfer continues to next s-g entry */
 				*offset = 0;
-				++*index;
-				++sg;
+				sg = sg_next(sg);
 			}

 			/* Transfer the data for all the pages in this
@@ -234,6 +234,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 				sglen -= plen;
 			}
 		}
+		*sgptr = sg;
 	}

 	/* Return the amount actually transferred */
@@ -245,9 +246,10 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 void usb_stor_set_xfer_buf(unsigned char *buffer,
 	unsigned int buflen, struct scsi_cmnd *srb)
 {
-	unsigned int index = 0, offset = 0;
+	unsigned int offset = 0;
+	struct scatterlist *sg = NULL;

-	usb_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+	usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
 			TO_XFER_BUF);
 	if (buflen < srb->request_bufflen)
 		srb->resid = srb->request_bufflen - buflen;
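
Inside usb_stor_access_xfer_buf() the reason for the signature change is that an integer index can no longer be turned into an entry by array arithmetic once lists may be chained. A simplified, lowmem-only sketch of the cursor logic (the real function also kmaps highmem pages and walks page by page within an entry):

    /* Copy buflen bytes into a scatterlist, resuming from and updating
     * a (*sgptr, *offset) cursor; a sketch, not the kernel function. */
    static unsigned int copy_to_sg(const unsigned char *buf,
    			unsigned int buflen, struct scatterlist **sgptr,
    			unsigned int *offset)
    {
    	struct scatterlist *sg = *sgptr;
    	unsigned int cnt = 0;

    	while (cnt < buflen && sg) {
    		unsigned int plen = min(sg->length - *offset, buflen - cnt);

    		memcpy(page_address(sg->page) + sg->offset + *offset,
    		       buf + cnt, plen);
    		cnt += plen;
    		*offset += plen;
    		if (*offset == sg->length) {
    			*offset = 0;
    			sg = sg_next(sg);	/* hops chain entries */
    		}
    	}
    	*sgptr = sg;			/* resume point for the next call */
    	return cnt;
    }
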
@@ -52,7 +52,7 @@ extern void usb_stor_transparent_scsi_command(struct scsi_cmnd*,
 enum xfer_buf_dir	{TO_XFER_BUF, FROM_XFER_BUF};

 extern unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+	unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **,
 	unsigned int *offset, enum xfer_buf_dir dir);

 extern void usb_stor_set_xfer_buf(unsigned char *buffer,
@@ -705,7 +705,8 @@ sddr09_read_data(struct us_data *us,
 	unsigned char *buffer;
 	unsigned int lba, maxlba, pba;
 	unsigned int page, pages;
-	unsigned int len, index, offset;
+	unsigned int len, offset;
+	struct scatterlist *sg;
 	int result;

 	// Figure out the initial LBA and page
@@ -730,7 +731,8 @@ sddr09_read_data(struct us_data *us,
 	// contiguous LBA's. Another exercise left to the student.

 	result = 0;
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors > 0) {

@@ -777,7 +779,7 @@ sddr09_read_data(struct us_data *us,

 		// Store the data in the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, TO_XFER_BUF);
+				&sg, &offset, TO_XFER_BUF);

 		page = 0;
 		lba++;
@@ -931,7 +933,8 @@ sddr09_write_data(struct us_data *us,
 	unsigned int pagelen, blocklen;
 	unsigned char *blockbuffer;
 	unsigned char *buffer;
-	unsigned int len, index, offset;
+	unsigned int len, offset;
+	struct scatterlist *sg;
 	int result;

 	// Figure out the initial LBA and page
@@ -968,7 +971,8 @@ sddr09_write_data(struct us_data *us,
 	}

 	result = 0;
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors > 0) {

@@ -987,7 +991,7 @@ sddr09_write_data(struct us_data *us,

 		// Get the data from the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, FROM_XFER_BUF);
+				&sg, &offset, FROM_XFER_BUF);

 		result = sddr09_write_lba(us, lba, page, pages,
 				buffer, blockbuffer);
@@ -167,7 +167,8 @@ static int sddr55_read_data(struct us_data *us,
 	unsigned long address;

 	unsigned short pages;
-	unsigned int len, index, offset;
+	unsigned int len, offset;
+	struct scatterlist *sg;

 	// Since we only read in one block at a time, we have to create
 	// a bounce buffer and move the data a piece at a time between the
@@ -178,7 +179,8 @@ static int sddr55_read_data(struct us_data *us,
 	buffer = kmalloc(len, GFP_NOIO);
 	if (buffer == NULL)
 		return USB_STOR_TRANSPORT_ERROR; /* out of memory */
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors>0) {

@@ -255,7 +257,7 @@ static int sddr55_read_data(struct us_data *us,

 		// Store the data in the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, TO_XFER_BUF);
+				&sg, &offset, TO_XFER_BUF);

 		page = 0;
 		lba++;
@@ -287,7 +289,8 @@ static int sddr55_write_data(struct us_data *us,

 	unsigned short pages;
 	int i;
-	unsigned int len, index, offset;
+	unsigned int len, offset;
+	struct scatterlist *sg;

 	/* check if we are allowed to write */
 	if (info->read_only || info->force_read_only) {
@@ -304,7 +307,8 @@ static int sddr55_write_data(struct us_data *us,
 	buffer = kmalloc(len, GFP_NOIO);
 	if (buffer == NULL)
 		return USB_STOR_TRANSPORT_ERROR;
-	index = offset = 0;
+	offset = 0;
+	sg = NULL;

 	while (sectors > 0) {

@@ -322,7 +326,7 @@ static int sddr55_write_data(struct us_data *us,

 		// Get the data from the transfer buffer
 		usb_stor_access_xfer_buf(buffer, len, us->srb,
-				&index, &offset, FROM_XFER_BUF);
+				&sg, &offset, FROM_XFER_BUF);

 		US_DEBUGP("Write %02X pages, to PBA %04X"
 			" (LBA %04X) page %02X\n",
|
|
@ -993,7 +993,8 @@ static int usbat_flash_read_data(struct us_data *us,
|
||||||
unsigned char thistime;
|
unsigned char thistime;
|
||||||
unsigned int totallen, alloclen;
|
unsigned int totallen, alloclen;
|
||||||
int len, result;
|
int len, result;
|
||||||
unsigned int sg_idx = 0, sg_offset = 0;
|
unsigned int sg_offset = 0;
|
||||||
|
struct scatterlist *sg = NULL;
|
||||||
|
|
||||||
result = usbat_flash_check_media(us, info);
|
result = usbat_flash_check_media(us, info);
|
||||||
if (result != USB_STOR_TRANSPORT_GOOD)
|
if (result != USB_STOR_TRANSPORT_GOOD)
|
||||||
|
@ -1047,7 +1048,7 @@ static int usbat_flash_read_data(struct us_data *us,
|
||||||
|
|
||||||
/* Store the data in the transfer buffer */
|
/* Store the data in the transfer buffer */
|
||||||
usb_stor_access_xfer_buf(buffer, len, us->srb,
|
usb_stor_access_xfer_buf(buffer, len, us->srb,
|
||||||
&sg_idx, &sg_offset, TO_XFER_BUF);
|
&sg, &sg_offset, TO_XFER_BUF);
|
||||||
|
|
||||||
sector += thistime;
|
sector += thistime;
|
||||||
totallen -= len;
|
totallen -= len;
|
||||||
|
@ -1083,7 +1084,8 @@ static int usbat_flash_write_data(struct us_data *us,
|
||||||
unsigned char thistime;
|
unsigned char thistime;
|
||||||
unsigned int totallen, alloclen;
|
unsigned int totallen, alloclen;
|
||||||
int len, result;
|
int len, result;
|
||||||
unsigned int sg_idx = 0, sg_offset = 0;
|
unsigned int sg_offset = 0;
|
||||||
|
struct scatterlist *sg = NULL;
|
||||||
|
|
||||||
result = usbat_flash_check_media(us, info);
|
result = usbat_flash_check_media(us, info);
|
||||||
if (result != USB_STOR_TRANSPORT_GOOD)
|
if (result != USB_STOR_TRANSPORT_GOOD)
|
||||||
|
@ -1122,7 +1124,7 @@ static int usbat_flash_write_data(struct us_data *us,
|
||||||
|
|
||||||
/* Get the data from the transfer buffer */
|
/* Get the data from the transfer buffer */
|
||||||
usb_stor_access_xfer_buf(buffer, len, us->srb,
|
usb_stor_access_xfer_buf(buffer, len, us->srb,
|
||||||
&sg_idx, &sg_offset, FROM_XFER_BUF);
|
&sg, &sg_offset, FROM_XFER_BUF);
|
||||||
|
|
||||||
/* ATA command 0x30 (WRITE SECTORS) */
|
/* ATA command 0x30 (WRITE SECTORS) */
|
||||||
usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);
|
usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);
|
||||||
|
@ -1162,8 +1164,8 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
|
||||||
unsigned char *buffer;
|
unsigned char *buffer;
|
||||||
unsigned int len;
|
unsigned int len;
|
||||||
unsigned int sector;
|
unsigned int sector;
|
||||||
unsigned int sg_segment = 0;
|
|
||||||
unsigned int sg_offset = 0;
|
unsigned int sg_offset = 0;
|
||||||
|
struct scatterlist *sg = NULL;
|
||||||
|
|
||||||
US_DEBUGP("handle_read10: transfersize %d\n",
|
US_DEBUGP("handle_read10: transfersize %d\n",
|
||||||
srb->transfersize);
|
srb->transfersize);
|
||||||
|
@@ -1220,9 +1222,6 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
 	sector |= short_pack(data[7+5], data[7+4]);
 	transferred = 0;
 
-	sg_segment = 0; /* for keeping track of where we are in */
-	sg_offset = 0; /* the scatter/gather list */
-
 	while (transferred != srb->request_bufflen) {
 
 		if (len > srb->request_bufflen - transferred)
@@ -1255,7 +1254,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
 
 		/* Store the data in the transfer buffer */
 		usb_stor_access_xfer_buf(buffer, len, srb,
-				&sg_segment, &sg_offset, TO_XFER_BUF);
+				&sg, &sg_offset, TO_XFER_BUF);
 
 		/* Update the amount transferred and the sector number */
 
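On the caller side, note that each converted function above initializes the cursor to NULL; the access helper appears to latch onto the command's scatterlist on first use and then advances the caller's pointer across calls, so the driver never touches the list layout itself. A hedged sketch of that calling pattern (fill_next_chunk, todo and done are hypothetical):

	struct scatterlist *sg = NULL;	/* helper picks up srb's list */
	unsigned int offset = 0;
	unsigned int done = 0;

	while (done < todo) {
		unsigned int len = fill_next_chunk(buffer, todo - done);

		usb_stor_access_xfer_buf(buffer, len, srb,
				&sg, &offset, TO_XFER_BUF);
		done += len;
	}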
@@ -6,7 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent
 /* coherent mem. is cheap */
@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
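ARCH_HAS_SG_CHAIN, added here and to the other architectures below, advertises that the arch's DMA code has been audited to walk scatterlists with the iterators rather than pointer arithmetic, so the core may hand it chained lists. The mechanism, roughly as merged in this series (the encoding moved into a page_link field in later kernels): the low bit of the entry's page pointer marks a link to a follow-on table instead of a data segment. A sketch of the chain-following step behind sg_next(), illustrative rather than the in-tree definition:

static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	sg++;
	if ((unsigned long)sg->page & 0x01)	/* chain entry, not data */
		sg = (struct scatterlist *)((unsigned long)sg->page & ~0x01);
	return sg;
}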
@@ -6,149 +6,6 @@
  */
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
-			    size_t size, int direction);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-#define __dma_sync(addr, size, rw)		((void)0)
-#define __dma_sync_page(pg, off, sz, rw)	((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
-
-#ifdef CONFIG_PPC64
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
-};
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	/* We don't handle the NULL dev case for ISA for now. We could
-	 * do it via an out of line call but it is not needed for now. The
-	 * only ISA DMA device we support is the floppy and we have a hack
-	 * in the floppy driver directly to get a device for us.
-	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
-		return NULL;
-	return dev->archdata.dma_ops;
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return 0;
-	if (dma_ops->dma_supported == NULL)
-		return 1;
-	return dma_ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return -EIO;
-	if (dma_ops->set_dma_mask != NULL)
-		return dma_ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-					void *cpu_addr, dma_addr_t dma_handle)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-					unsigned long offset, size_t size,
-					enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction);
-}
-
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 				  size_t size,
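The block removed above is the classic per-device ops-table indirection: every generic DMA entry point dispatches through a struct of function pointers hung off the device. Drivers keep consuming it through the generic calls; for context, the usual driver-side use of the dma_set_mask()/dma_supported() pair is a widest-first fallback, sketched here with hypothetical driver names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int mydev_setup_dma(struct device *dev)	/* hypothetical */
{
	if (dma_set_mask(dev, DMA_64BIT_MASK) == 0)
		return 0;			/* 64-bit DMA ok */
	if (dma_set_mask(dev, DMA_32BIT_MASK) == 0)
		return 0;			/* fall back to 32-bit */
	dev_warn(dev, "no usable DMA configuration\n");
	return -EIO;
}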
@@ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		BUG_ON(!sg->page);
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 		sg->dma_address = page_to_bus(sg->page) + sg->offset;
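For a sense of how the converted dma_map_sg() is consumed, here is a hedged sketch of building a two-segment table and mapping it. The buffer names and program_descriptor() are hypothetical, and the table is zeroed manually rather than with the sg_init_table() helper that lands alongside this series:

	struct scatterlist table[2];
	struct scatterlist *sg;
	int i, nents;

	memset(table, 0, sizeof(table));	/* no stale chain bits */
	sg_set_buf(&table[0], buf_a, len_a);
	sg_set_buf(&table[1], buf_b, len_b);

	nents = dma_map_sg(dev, table, 2, DMA_TO_DEVICE);
	for_each_sg(table, sg, nents, i)
		program_descriptor(sg_dma_address(sg), sg_dma_len(sg));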
@@ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev,
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
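The sync conversions matter for the same reason as the map path: each __dma_sync_page() call flushes or invalidates exactly one segment, so skipping a chained segment means silently stale data. A sketch of the bracketing these helpers exist for, on a long-lived streaming mapping (inspect_data is hypothetical):

	nents = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	/* ... device writes into the buffers ... */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
	inspect_data(sgl, nents);	/* CPU may now read safely */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
	/* ... hand the same mapping back to the device ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);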
@@ -41,5 +41,7 @@ struct scatterlist {
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
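With ARCH_HAS_SG_CHAIN defined, a subsystem can stitch small tables into one logical list. A hedged sketch using the sg_chain() helper introduced by this series; note the link consumes the last entry of the first table, so that table carries prv_nents - 1 data segments:

	struct scatterlist first[4], second[4];

	memset(first, 0, sizeof(first));
	memset(second, 0, sizeof(second));
	/* ... fill first[0..2] and second[0..3] with sg_set_buf() ... */
	sg_chain(first, 4, second);	/* first[3] becomes the chain link */
	/* for_each_sg(first, sg, 7, i) now visits all seven data entries */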
@@ -19,4 +19,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD	(~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC_SCATTERLIST_H) */
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD	(~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
@@ -2,10 +2,10 @@
 #define _ASM_I386_DMA_MAPPING_H
 
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sg[0].length == 0);
+	WARN_ON(nents == 0 || sglist[0].length == 0);
 
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
+	for_each_sg(sglist, sg, nents, i) {
+		BUG_ON(!sg->page);
 
-		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+		sg->dma_address = page_to_phys(sg->page) + sg->offset;
 	}
 
 	flush_write_buffers();
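The i386 hunk shows the other half of the audit: not just sg++ but indexed sg[i] access has to go, because with chaining entry i + 1 is not necessarily at address &sg[i] + 1. The resulting rule for driver code, sketched (use_segment is hypothetical):

	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)	/* correct: follows chain links */
		use_segment(sg);

	/* Wrong with chaining: stays inside the first physical table.
	 *	for (i = 0; i < nents; i++)
	 *		use_segment(&sglist[i]);
	 */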
@@ -6,8 +6,7 @@
  * documentation.
  */
 
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 