scatterlist: use sg_phys()
Coccinelle cleanup to replace open coded sg to physical address
translations.  This is in preparation for introducing scatterlists that
reference __pfn_t.

// sg_phys.cocci: convert usage of page_to_phys(sg_page(sg)) to sg_phys(sg)
// usage: make coccicheck COCCI=sg_phys.cocci MODE=patch

virtual patch

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg)) + sg->offset
+ sg_phys(sg)

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg))
+ sg_phys(sg) & PAGE_MASK

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 89e2a8404e
commit db0fa0cb01
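For context, both rewrite rules fall out of the definition of sg_phys()
itself. A minimal reference sketch, quoting the helper as defined in
include/linux/scatterlist.h around the time of this change (the second
identity additionally assumes sg->offset stays below PAGE_SIZE):

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
        return page_to_phys(sg_page(sg)) + sg->offset;
}

/*
 * Hence, for any entry whose offset does not cross a page boundary:
 *
 *   page_to_phys(sg_page(sg)) + sg->offset  ==  sg_phys(sg)
 *   page_to_phys(sg_page(sg))               ==  sg_phys(sg) & PAGE_MASK
 *
 * which are exactly the two semantic patch rules in the commit message.
 */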
arch/arm/mm/dma-mapping.c
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                return -ENOMEM;

        for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-               phys_addr_t phys = page_to_phys(sg_page(s));
+               phys_addr_t phys = sg_phys(s) & PAGE_MASK;
                unsigned int len = PAGE_ALIGN(s->offset + s->length);

                if (!is_coherent &&
arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-                                                       sg->length, direction);
+               __dma_sync(sg_phys(sg), sg->length, direction);
        }

        return nents;
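Worth noting in this hunk: sg->dma_address on the line above was already
computed with sg_phys(sg), so the conversion also eliminates the oddity of
deriving the same physical address two different ways in the same loop body.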
drivers/iommu/intel-iommu.c
@@ -2094,7 +2094,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) & PAGE_MASK) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

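Here the mask is not just cosmetic: the low bits of pteval carry the
protection flags OR'd in from prot, so the address portion must stay page
aligned. sg_phys(sg) & PAGE_MASK reproduces the page-aligned value that
page_to_phys(sg_page(sg)) used to yield, under the same assumption that
sg->offset stays within the first page.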
drivers/iommu/intel-iommu.c
@@ -3620,7 +3620,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,

        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        for_each_sg(sg, s, nents, i) {
-               phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+               phys_addr_t phys = sg_phys(s);

                /*
                 * We are mapping on IOMMU page boundaries, so offset within
drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
        sg = table->sgl;
        for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
                sg = sg_next(sg);
        }
drivers/staging/android/ion/ion_chunk_heap.c
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                        DMA_BIDIRECTIONAL);

        for_each_sg(table->sgl, sg, table->nents, i) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
        }
        chunk_heap->allocated -= allocated_size;
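In both ion hunks the value passed to gen_pool_free() has to match the chunk
address originally handed out by the pool allocator; assuming the chunk heap
builds its sg entries with a zero in-page offset, sg_phys(sg) & PAGE_MASK
leaves that address unchanged while routing the translation through the
common helper.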