mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-10-31 16:54:21 +00:00 
			
		
		
		
	 187f1882b5
			
		
	
	
		187f1882b5
		
	
	
	
	
		
			
			If a header file is making use of BUG, BUG_ON, BUILD_BUG_ON, or any other BUG variant in a static inline (i.e. not in a #define) then that header really should be including <linux/bug.h> and not just expecting it to be implicitly present. We can make this change risk-free, since if the files using these headers didn't have exposure to linux/bug.h already, they would have been causing compile failures/warnings. Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
		
			
				
	
	
		
			179 lines
		
	
	
	
		
			5.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			179 lines
		
	
	
	
		
			5.3 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| #ifndef _ASM_GENERIC_DMA_MAPPING_H
 | |
| #define _ASM_GENERIC_DMA_MAPPING_H
 | |
| 
 | |
| #include <linux/kmemcheck.h>
 | |
| #include <linux/bug.h>
 | |
| #include <linux/scatterlist.h>
 | |
| #include <linux/dma-debug.h>
 | |
| #include <linux/dma-attrs.h>
 | |
| 
 | |
| static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 | |
| 					      size_t size,
 | |
| 					      enum dma_data_direction dir,
 | |
| 					      struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	dma_addr_t addr;
 | |
| 
 | |
| 	kmemcheck_mark_initialized(ptr, size);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	addr = ops->map_page(dev, virt_to_page(ptr),
 | |
| 			     (unsigned long)ptr & ~PAGE_MASK, size,
 | |
| 			     dir, attrs);
 | |
| 	debug_dma_map_page(dev, virt_to_page(ptr),
 | |
| 			   (unsigned long)ptr & ~PAGE_MASK, size,
 | |
| 			   dir, addr, true);
 | |
| 	return addr;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 | |
| 					  size_t size,
 | |
| 					  enum dma_data_direction dir,
 | |
| 					  struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->unmap_page)
 | |
| 		ops->unmap_page(dev, addr, size, dir, attrs);
 | |
| 	debug_dma_unmap_page(dev, addr, size, dir, true);
 | |
| }
 | |
| 
 | |
| static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 | |
| 				   int nents, enum dma_data_direction dir,
 | |
| 				   struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	int i, ents;
 | |
| 	struct scatterlist *s;
 | |
| 
 | |
| 	for_each_sg(sg, s, nents, i)
 | |
| 		kmemcheck_mark_initialized(sg_virt(s), s->length);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
 | |
| 	debug_dma_map_sg(dev, sg, nents, ents, dir);
 | |
| 
 | |
| 	return ents;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 | |
| 				      int nents, enum dma_data_direction dir,
 | |
| 				      struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	debug_dma_unmap_sg(dev, sg, nents, dir);
 | |
| 	if (ops->unmap_sg)
 | |
| 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 | |
| }
 | |
| 
 | |
| static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 | |
| 				      size_t offset, size_t size,
 | |
| 				      enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	dma_addr_t addr;
 | |
| 
 | |
| 	kmemcheck_mark_initialized(page_address(page) + offset, size);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 | |
| 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 | |
| 
 | |
| 	return addr;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 | |
| 				  size_t size, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->unmap_page)
 | |
| 		ops->unmap_page(dev, addr, size, dir, NULL);
 | |
| 	debug_dma_unmap_page(dev, addr, size, dir, false);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 | |
| 					   size_t size,
 | |
| 					   enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_cpu)
 | |
| 		ops->sync_single_for_cpu(dev, addr, size, dir);
 | |
| 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_for_device(struct device *dev,
 | |
| 					      dma_addr_t addr, size_t size,
 | |
| 					      enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_device)
 | |
| 		ops->sync_single_for_device(dev, addr, size, dir);
 | |
| 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_range_for_cpu(struct device *dev,
 | |
| 						 dma_addr_t addr,
 | |
| 						 unsigned long offset,
 | |
| 						 size_t size,
 | |
| 						 enum dma_data_direction dir)
 | |
| {
 | |
| 	const struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_cpu)
 | |
| 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
 | |
| 	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_range_for_device(struct device *dev,
 | |
| 						    dma_addr_t addr,
 | |
| 						    unsigned long offset,
 | |
| 						    size_t size,
 | |
| 						    enum dma_data_direction dir)
 | |
| {
 | |
| 	const struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_device)
 | |
| 		ops->sync_single_for_device(dev, addr + offset, size, dir);
 | |
| 	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void
 | |
| dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 | |
| 		    int nelems, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_sg_for_cpu)
 | |
| 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 | |
| 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 | |
| }
 | |
| 
 | |
| static inline void
 | |
| dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 | |
| 		       int nelems, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_sg_for_device)
 | |
| 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 | |
| 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 | |
| 
 | |
| }
 | |
| 
 | |
/*
 * Convenience wrappers: the historical non-attrs API, implemented by
 * calling the *_attrs variants with NULL attributes.
 */
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 | |
| 
 | |
| #endif
 |