Merge tag 'perf-tools-for-v6.4-3-2023-05-06' of git://git.kernel.org/pub/scm/linux...
[linux-block.git] / include / linux / swiotlb.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1648993f
JR
2#ifndef __LINUX_SWIOTLB_H
3#define __LINUX_SWIOTLB_H
4
7fd856aa 5#include <linux/device.h>
38674442
TR
6#include <linux/dma-direction.h>
7#include <linux/init.h>
1648993f 8#include <linux/types.h>
f51778db 9#include <linux/limits.h>
73f62095 10#include <linux/spinlock.h>
1648993f
JR
11
12struct device;
38674442 13struct page;
1648993f
JR
14struct scatterlist;
15
/* Flags for swiotlb_init(): how the boot-time bounce-buffer pool is set up. */
#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
/* Size (in bytes) of the default pool, honoring any command-line override. */
unsigned long swiotlb_size_or_default(void);
/*
 * Boot-time / late pool setup.  @remap (may be NULL) lets the arch remap the
 * bounce buffer, e.g. for memory encryption.
 * NOTE(review): exact retry semantics of @remap are defined in
 * kernel/dma/swiotlb.c — confirm there before relying on them.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
/* Update memory-encryption attributes of the pool after it is allocated. */
extern void __init swiotlb_update_mem_attributes(void);
1648993f 43
/*
 * Bounce-buffer primitives: create/tear down a mapping in the swiotlb pool
 * and keep the bounce copy in sync with the original buffer.
 * swiotlb_tbl_map_single() returns the physical address of the bounce slot
 * for @phys (mapping_size bytes used out of alloc_size allocated).
 */
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
		unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

/* Sync the bounce slot with the original buffer in the given direction. */
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
/* Map @phys for DMA via a bounce buffer; returns the DMA address to use. */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
61
#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and store virtual
 *		address for bounce buffer operation.
 * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
 *		@end. For default swiotlb, this is command line adjustable via
 *		setup_io_tlb_npages.
 * @used:	The number of used IO TLB block.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:	%true if the pool is used for memory allocation
 * @nareas:	The area number in the pool.
 * @area_nslabs: The slot number in the area.
 * @areas:	Array of @nareas per-area descriptors (struct io_tlb_area is
 *		defined in kernel/dma/swiotlb.c, not visible here).
 * @slots:	Array of per-slot descriptors.  NOTE(review): the free @list,
 *		@orig_addr (original address of a mapped entry) and
 *		@alloc_size (size of the allocated buffer) documented by older
 *		versions of this comment are fields of struct io_tlb_slot,
 *		not of this struct — see kernel/dma/swiotlb.c.
 * @total_used:	The total number of slots in the pool that are currently used
 *		across all areas. Used only for calculating used_hiwater in
 *		debugfs.
 * @used_hiwater: The high water mark for total_used. Used only for reporting
 *		in debugfs.
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
#endif
};
/* The system-wide default pool; per-device pools hang off dev->dma_io_tlb_mem. */
extern struct io_tlb_mem io_tlb_default_mem;
55897af6 116
7fd856aa 117static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
55897af6 118{
7fd856aa 119 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
73f62095 120
2d29960a 121 return mem && paddr >= mem->start && paddr < mem->end;
55897af6
CH
122}
123
903cd0f3
CC
124static inline bool is_swiotlb_force_bounce(struct device *dev)
125{
126 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
127
128 return mem && mem->force_bounce;
129}
130
/* Core swiotlb entry points, real implementations under CONFIG_SWIOTLB. */
void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
/* Largest contiguous mapping the pool can bounce for @dev. */
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(struct device *dev);
/* Adjust the default pool size before it is allocated at boot. */
void __init swiotlb_adjust_size(unsigned long size);
#else	/* !CONFIG_SWIOTLB: no-op stubs so callers need no #ifdef guards */
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	/* No bounce buffering, so no swiotlb-imposed mapping limit. */
	return SIZE_MAX;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);
9c5a3621 167
f4111e39
CC
#ifdef CONFIG_DMA_RESTRICTED_POOL
/* Allocate/free pages directly from @dev's restricted swiotlb pool. */
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

/* %true if @dev's pool is also used to satisfy coherent allocations. */
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else	/* stubs: no restricted pools configured */
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */
191
1648993f 192#endif /* __LINUX_SWIOTLB_H */