Commit | Line | Data |
---|---|---|
2b9feef2 AE |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. | |
a4388da5 | 4 | * Copyright (C) 2018-2022 Linaro Ltd. |
2b9feef2 AE |
5 | */ |
6 | ||
7 | #include <linux/types.h> | |
8 | #include <linux/kernel.h> | |
9 | #include <linux/bits.h> | |
10 | #include <linux/bitops.h> | |
11 | #include <linux/bitfield.h> | |
12 | #include <linux/io.h> | |
13 | #include <linux/build_bug.h> | |
14 | #include <linux/device.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | ||
17 | #include "ipa.h" | |
18 | #include "ipa_version.h" | |
19 | #include "ipa_endpoint.h" | |
20 | #include "ipa_table.h" | |
21 | #include "ipa_reg.h" | |
22 | #include "ipa_mem.h" | |
23 | #include "ipa_cmd.h" | |
24 | #include "gsi.h" | |
25 | #include "gsi_trans.h" | |
26 | ||
27 | /** | |
28 | * DOC: IPA Filter and Route Tables | |
29 | * | |
4ea29143 AE |
30 | * The IPA has tables defined in its local (IPA-resident) memory that define |
31 | * filter and routing rules. An entry in either of these tables is a little | |
32 | * endian 64-bit "slot" that holds the address of a rule definition. (The | |
33 | * size of these slots is 64 bits regardless of the host DMA address size.) | |
34 | * | |
35 | * Separate tables (both filter and route) are used for IPv4 and IPv6. There | |
36 | * are normally another set of "hashed" filter and route tables, which are | |
37 | * used with a hash of message metadata. Hashed operation is not supported | |
38 | * by all IPA hardware (IPA v4.2 doesn't support hashed tables). | |
39 | * | |
40 | * Rules can be in local memory or in DRAM (system memory). The offset of | |
41 | * an object (such as a route or filter table) in IPA-resident memory must | |
42 | * be 128-byte aligned. An object in system memory (such as a route or filter | |
43 | * rule) must be at an 8-byte aligned address. We currently only place | |
44 | * route or filter rules in system memory. | |
45 | * | |
2b9feef2 AE |
46 | * A rule consists of a contiguous block of 32-bit values terminated with |
47 | * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits | |
48 | * represents "no filtering" or "no routing," and is the reset value for | |
4ea29143 | 49 | * filter or route table rules. |
2b9feef2 AE |
50 | * |
51 | * Each filter rule is associated with an AP or modem TX endpoint, though | |
4ea29143 | 52 | * not all TX endpoints support filtering. The first 64-bit slot in a |
2b9feef2 AE |
53 | * filter table is a bitmap indicating which endpoints have entries in |
54 | * the table. The low-order bit (bit 0) in this bitmap represents a | |
55 | * special global filter, which applies to all traffic. This is not | |
56 | * used in the current code. Bit 1, if set, indicates that there is an | |
4ea29143 AE |
57 | * entry (i.e. slot containing a system address referring to a rule) for |
58 | * endpoint 0 in the table. Bit 3, if set, indicates there is an entry | |
59 | * for endpoint 2, and so on. Space is set aside in IPA local memory to | |
60 | * hold as many filter table entries as might be required, but typically | |
61 | * they are not all used. | |
2b9feef2 AE |
62 | * |
63 | * The AP initializes all entries in a filter table to refer to a "zero" | |
64 | * entry. Once initialized the modem and AP update the entries for | |
65 | * endpoints they "own" directly. Currently the AP does not use the | |
66 | * IPA filtering functionality. | |
67 | * | |
68 | * IPA Filter Table | |
69 | * ---------------------- | |
70 | * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5) | |
71 | * |--------------------| | |
72 | * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule | |
73 | * |--------------------| | |
74 | * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule | |
75 | * |--------------------| | |
76 | * (unused) | | (Unused space in filter table) | |
77 | * |--------------------| | |
78 | * . . . | |
79 | * |--------------------| | |
80 | * (unused) | | (Unused space in filter table) | |
81 | * ---------------------- | |
82 | * | |
83 | * The set of available route rules is divided about equally between the AP | |
84 | * and modem. The AP initializes all entries in a route table to refer to | |
85 | * a "zero entry". Once initialized, the modem and AP are responsible for | |
86 | * updating their own entries. All entries in a route table are usable, | |
87 | * though the AP currently does not use the IPA routing functionality. | |
88 | * | |
89 | * IPA Route Table | |
90 | * ---------------------- | |
91 | * 1st modem route | 0x0001234500001100 | DMA address for first route rule | |
92 | * |--------------------| | |
93 | * 2nd modem route | 0x0001234500001140 | DMA address for second route rule | |
94 | * |--------------------| | |
95 | * . . . | |
96 | * |--------------------| | |
97 | * Last modem route| 0x0001234500002280 | DMA address for Nth route rule | |
98 | * |--------------------| | |
99 | * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1) | |
100 | * |--------------------| | |
101 | * 2nd AP route | 0x0001234500001140 | DMA address for next route rule | |
102 | * |--------------------| | |
103 | * . . . | |
104 | * |--------------------| | |
105 | * Last AP route | 0x0001234500002280 | DMA address for last route rule | |
106 | * ---------------------- | |
107 | */ | |
108 | ||
2b9feef2 AE |
109 | /* Filter or route rules consist of a set of 32-bit values followed by a |
110 | * 32-bit all-zero rule list terminator. The "zero rule" is simply an | |
111 | * all-zero rule followed by the list terminator. | |
112 | */ | |
113 | #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32)) | |
114 | ||
2b9feef2 AE |
115 | /* Check things that can be validated at build time. */ |
116 | static void ipa_table_validate_build(void) | |
117 | { | |
d2fd2311 AE |
118 | /* Filter and route tables contain DMA addresses that refer |
119 | * to filter or route rules. But the size of a table entry | |
120 | * is 64 bits regardless of what the size of an AP DMA address | |
121 | * is. A fixed constant defines the size of an entry, and | |
122 | * code in ipa_table_init() uses a pointer to __le64 to | |
123 | * initialize tables. | |
2b9feef2 | 124 | */ |
4ea29143 | 125 | BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64)); |
2b9feef2 AE |
126 | |
127 | /* A "zero rule" is used to represent no filtering or no routing. | |
128 | * It is a 64-bit block of zeroed memory. Code in ipa_table_init() | |
129 | * assumes that it can be written using a pointer to __le64. | |
130 | */ | |
131 | BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64)); | |
132 | ||
133 | /* Impose a practical limit on the number of routes */ | |
134 | BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32); | |
135 | /* The modem must be allotted at least one route table entry */ | |
136 | BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT); | |
cf139196 AE |
137 | /* AP must too, but we can't use more than what is available */ |
138 | BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT >= IPA_ROUTE_COUNT_MAX); | |
139 | } | |
2b9feef2 | 140 | |
cf139196 AE |
141 | static const struct ipa_mem * |
142 | ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6) | |
143 | { | |
144 | enum ipa_mem_id mem_id; | |
145 | ||
146 | mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED | |
147 | : IPA_MEM_V4_FILTER_HASHED | |
148 | : ipv6 ? IPA_MEM_V6_FILTER | |
149 | : IPA_MEM_V4_FILTER | |
150 | : hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED | |
151 | : IPA_MEM_V4_ROUTE_HASHED | |
152 | : ipv6 ? IPA_MEM_V6_ROUTE | |
153 | : IPA_MEM_V4_ROUTE; | |
154 | ||
155 | return ipa_mem_find(ipa, mem_id); | |
2b9feef2 AE |
156 | } |
157 | ||
158 | static bool | |
e9f5b276 | 159 | ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route) |
2b9feef2 | 160 | { |
5e3bc1e5 | 161 | const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); |
2b9feef2 | 162 | struct device *dev = &ipa->pdev->dev; |
2b9feef2 AE |
163 | u32 size; |
164 | ||
e9f5b276 | 165 | if (route) |
4ea29143 | 166 | size = IPA_ROUTE_COUNT_MAX * sizeof(__le64); |
e9f5b276 | 167 | else |
4ea29143 | 168 | size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); |
2b9feef2 | 169 | |
f2c1dac0 | 170 | if (!ipa_cmd_table_valid(ipa, mem, route)) |
2b9feef2 AE |
171 | return false; |
172 | ||
173 | /* mem->size >= size is sufficient, but we'll demand more */ | |
174 | if (mem->size == size) | |
175 | return true; | |
176 | ||
177 | /* Hashed table regions can be zero size if hashing is not supported */ | |
546948bf | 178 | if (ipa_table_hash_support(ipa) && !mem->size) |
2b9feef2 AE |
179 | return true; |
180 | ||
e9f5b276 AE |
181 | dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n", |
182 | route ? "route" : "filter", mem_id, mem->size, size); | |
2b9feef2 AE |
183 | |
184 | return false; | |
185 | } | |
186 | ||
187 | /* Verify the filter and route table memory regions are the expected size */ | |
188 | bool ipa_table_valid(struct ipa *ipa) | |
189 | { | |
e9f5b276 AE |
190 | bool valid; |
191 | ||
546948bf AE |
192 | valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false); |
193 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false); | |
194 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true); | |
195 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true); | |
196 | ||
197 | if (!ipa_table_hash_support(ipa)) | |
198 | return valid; | |
199 | ||
200 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, | |
201 | false); | |
202 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, | |
203 | false); | |
204 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, | |
205 | true); | |
206 | valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, | |
207 | true); | |
2b9feef2 AE |
208 | |
209 | return valid; | |
210 | } | |
211 | ||
212 | bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map) | |
213 | { | |
214 | struct device *dev = &ipa->pdev->dev; | |
215 | u32 count; | |
216 | ||
217 | if (!filter_map) { | |
218 | dev_err(dev, "at least one filtering endpoint is required\n"); | |
219 | ||
220 | return false; | |
221 | } | |
222 | ||
223 | count = hweight32(filter_map); | |
224 | if (count > IPA_FILTER_COUNT_MAX) { | |
225 | dev_err(dev, "too many filtering endpoints (%u, max %u)\n", | |
226 | count, IPA_FILTER_COUNT_MAX); | |
227 | ||
228 | return false; | |
229 | } | |
230 | ||
231 | return true; | |
232 | } | |
233 | ||
2b9feef2 AE |
234 | /* Zero entry count means no table, so just return a 0 address */ |
235 | static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) | |
236 | { | |
237 | u32 skip; | |
238 | ||
239 | if (!count) | |
240 | return 0; | |
241 | ||
5bc55884 | 242 | WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); |
2b9feef2 AE |
243 | |
244 | /* Skip over the zero rule and possibly the filter mask */ | |
245 | skip = filter_mask ? 1 : 2; | |
246 | ||
247 | return ipa->table_addr + skip * sizeof(*ipa->table_virt); | |
248 | } | |
249 | ||
250 | static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, | |
25116645 | 251 | u16 first, u16 count, enum ipa_mem_id mem_id) |
2b9feef2 AE |
252 | { |
253 | struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); | |
5e3bc1e5 | 254 | const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); |
2b9feef2 AE |
255 | dma_addr_t addr; |
256 | u32 offset; | |
257 | u16 size; | |
258 | ||
25116645 | 259 | /* Nothing to do if the table memory region is empty */ |
2b9feef2 AE |
260 | if (!mem->size) |
261 | return; | |
262 | ||
263 | if (filter) | |
264 | first++; /* skip over bitmap */ | |
265 | ||
4ea29143 AE |
266 | offset = mem->offset + first * sizeof(__le64); |
267 | size = count * sizeof(__le64); | |
2b9feef2 AE |
268 | addr = ipa_table_addr(ipa, false, count); |
269 | ||
270 | ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true); | |
271 | } | |
272 | ||
273 | /* Reset entries in a single filter table belonging to either the AP or | |
274 | * modem to refer to the zero entry. The memory region supplied will be | |
275 | * for the IPv4 and IPv6 non-hashed and hashed filter tables. | |
276 | */ | |
277 | static int | |
07c525a6 | 278 | ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem) |
2b9feef2 AE |
279 | { |
280 | u32 ep_mask = ipa->filter_map; | |
281 | u32 count = hweight32(ep_mask); | |
282 | struct gsi_trans *trans; | |
283 | enum gsi_ee_id ee_id; | |
284 | ||
2b9feef2 AE |
285 | trans = ipa_cmd_trans_alloc(ipa, count); |
286 | if (!trans) { | |
287 | dev_err(&ipa->pdev->dev, | |
288 | "no transaction for %s filter reset\n", | |
289 | modem ? "modem" : "AP"); | |
290 | return -EBUSY; | |
291 | } | |
292 | ||
293 | ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP; | |
294 | while (ep_mask) { | |
295 | u32 endpoint_id = __ffs(ep_mask); | |
296 | struct ipa_endpoint *endpoint; | |
297 | ||
298 | ep_mask ^= BIT(endpoint_id); | |
299 | ||
300 | endpoint = &ipa->endpoint[endpoint_id]; | |
301 | if (endpoint->ee_id != ee_id) | |
302 | continue; | |
303 | ||
25116645 | 304 | ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id); |
2b9feef2 AE |
305 | } |
306 | ||
307 | gsi_trans_commit_wait(trans); | |
308 | ||
309 | return 0; | |
310 | } | |
311 | ||
312 | /* Theoretically, each filter table could have more filter slots to | |
313 | * update than the maximum number of commands in a transaction. So | |
314 | * we do each table separately. | |
315 | */ | |
316 | static int ipa_filter_reset(struct ipa *ipa, bool modem) | |
317 | { | |
318 | int ret; | |
319 | ||
07c525a6 | 320 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem); |
2b9feef2 AE |
321 | if (ret) |
322 | return ret; | |
323 | ||
07c525a6 | 324 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem); |
2b9feef2 AE |
325 | if (ret) |
326 | return ret; | |
327 | ||
07c525a6 | 328 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem); |
2b9feef2 AE |
329 | if (ret) |
330 | return ret; | |
07c525a6 | 331 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem); |
2b9feef2 AE |
332 | |
333 | return ret; | |
334 | } | |
335 | ||
336 | /* The AP routes and modem routes are each contiguous within the | |
337 | * table. We can update each table with a single command, and we | |
338 | * won't exceed the per-transaction command limit. | |
339 | * */ | |
340 | static int ipa_route_reset(struct ipa *ipa, bool modem) | |
341 | { | |
342 | struct gsi_trans *trans; | |
343 | u16 first; | |
344 | u16 count; | |
345 | ||
346 | trans = ipa_cmd_trans_alloc(ipa, 4); | |
347 | if (!trans) { | |
348 | dev_err(&ipa->pdev->dev, | |
349 | "no transaction for %s route reset\n", | |
350 | modem ? "modem" : "AP"); | |
351 | return -EBUSY; | |
352 | } | |
353 | ||
354 | if (modem) { | |
fb4014ac | 355 | first = 0; |
2b9feef2 AE |
356 | count = IPA_ROUTE_MODEM_COUNT; |
357 | } else { | |
fb4014ac AE |
358 | first = IPA_ROUTE_MODEM_COUNT; |
359 | count = IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT; | |
2b9feef2 AE |
360 | } |
361 | ||
25116645 | 362 | ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE); |
2b9feef2 | 363 | ipa_table_reset_add(trans, false, first, count, |
25116645 | 364 | IPA_MEM_V4_ROUTE_HASHED); |
2b9feef2 | 365 | |
25116645 | 366 | ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE); |
2b9feef2 | 367 | ipa_table_reset_add(trans, false, first, count, |
25116645 | 368 | IPA_MEM_V6_ROUTE_HASHED); |
2b9feef2 AE |
369 | |
370 | gsi_trans_commit_wait(trans); | |
371 | ||
372 | return 0; | |
373 | } | |
374 | ||
375 | void ipa_table_reset(struct ipa *ipa, bool modem) | |
376 | { | |
377 | struct device *dev = &ipa->pdev->dev; | |
378 | const char *ee_name; | |
379 | int ret; | |
380 | ||
381 | ee_name = modem ? "modem" : "AP"; | |
382 | ||
383 | /* Report errors, but reset filter and route tables */ | |
384 | ret = ipa_filter_reset(ipa, modem); | |
385 | if (ret) | |
386 | dev_err(dev, "error %d resetting filter table for %s\n", | |
387 | ret, ee_name); | |
388 | ||
389 | ret = ipa_route_reset(ipa, modem); | |
390 | if (ret) | |
391 | dev_err(dev, "error %d resetting route table for %s\n", | |
392 | ret, ee_name); | |
393 | } | |
394 | ||
395 | int ipa_table_hash_flush(struct ipa *ipa) | |
396 | { | |
6a244b75 | 397 | const struct ipa_reg *reg; |
2b9feef2 | 398 | struct gsi_trans *trans; |
6a244b75 | 399 | u32 offset; |
2b9feef2 AE |
400 | u32 val; |
401 | ||
a266ad6b | 402 | if (!ipa_table_hash_support(ipa)) |
2b9feef2 AE |
403 | return 0; |
404 | ||
405 | trans = ipa_cmd_trans_alloc(ipa, 1); | |
406 | if (!trans) { | |
407 | dev_err(&ipa->pdev->dev, "no transaction for hash flush\n"); | |
408 | return -EBUSY; | |
409 | } | |
410 | ||
6a244b75 AE |
411 | reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH); |
412 | offset = ipa_reg_offset(reg); | |
413 | ||
62b9c009 AE |
414 | val = ipa_reg_bit(reg, IPV6_ROUTER_HASH); |
415 | val |= ipa_reg_bit(reg, IPV6_FILTER_HASH); | |
416 | val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH); | |
417 | val |= ipa_reg_bit(reg, IPV4_FILTER_HASH); | |
2b9feef2 AE |
418 | |
419 | ipa_cmd_register_write_add(trans, offset, val, val, false); | |
420 | ||
421 | gsi_trans_commit_wait(trans); | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
426 | static void ipa_table_init_add(struct gsi_trans *trans, bool filter, | |
427 | enum ipa_cmd_opcode opcode, | |
25116645 AE |
428 | enum ipa_mem_id mem_id, |
429 | enum ipa_mem_id hash_mem_id) | |
2b9feef2 AE |
430 | { |
431 | struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); | |
5e3bc1e5 AE |
432 | const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id); |
433 | const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); | |
2b9feef2 AE |
434 | dma_addr_t hash_addr; |
435 | dma_addr_t addr; | |
dc901505 | 436 | u32 zero_offset; |
2b9feef2 | 437 | u16 hash_count; |
dc901505 | 438 | u32 zero_size; |
2b9feef2 AE |
439 | u16 hash_size; |
440 | u16 count; | |
441 | u16 size; | |
442 | ||
dc901505 | 443 | /* Compute the number of table entries to initialize */ |
2b9feef2 | 444 | if (filter) { |
dc901505 AE |
445 | /* The number of filtering endpoints determines number of |
446 | * entries in the filter table; we also add one more "slot" | |
447 | * to hold the bitmap itself. The size of the hashed filter | |
448 | * table is either the same as the non-hashed one, or zero. | |
449 | */ | |
b5c10223 | 450 | count = 1 + hweight32(ipa->filter_map); |
2b9feef2 AE |
451 | hash_count = hash_mem->size ? count : 0; |
452 | } else { | |
dc901505 AE |
453 | /* The size of a route table region determines the number |
454 | * of entries it has. | |
455 | */ | |
4ea29143 AE |
456 | count = mem->size / sizeof(__le64); |
457 | hash_count = hash_mem->size / sizeof(__le64); | |
2b9feef2 | 458 | } |
4ea29143 AE |
459 | size = count * sizeof(__le64); |
460 | hash_size = hash_count * sizeof(__le64); | |
2b9feef2 AE |
461 | |
462 | addr = ipa_table_addr(ipa, filter, count); | |
463 | hash_addr = ipa_table_addr(ipa, filter, hash_count); | |
464 | ||
465 | ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr, | |
466 | hash_size, hash_mem->offset, hash_addr); | |
dc901505 AE |
467 | if (!filter) |
468 | return; | |
469 | ||
470 | /* Zero the unused space in the filter table */ | |
471 | zero_offset = mem->offset + size; | |
472 | zero_size = mem->size - size; | |
473 | ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size, | |
474 | ipa->zero_addr, true); | |
475 | if (!hash_size) | |
476 | return; | |
477 | ||
478 | /* Zero the unused space in the hashed filter table */ | |
479 | zero_offset = hash_mem->offset + hash_size; | |
480 | zero_size = hash_mem->size - hash_size; | |
481 | ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size, | |
482 | ipa->zero_addr, true); | |
2b9feef2 AE |
483 | } |
484 | ||
485 | int ipa_table_setup(struct ipa *ipa) | |
486 | { | |
487 | struct gsi_trans *trans; | |
488 | ||
dc901505 AE |
489 | /* We will need at most 8 TREs: |
490 | * - IPv4: | |
491 | * - One for route table initialization (non-hashed and hashed) | |
492 | * - One for filter table initialization (non-hashed and hashed) | |
493 | * - One to zero unused entries in the non-hashed filter table | |
494 | * - One to zero unused entries in the hashed filter table | |
495 | * - IPv6: | |
496 | * - One for route table initialization (non-hashed and hashed) | |
497 | * - One for filter table initialization (non-hashed and hashed) | |
498 | * - One to zero unused entries in the non-hashed filter table | |
499 | * - One to zero unused entries in the hashed filter table | |
500 | * All platforms support at least 8 TREs in a transaction. | |
501 | */ | |
502 | trans = ipa_cmd_trans_alloc(ipa, 8); | |
2b9feef2 AE |
503 | if (!trans) { |
504 | dev_err(&ipa->pdev->dev, "no transaction for table setup\n"); | |
505 | return -EBUSY; | |
506 | } | |
507 | ||
508 | ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT, | |
25116645 | 509 | IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED); |
2b9feef2 AE |
510 | |
511 | ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT, | |
25116645 | 512 | IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED); |
2b9feef2 AE |
513 | |
514 | ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT, | |
25116645 | 515 | IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED); |
2b9feef2 AE |
516 | |
517 | ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT, | |
25116645 | 518 | IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED); |
2b9feef2 AE |
519 | |
520 | gsi_trans_commit_wait(trans); | |
521 | ||
522 | return 0; | |
523 | } | |
524 | ||
2b9feef2 AE |
525 | /** |
526 | * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple | |
e3eea08e | 527 | * @endpoint: Endpoint whose filter hash tuple should be zeroed |
2b9feef2 AE |
528 | * |
529 | * Endpoint must be for the AP (not modem) and support filtering. Updates | |
530 | * the filter hash values without changing route ones. | |
531 | */ | |
532 | static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint) | |
533 | { | |
534 | u32 endpoint_id = endpoint->endpoint_id; | |
6bfb7538 | 535 | struct ipa *ipa = endpoint->ipa; |
6a244b75 | 536 | const struct ipa_reg *reg; |
2b9feef2 AE |
537 | u32 offset; |
538 | u32 val; | |
539 | ||
6a244b75 | 540 | reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); |
2b9feef2 | 541 | |
181ca020 | 542 | offset = ipa_reg_n_offset(reg, endpoint_id); |
2b9feef2 AE |
543 | val = ioread32(endpoint->ipa->reg_virt + offset); |
544 | ||
545 | /* Zero all filter-related fields, preserving the rest */ | |
181ca020 | 546 | val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL); |
2b9feef2 AE |
547 | |
548 | iowrite32(val, endpoint->ipa->reg_virt + offset); | |
549 | } | |
550 | ||
74858b63 | 551 | /* Configure a hashed filter table; there is no ipa_filter_deconfig() */ |
2b9feef2 AE |
552 | static void ipa_filter_config(struct ipa *ipa, bool modem) |
553 | { | |
554 | enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP; | |
555 | u32 ep_mask = ipa->filter_map; | |
556 | ||
a266ad6b | 557 | if (!ipa_table_hash_support(ipa)) |
2b9feef2 AE |
558 | return; |
559 | ||
560 | while (ep_mask) { | |
561 | u32 endpoint_id = __ffs(ep_mask); | |
562 | struct ipa_endpoint *endpoint; | |
563 | ||
564 | ep_mask ^= BIT(endpoint_id); | |
565 | ||
566 | endpoint = &ipa->endpoint[endpoint_id]; | |
567 | if (endpoint->ee_id == ee_id) | |
568 | ipa_filter_tuple_zero(endpoint); | |
569 | } | |
570 | } | |
571 | ||
2b9feef2 AE |
572 | static bool ipa_route_id_modem(u32 route_id) |
573 | { | |
fb4014ac | 574 | return route_id < IPA_ROUTE_MODEM_COUNT; |
2b9feef2 AE |
575 | } |
576 | ||
577 | /** | |
578 | * ipa_route_tuple_zero() - Zero a hashed route table entry tuple | |
e3eea08e | 579 | * @ipa: IPA pointer |
2b9feef2 AE |
580 | * @route_id: Route table entry whose hash tuple should be zeroed |
581 | * | |
582 | * Updates the route hash values without changing filter ones. | |
583 | */ | |
584 | static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id) | |
585 | { | |
6a244b75 | 586 | const struct ipa_reg *reg; |
6bfb7538 | 587 | u32 offset; |
2b9feef2 AE |
588 | u32 val; |
589 | ||
6a244b75 AE |
590 | reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); |
591 | offset = ipa_reg_n_offset(reg, route_id); | |
6bfb7538 | 592 | |
2b9feef2 AE |
593 | val = ioread32(ipa->reg_virt + offset); |
594 | ||
595 | /* Zero all route-related fields, preserving the rest */ | |
181ca020 | 596 | val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL); |
2b9feef2 AE |
597 | |
598 | iowrite32(val, ipa->reg_virt + offset); | |
599 | } | |
600 | ||
74858b63 | 601 | /* Configure a hashed route table; there is no ipa_route_deconfig() */ |
2b9feef2 AE |
602 | static void ipa_route_config(struct ipa *ipa, bool modem) |
603 | { | |
604 | u32 route_id; | |
605 | ||
a266ad6b | 606 | if (!ipa_table_hash_support(ipa)) |
2b9feef2 AE |
607 | return; |
608 | ||
609 | for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++) | |
610 | if (ipa_route_id_modem(route_id) == modem) | |
611 | ipa_route_tuple_zero(ipa, route_id); | |
612 | } | |
613 | ||
74858b63 | 614 | /* Configure a filter and route tables; there is no ipa_table_deconfig() */ |
2b9feef2 AE |
615 | void ipa_table_config(struct ipa *ipa) |
616 | { | |
617 | ipa_filter_config(ipa, false); | |
618 | ipa_filter_config(ipa, true); | |
619 | ipa_route_config(ipa, false); | |
620 | ipa_route_config(ipa, true); | |
621 | } | |
622 | ||
cf139196 AE |
623 | /* Zero modem_route_count means filter table memory check */ |
624 | static bool ipa_table_mem_valid(struct ipa *ipa, bool modem_route_count) | |
625 | { | |
626 | bool hash_support = ipa_table_hash_support(ipa); | |
627 | bool filter = !modem_route_count; | |
628 | const struct ipa_mem *mem_hashed; | |
629 | const struct ipa_mem *mem_ipv4; | |
630 | const struct ipa_mem *mem_ipv6; | |
631 | u32 count; | |
632 | ||
633 | /* IPv4 and IPv6 non-hashed tables are expected to be defined and | |
634 | * have the same size. Both must have at least two entries (and | |
635 | * would normally have more than that). | |
636 | */ | |
637 | mem_ipv4 = ipa_table_mem(ipa, filter, false, false); | |
638 | if (!mem_ipv4) | |
639 | return false; | |
640 | ||
641 | mem_ipv6 = ipa_table_mem(ipa, filter, false, true); | |
642 | if (!mem_ipv6) | |
643 | return false; | |
644 | ||
645 | if (mem_ipv4->size != mem_ipv6->size) | |
646 | return false; | |
647 | ||
648 | /* Make sure the regions are big enough */ | |
649 | count = mem_ipv4->size / sizeof(__le64); | |
650 | if (count < 2) | |
651 | return false; | |
652 | if (filter) { | |
653 | /* Filter tables must able to hold the endpoint bitmap plus | |
654 | * an entry for each endpoint that supports filtering | |
655 | */ | |
656 | if (count < 1 + hweight32(ipa->filter_map)) | |
657 | return false; | |
658 | } else { | |
659 | /* Routing tables must be able to hold all modem entries, | |
660 | * plus at least one entry for the AP. | |
661 | */ | |
662 | if (count < modem_route_count + 1) | |
663 | return false; | |
664 | } | |
665 | ||
666 | /* If hashing is supported, hashed tables are expected to be defined, | |
667 | * and have the same size as non-hashed tables. If hashing is not | |
668 | * supported, hashed tables are expected to have zero size (or not | |
669 | * be defined). | |
670 | */ | |
671 | mem_hashed = ipa_table_mem(ipa, filter, true, false); | |
672 | if (hash_support) { | |
673 | if (!mem_hashed || mem_hashed->size != mem_ipv4->size) | |
674 | return false; | |
675 | } else { | |
676 | if (mem_hashed && mem_hashed->size) | |
677 | return false; | |
678 | } | |
679 | ||
680 | /* Same check for IPv6 tables */ | |
681 | mem_hashed = ipa_table_mem(ipa, filter, true, true); | |
682 | if (hash_support) { | |
683 | if (!mem_hashed || mem_hashed->size != mem_ipv6->size) | |
684 | return false; | |
685 | } else { | |
686 | if (mem_hashed && mem_hashed->size) | |
687 | return false; | |
688 | } | |
689 | ||
690 | return true; | |
691 | } | |
692 | ||
693 | /* Initialize a coherent DMA allocation containing initialized filter and | |
2b9feef2 AE |
694 | * route table data. This is used when initializing or resetting the IPA |
695 | * filter or route table. | |
696 | * | |
697 | * The first entry in a filter table contains a bitmap indicating which | |
698 | * endpoints contain entries in the table. In addition to that first entry, | |
699 | * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table | |
700 | * entries are 64 bits wide, and (other than the bitmap) contain the DMA | |
701 | * address of a filter rule. A "zero rule" indicates no filtering, and | |
702 | * consists of 64 bits of zeroes. When a filter table is initialized (or | |
703 | * reset) its entries are made to refer to the zero rule. | |
704 | * | |
705 | * Each entry in a route table is the DMA address of a routing rule. For | |
706 | * routing there is also a 64-bit "zero rule" that means no routing, and | |
707 | * when a route table is initialized or reset, its entries are made to refer | |
708 | * to the zero rule. The zero rule is shared for route and filter tables. | |
709 | * | |
710 | * Note that the IPA hardware requires a filter or route rule address to be | |
711 | * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here | |
712 | * has a minimum alignment, and we place the zero rule at the base of that | |
713 | * allocated space. In ipa_table_init() we verify the minimum DMA allocation | |
714 | * meets our requirement. | |
715 | * | |
716 | * +-------------------+ | |
717 | * --> | zero rule | | |
718 | * / |-------------------| | |
719 | * | | filter mask | | |
720 | * |\ |-------------------| | |
721 | * | ---- zero rule address | \ | |
722 | * |\ |-------------------| | | |
723 | * | ---- zero rule address | | IPA_FILTER_COUNT_MAX | |
724 | * | |-------------------| > or IPA_ROUTE_COUNT_MAX, | |
725 | * | ... | whichever is greater | |
726 | * \ |-------------------| | | |
727 | * ---- zero rule address | / | |
728 | * +-------------------+ | |
729 | */ | |
730 | int ipa_table_init(struct ipa *ipa) | |
731 | { | |
732 | u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX); | |
733 | struct device *dev = &ipa->pdev->dev; | |
734 | dma_addr_t addr; | |
735 | __le64 le_addr; | |
736 | __le64 *virt; | |
737 | size_t size; | |
738 | ||
739 | ipa_table_validate_build(); | |
740 | ||
cf139196 AE |
741 | if (!ipa_table_mem_valid(ipa, 0)) |
742 | return -EINVAL; | |
743 | if (!ipa_table_mem_valid(ipa, IPA_ROUTE_MODEM_COUNT)) | |
744 | return -EINVAL; | |
745 | ||
19aaf72c AE |
746 | /* The IPA hardware requires route and filter table rules to be |
747 | * aligned on a 128-byte boundary. We put the "zero rule" at the | |
748 | * base of the table area allocated here. The DMA address returned | |
749 | * by dma_alloc_coherent() is guaranteed to be a power-of-2 number | |
750 | * of pages, which satisfies the rule alignment requirement. | |
751 | */ | |
4ea29143 | 752 | size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); |
2b9feef2 AE |
753 | virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); |
754 | if (!virt) | |
755 | return -ENOMEM; | |
756 | ||
757 | ipa->table_virt = virt; | |
758 | ipa->table_addr = addr; | |
759 | ||
760 | /* First slot is the zero rule */ | |
761 | *virt++ = 0; | |
762 | ||
763 | /* Next is the filter table bitmap. The "soft" bitmap value | |
764 | * must be converted to the hardware representation by shifting | |
765 | * it left one position. (Bit 0 repesents global filtering, | |
766 | * which is possible but not used.) | |
767 | */ | |
768 | *virt++ = cpu_to_le64((u64)ipa->filter_map << 1); | |
769 | ||
770 | /* All the rest contain the DMA address of the zero rule */ | |
771 | le_addr = cpu_to_le64(addr); | |
772 | while (count--) | |
773 | *virt++ = le_addr; | |
774 | ||
775 | return 0; | |
776 | } | |
777 | ||
778 | void ipa_table_exit(struct ipa *ipa) | |
779 | { | |
780 | u32 count = max_t(u32, 1 + IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX); | |
781 | struct device *dev = &ipa->pdev->dev; | |
782 | size_t size; | |
783 | ||
4ea29143 | 784 | size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); |
2b9feef2 AE |
785 | |
786 | dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); | |
787 | ipa->table_addr = 0; | |
788 | ipa->table_virt = NULL; | |
789 | } |