Commit | Line | Data |
---|---|---|
2b9feef2 AE |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. | |
a4388da5 | 4 | * Copyright (C) 2018-2022 Linaro Ltd. |
2b9feef2 AE |
5 | */ |
6 | ||
7 | #include <linux/types.h> | |
8 | #include <linux/kernel.h> | |
9 | #include <linux/bits.h> | |
10 | #include <linux/bitops.h> | |
11 | #include <linux/bitfield.h> | |
12 | #include <linux/io.h> | |
13 | #include <linux/build_bug.h> | |
14 | #include <linux/device.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | ||
17 | #include "ipa.h" | |
18 | #include "ipa_version.h" | |
19 | #include "ipa_endpoint.h" | |
20 | #include "ipa_table.h" | |
21 | #include "ipa_reg.h" | |
22 | #include "ipa_mem.h" | |
23 | #include "ipa_cmd.h" | |
24 | #include "gsi.h" | |
25 | #include "gsi_trans.h" | |
26 | ||
27 | /** | |
28 | * DOC: IPA Filter and Route Tables | |
29 | * | |
4ea29143 AE |
30 | * The IPA has tables defined in its local (IPA-resident) memory that define |
31 | * filter and routing rules. An entry in either of these tables is a little | |
32 | * endian 64-bit "slot" that holds the address of a rule definition. (The | |
33 | * size of these slots is 64 bits regardless of the host DMA address size.) | |
34 | * | |
35 | * Separate tables (both filter and route) are used for IPv4 and IPv6. There |
36 | * are normally another set of "hashed" filter and route tables, which are | |
37 | * used with a hash of message metadata. Hashed operation is not supported | |
38 | * by all IPA hardware (IPA v4.2 doesn't support hashed tables). | |
39 | * | |
40 | * Rules can be in local memory or in DRAM (system memory). The offset of | |
41 | * an object (such as a route or filter table) in IPA-resident memory must | |
42 | * be 128-byte aligned. An object in system memory (such as a route or filter |
43 | * rule) must be at an 8-byte aligned address. We currently only place | |
44 | * route or filter rules in system memory. | |
45 | * | |
2b9feef2 AE |
46 | * A rule consists of a contiguous block of 32-bit values terminated with |
47 | * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits | |
48 | * represents "no filtering" or "no routing," and is the reset value for | |
4ea29143 | 49 | * filter or route table rules. |
2b9feef2 AE |
50 | * |
51 | * Each filter rule is associated with an AP or modem TX endpoint, though | |
4ea29143 | 52 | * not all TX endpoints support filtering. The first 64-bit slot in a |
2b9feef2 AE |
53 | * filter table is a bitmap indicating which endpoints have entries in |
54 | * the table. The low-order bit (bit 0) in this bitmap represents a | |
55 | * special global filter, which applies to all traffic. This is not | |
56 | * used in the current code. Bit 1, if set, indicates that there is an | |
4ea29143 AE |
57 | * entry (i.e. slot containing a system address referring to a rule) for |
58 | * endpoint 0 in the table. Bit 3, if set, indicates there is an entry | |
59 | * for endpoint 2, and so on. Space is set aside in IPA local memory to | |
60 | * hold as many filter table entries as might be required, but typically | |
61 | * they are not all used. | |
2b9feef2 AE |
62 | * |
63 | * The AP initializes all entries in a filter table to refer to a "zero" | |
64 | * entry. Once initialized the modem and AP update the entries for | |
65 | * endpoints they "own" directly. Currently the AP does not use the | |
66 | * IPA filtering functionality. | |
67 | * | |
68 | * IPA Filter Table | |
69 | * ---------------------- | |
70 | * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5) | |
71 | * |--------------------| | |
72 | * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule | |
73 | * |--------------------| | |
74 | * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule | |
75 | * |--------------------| | |
76 | * (unused) | | (Unused space in filter table) | |
77 | * |--------------------| | |
78 | * . . . | |
79 | * |--------------------| | |
80 | * (unused) | | (Unused space in filter table) | |
81 | * ---------------------- | |
82 | * | |
83 | * The set of available route rules is divided about equally between the AP | |
84 | * and modem. The AP initializes all entries in a route table to refer to | |
85 | * a "zero entry". Once initialized, the modem and AP are responsible for | |
86 | * updating their own entries. All entries in a route table are usable, | |
87 | * though the AP currently does not use the IPA routing functionality. | |
88 | * | |
89 | * IPA Route Table | |
90 | * ---------------------- | |
91 | * 1st modem route | 0x0001234500001100 | DMA address for first route rule | |
92 | * |--------------------| | |
93 | * 2nd modem route | 0x0001234500001140 | DMA address for second route rule | |
94 | * |--------------------| | |
95 | * . . . | |
96 | * |--------------------| | |
97 | * Last modem route| 0x0001234500002280 | DMA address for Nth route rule | |
98 | * |--------------------| | |
99 | * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1) | |
100 | * |--------------------| | |
101 | * 2nd AP route | 0x0001234500001140 | DMA address for next route rule | |
102 | * |--------------------| | |
103 | * . . . | |
104 | * |--------------------| | |
105 | * Last AP route | 0x0001234500002280 | DMA address for last route rule | |
106 | * ---------------------- | |
107 | */ | |
108 | ||
2b9feef2 AE |
109 | /* Filter or route rules consist of a set of 32-bit values followed by a |
110 | * 32-bit all-zero rule list terminator. The "zero rule" is simply an | |
111 | * all-zero rule followed by the list terminator. | |
112 | */ | |
113 | #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32)) | |
114 | ||
2b9feef2 AE |
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
	/* Filter and route tables contain DMA addresses that refer
	 * to filter or route rules.  But the size of a table entry
	 * is 64 bits regardless of what the size of an AP DMA address
	 * is.  A fixed constant defines the size of an entry, and
	 * code in ipa_table_init() uses a pointer to __le64 to
	 * initialize tables.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));

	/* A "zero rule" is used to represent no filtering or no routing.
	 * It is a 64-bit block of zeroed memory.  Code in ipa_table_init()
	 * assumes that it can be written using a pointer to __le64.
	 */
	BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));

	/* Impose a practical limit on the number of routes */
	BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
	/* The modem must be allotted at least one route table entry */
	BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
	/* AP must too, but we can't use more than what is available */
	BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT >= IPA_ROUTE_COUNT_MAX);
}
2b9feef2 | 140 | |
cf139196 AE |
141 | static const struct ipa_mem * |
142 | ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6) | |
143 | { | |
144 | enum ipa_mem_id mem_id; | |
145 | ||
146 | mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED | |
147 | : IPA_MEM_V4_FILTER_HASHED | |
148 | : ipv6 ? IPA_MEM_V6_FILTER | |
149 | : IPA_MEM_V4_FILTER | |
150 | : hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED | |
151 | : IPA_MEM_V4_ROUTE_HASHED | |
152 | : ipv6 ? IPA_MEM_V6_ROUTE | |
153 | : IPA_MEM_V4_ROUTE; | |
154 | ||
155 | return ipa_mem_find(ipa, mem_id); | |
2b9feef2 AE |
156 | } |
157 | ||
2b9feef2 AE |
158 | bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map) |
159 | { | |
160 | struct device *dev = &ipa->pdev->dev; | |
161 | u32 count; | |
162 | ||
163 | if (!filter_map) { | |
164 | dev_err(dev, "at least one filtering endpoint is required\n"); | |
165 | ||
166 | return false; | |
167 | } | |
168 | ||
169 | count = hweight32(filter_map); | |
170 | if (count > IPA_FILTER_COUNT_MAX) { | |
171 | dev_err(dev, "too many filtering endpoints (%u, max %u)\n", | |
172 | count, IPA_FILTER_COUNT_MAX); | |
173 | ||
174 | return false; | |
175 | } | |
176 | ||
177 | return true; | |
178 | } | |
179 | ||
2b9feef2 AE |
180 | /* Zero entry count means no table, so just return a 0 address */ |
181 | static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) | |
182 | { | |
183 | u32 skip; | |
184 | ||
185 | if (!count) | |
186 | return 0; | |
187 | ||
5bc55884 | 188 | WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); |
2b9feef2 AE |
189 | |
190 | /* Skip over the zero rule and possibly the filter mask */ | |
191 | skip = filter_mask ? 1 : 2; | |
192 | ||
193 | return ipa->table_addr + skip * sizeof(*ipa->table_virt); | |
194 | } | |
195 | ||
/* Add a DMA command to @trans that overwrites @count slots, starting at
 * slot @first in the table held in memory region @mem_id, so that each
 * slot refers to the shared "zero rule" (no filtering / no routing).
 * For a filter table, @first is an entry index that excludes the bitmap
 * slot, so one extra slot is skipped.
 */
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
				u16 first, u16 count, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr;
	u32 offset;
	u16 size;

	/* Nothing to do if the table memory region is empty */
	if (!mem->size)
		return;

	if (filter)
		first++;	/* skip over bitmap */

	offset = mem->offset + first * sizeof(__le64);
	size = count * sizeof(__le64);
	/* Source is the run of zero-rule addresses in the DMA buffer;
	 * filter_mask is false so the bitmap slot is skipped too.
	 */
	addr = ipa_table_addr(ipa, false, count);

	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}
218 | ||
/* Reset entries in a single filter table belonging to either the AP or
 * modem to refer to the zero entry.  The memory region supplied will be
 * for the IPv4 and IPv6 non-hashed and hashed filter tables.
 *
 * Returns 0 on success, or -EBUSY if no command transaction could be
 * allocated.
 */
static int
ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
	u32 ep_mask = ipa->filter_map;
	u32 count = hweight32(ep_mask);
	struct gsi_trans *trans;
	enum gsi_ee_id ee_id;

	/* One command per filtering endpoint is (more than) enough,
	 * since only endpoints owned by the given EE are reset below.
	 */
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for %s filter reset\n",
			modem ? "modem" : "AP");
		return -EBUSY;
	}

	ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
	while (ep_mask) {
		u32 endpoint_id = __ffs(ep_mask);
		struct ipa_endpoint *endpoint;

		ep_mask ^= BIT(endpoint_id);

		/* Skip endpoints owned by the other execution environment */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->ee_id != ee_id)
			continue;

		ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
	}

	gsi_trans_commit_wait(trans);

	return 0;
}
257 | ||
258 | /* Theoretically, each filter table could have more filter slots to | |
259 | * update than the maximum number of commands in a transaction. So | |
260 | * we do each table separately. | |
261 | */ | |
262 | static int ipa_filter_reset(struct ipa *ipa, bool modem) | |
263 | { | |
264 | int ret; | |
265 | ||
07c525a6 | 266 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem); |
2b9feef2 AE |
267 | if (ret) |
268 | return ret; | |
269 | ||
07c525a6 | 270 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem); |
2b9feef2 AE |
271 | if (ret) |
272 | return ret; | |
273 | ||
07c525a6 | 274 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem); |
2b9feef2 AE |
275 | if (ret) |
276 | return ret; | |
07c525a6 | 277 | ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem); |
2b9feef2 AE |
278 | |
279 | return ret; | |
280 | } | |
281 | ||
/* The AP routes and modem routes are each contiguous within the
 * table.  We can update each table with a single command, and we
 * won't exceed the per-transaction command limit.
 */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
	struct gsi_trans *trans;
	u16 first;
	u16 count;

	/* Four commands: IPv4/IPv6 x non-hashed/hashed route tables */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for %s route reset\n",
			modem ? "modem" : "AP");
		return -EBUSY;
	}

	/* Modem entries occupy the start of each route table; the AP
	 * gets whatever follows them.
	 */
	if (modem) {
		first = 0;
		count = IPA_ROUTE_MODEM_COUNT;
	} else {
		first = IPA_ROUTE_MODEM_COUNT;
		count = IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT;
	}

	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
	ipa_table_reset_add(trans, false, first, count,
			    IPA_MEM_V4_ROUTE_HASHED);

	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
	ipa_table_reset_add(trans, false, first, count,
			    IPA_MEM_V6_ROUTE_HASHED);

	gsi_trans_commit_wait(trans);

	return 0;
}
320 | ||
321 | void ipa_table_reset(struct ipa *ipa, bool modem) | |
322 | { | |
323 | struct device *dev = &ipa->pdev->dev; | |
324 | const char *ee_name; | |
325 | int ret; | |
326 | ||
327 | ee_name = modem ? "modem" : "AP"; | |
328 | ||
329 | /* Report errors, but reset filter and route tables */ | |
330 | ret = ipa_filter_reset(ipa, modem); | |
331 | if (ret) | |
332 | dev_err(dev, "error %d resetting filter table for %s\n", | |
333 | ret, ee_name); | |
334 | ||
335 | ret = ipa_route_reset(ipa, modem); | |
336 | if (ret) | |
337 | dev_err(dev, "error %d resetting route table for %s\n", | |
338 | ret, ee_name); | |
339 | } | |
340 | ||
/* Tell the hardware to flush (invalidate) its cached hash results for
 * all four hashed tables (IPv4/IPv6 filter and route).  Does nothing
 * on hardware without hashed table support.  Returns 0 on success or
 * -EBUSY if no command transaction could be allocated.
 */
int ipa_table_hash_flush(struct ipa *ipa)
{
	const struct ipa_reg *reg;
	struct gsi_trans *trans;
	u32 offset;
	u32 val;

	if (!ipa_table_hash_support(ipa))
		return 0;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
		return -EBUSY;
	}

	reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
	offset = ipa_reg_offset(reg);

	/* Set the flush bit for each of the four hashed tables */
	val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
	val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
	val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
	val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);

	/* Write the flush bits, masked so only those bits are affected */
	ipa_cmd_register_write_add(trans, offset, val, val, false);

	gsi_trans_commit_wait(trans);

	return 0;
}
371 | ||
/* Add commands to @trans that initialize a non-hashed/hashed table pair
 * (memory regions @mem_id and @hash_mem_id) using the table-init
 * immediate command @opcode.  Every initialized slot refers to the
 * shared "zero rule"; for filter tables the unused tail of each region
 * is additionally zeroed.
 */
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
			       enum ipa_cmd_opcode opcode,
			       enum ipa_mem_id mem_id,
			       enum ipa_mem_id hash_mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t hash_addr;
	dma_addr_t addr;
	u32 zero_offset;
	u16 hash_count;
	u32 zero_size;
	u16 hash_size;
	u16 count;
	u16 size;

	/* Compute the number of table entries to initialize */
	if (filter) {
		/* The number of filtering endpoints determines number of
		 * entries in the filter table; we also add one more "slot"
		 * to hold the bitmap itself.  The size of the hashed filter
		 * table is either the same as the non-hashed one, or zero.
		 */
		count = 1 + hweight32(ipa->filter_map);
		hash_count = hash_mem->size ? count : 0;
	} else {
		/* The size of a route table region determines the number
		 * of entries it has.
		 */
		count = mem->size / sizeof(__le64);
		hash_count = hash_mem->size / sizeof(__le64);
	}
	size = count * sizeof(__le64);
	hash_size = hash_count * sizeof(__le64);

	/* Source data in the DMA buffer (bitmap + zero-rule addresses
	 * for filter, zero-rule addresses only for route).
	 */
	addr = ipa_table_addr(ipa, filter, count);
	hash_addr = ipa_table_addr(ipa, filter, hash_count);

	ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
			       hash_size, hash_mem->offset, hash_addr);
	if (!filter)
		return;

	/* Zero the unused space in the filter table */
	zero_offset = mem->offset + size;
	zero_size = mem->size - size;
	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
				   ipa->zero_addr, true);
	if (!hash_size)
		return;

	/* Zero the unused space in the hashed filter table */
	zero_offset = hash_mem->offset + hash_size;
	zero_size = hash_mem->size - hash_size;
	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
				   ipa->zero_addr, true);
}
430 | ||
/* Initialize all IPv4/IPv6 filter and route tables (non-hashed and
 * hashed) in IPA-local memory using a single command transaction.
 * Returns 0 on success, or -EBUSY if no transaction could be allocated.
 */
int ipa_table_setup(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* We will need at most 8 TREs:
	 * - IPv4:
	 *     - One for route table initialization (non-hashed and hashed)
	 *     - One for filter table initialization (non-hashed and hashed)
	 *     - One to zero unused entries in the non-hashed filter table
	 *     - One to zero unused entries in the hashed filter table
	 * - IPv6:
	 *     - One for route table initialization (non-hashed and hashed)
	 *     - One for filter table initialization (non-hashed and hashed)
	 *     - One to zero unused entries in the non-hashed filter table
	 *     - One to zero unused entries in the hashed filter table
	 * All platforms support at least 8 TREs in a transaction.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 8);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
		return -EBUSY;
	}

	ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
			   IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);

	ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
			   IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);

	ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
			   IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);

	ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
			   IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);

	gsi_trans_commit_wait(trans);

	return 0;
}
470 | ||
/**
 * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
 * @endpoint: Endpoint whose filter hash tuple should be zeroed
 *
 * Endpoint must be for the AP (not modem) and support filtering.  Updates
 * the filter hash values without changing route ones.
 */
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 offset;
	u32 val;

	reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);

	/* The register is per-endpoint; read-modify-write this one's */
	offset = ipa_reg_n_offset(reg, endpoint_id);
	val = ioread32(endpoint->ipa->reg_virt + offset);

	/* Zero all filter-related fields, preserving the rest */
	val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
496 | ||
74858b63 | 497 | /* Configure a hashed filter table; there is no ipa_filter_deconfig() */ |
2b9feef2 AE |
498 | static void ipa_filter_config(struct ipa *ipa, bool modem) |
499 | { | |
500 | enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP; | |
501 | u32 ep_mask = ipa->filter_map; | |
502 | ||
a266ad6b | 503 | if (!ipa_table_hash_support(ipa)) |
2b9feef2 AE |
504 | return; |
505 | ||
506 | while (ep_mask) { | |
507 | u32 endpoint_id = __ffs(ep_mask); | |
508 | struct ipa_endpoint *endpoint; | |
509 | ||
510 | ep_mask ^= BIT(endpoint_id); | |
511 | ||
512 | endpoint = &ipa->endpoint[endpoint_id]; | |
513 | if (endpoint->ee_id == ee_id) | |
514 | ipa_filter_tuple_zero(endpoint); | |
515 | } | |
516 | } | |
517 | ||
/* Return whether the given route table entry is allotted to the modem.
 * Modem entries occupy the first IPA_ROUTE_MODEM_COUNT slots of each
 * route table; the remaining slots belong to the AP.
 */
static bool ipa_route_id_modem(u32 route_id)
{
	return route_id < IPA_ROUTE_MODEM_COUNT;
}
522 | ||
/**
 * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
 * @ipa:	IPA pointer
 * @route_id:	Route table entry whose hash tuple should be zeroed
 *
 * Updates the route hash values without changing filter ones.
 */
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
	const struct ipa_reg *reg;
	u32 offset;
	u32 val;

	reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
	offset = ipa_reg_n_offset(reg, route_id);

	/* Read-modify-write so filter-related fields are untouched */
	val = ioread32(ipa->reg_virt + offset);

	/* Zero all route-related fields, preserving the rest */
	val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);

	iowrite32(val, ipa->reg_virt + offset);
}
546 | ||
74858b63 | 547 | /* Configure a hashed route table; there is no ipa_route_deconfig() */ |
2b9feef2 AE |
548 | static void ipa_route_config(struct ipa *ipa, bool modem) |
549 | { | |
550 | u32 route_id; | |
551 | ||
a266ad6b | 552 | if (!ipa_table_hash_support(ipa)) |
2b9feef2 AE |
553 | return; |
554 | ||
555 | for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++) | |
556 | if (ipa_route_id_modem(route_id) == modem) | |
557 | ipa_route_tuple_zero(ipa, route_id); | |
558 | } | |
559 | ||
/* Configure a filter and route tables; there is no ipa_table_deconfig() */
void ipa_table_config(struct ipa *ipa)
{
	/* Zero hash tuples for both AP- and modem-owned entries */
	ipa_filter_config(ipa, false);
	ipa_filter_config(ipa, true);
	ipa_route_config(ipa, false);
	ipa_route_config(ipa, true);
}
568 | ||
cf139196 | 569 | /* Zero modem_route_count means filter table memory check */ |
73da9cac | 570 | bool ipa_table_mem_valid(struct ipa *ipa, bool modem_route_count) |
cf139196 AE |
571 | { |
572 | bool hash_support = ipa_table_hash_support(ipa); | |
573 | bool filter = !modem_route_count; | |
574 | const struct ipa_mem *mem_hashed; | |
575 | const struct ipa_mem *mem_ipv4; | |
576 | const struct ipa_mem *mem_ipv6; | |
577 | u32 count; | |
578 | ||
579 | /* IPv4 and IPv6 non-hashed tables are expected to be defined and | |
580 | * have the same size. Both must have at least two entries (and | |
581 | * would normally have more than that). | |
582 | */ | |
583 | mem_ipv4 = ipa_table_mem(ipa, filter, false, false); | |
584 | if (!mem_ipv4) | |
585 | return false; | |
586 | ||
587 | mem_ipv6 = ipa_table_mem(ipa, filter, false, true); | |
588 | if (!mem_ipv6) | |
589 | return false; | |
590 | ||
591 | if (mem_ipv4->size != mem_ipv6->size) | |
592 | return false; | |
593 | ||
5444b0ea AE |
594 | /* Table offset and size must fit in TABLE_INIT command fields */ |
595 | if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter)) | |
596 | return false; | |
597 | ||
cf139196 AE |
598 | /* Make sure the regions are big enough */ |
599 | count = mem_ipv4->size / sizeof(__le64); | |
600 | if (count < 2) | |
601 | return false; | |
602 | if (filter) { | |
603 | /* Filter tables must able to hold the endpoint bitmap plus | |
604 | * an entry for each endpoint that supports filtering | |
605 | */ | |
606 | if (count < 1 + hweight32(ipa->filter_map)) | |
607 | return false; | |
608 | } else { | |
609 | /* Routing tables must be able to hold all modem entries, | |
610 | * plus at least one entry for the AP. | |
611 | */ | |
612 | if (count < modem_route_count + 1) | |
613 | return false; | |
614 | } | |
615 | ||
616 | /* If hashing is supported, hashed tables are expected to be defined, | |
617 | * and have the same size as non-hashed tables. If hashing is not | |
618 | * supported, hashed tables are expected to have zero size (or not | |
619 | * be defined). | |
620 | */ | |
621 | mem_hashed = ipa_table_mem(ipa, filter, true, false); | |
622 | if (hash_support) { | |
623 | if (!mem_hashed || mem_hashed->size != mem_ipv4->size) | |
624 | return false; | |
625 | } else { | |
626 | if (mem_hashed && mem_hashed->size) | |
627 | return false; | |
628 | } | |
629 | ||
630 | /* Same check for IPv6 tables */ | |
631 | mem_hashed = ipa_table_mem(ipa, filter, true, true); | |
632 | if (hash_support) { | |
633 | if (!mem_hashed || mem_hashed->size != mem_ipv6->size) | |
634 | return false; | |
635 | } else { | |
636 | if (mem_hashed && mem_hashed->size) | |
637 | return false; | |
638 | } | |
639 | ||
640 | return true; | |
641 | } | |
642 | ||
/* Initialize a coherent DMA allocation containing initialized filter and
 * route table data.  This is used when initializing or resetting the IPA
 * filter or route table.
 *
 * The first entry in a filter table contains a bitmap indicating which
 * endpoints contain entries in the table.  In addition to that first entry,
 * there are at most IPA_FILTER_COUNT_MAX entries that follow.  Filter table
 * entries are 64 bits wide, and (other than the bitmap) contain the DMA
 * address of a filter rule.  A "zero rule" indicates no filtering, and
 * consists of 64 bits of zeroes.  When a filter table is initialized (or
 * reset) its entries are made to refer to the zero rule.
 *
 * Each entry in a route table is the DMA address of a routing rule.  For
 * routing there is also a 64-bit "zero rule" that means no routing, and
 * when a route table is initialized or reset, its entries are made to refer
 * to the zero rule.  The zero rule is shared for route and filter tables.
 *
 * Note that the IPA hardware requires a filter or route rule address to be
 * aligned on a 128 byte boundary.  The coherent DMA buffer we allocate here
 * has a minimum alignment, and we place the zero rule at the base of that
 * allocated space.  In ipa_table_init() we verify the minimum DMA allocation
 * meets our requirement.
 *
 *	     +-------------------+
 *	 --> |     zero rule     |
 *	/    |-------------------|
 *	|    |    filter mask    |
 *	|\   |-------------------|
 *	| ---- zero rule address | \
 *	|\   |-------------------|  |
 *	| ---- zero rule address |  |	IPA_FILTER_COUNT_MAX
 *	|    |-------------------|   >	or IPA_ROUTE_COUNT_MAX,
 *	|	      ...	    |	whichever is greater
 *	 \   |-------------------|  |
 *	  ---- zero rule address | /
 *	     +-------------------+
 */
int ipa_table_init(struct ipa *ipa)
{
	u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
	struct device *dev = &ipa->pdev->dev;
	dma_addr_t addr;
	__le64 le_addr;
	__le64 *virt;
	size_t size;

	ipa_table_validate_build();

	/* The IPA hardware requires route and filter table rules to be
	 * aligned on a 128-byte boundary.  We put the "zero rule" at the
	 * base of the table area allocated here.  The DMA address returned
	 * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
	 * of pages, which satisfies the rule alignment requirement.
	 */
	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
	virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	ipa->table_virt = virt;
	ipa->table_addr = addr;

	/* First slot is the zero rule */
	*virt++ = 0;

	/* Next is the filter table bitmap.  The "soft" bitmap value
	 * must be converted to the hardware representation by shifting
	 * it left one position.  (Bit 0 represents global filtering,
	 * which is possible but not used.)
	 */
	*virt++ = cpu_to_le64((u64)ipa->filter_map << 1);

	/* All the rest contain the DMA address of the zero rule */
	le_addr = cpu_to_le64(addr);
	while (count--)
		*virt++ = le_addr;

	return 0;
}
722 | ||
723 | void ipa_table_exit(struct ipa *ipa) | |
724 | { | |
725 | u32 count = max_t(u32, 1 + IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX); | |
726 | struct device *dev = &ipa->pdev->dev; | |
727 | size_t size; | |
728 | ||
4ea29143 | 729 | size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); |
2b9feef2 AE |
730 | |
731 | dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); | |
732 | ipa->table_addr = 0; | |
733 | ipa->table_virt = NULL; | |
734 | } |