// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

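/* Expanding the definitions: SKB_MAX_ORDER(NET_SKB_PAD, 0) is the payload
 * space left in one page after NET_SKB_PAD bytes of headroom and the
 * skb_shared_info tail are set aside, so the overhead above works out to
 * NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */
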
/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

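/* With natural alignment the fields above occupy 32 bytes, the element
 * size ipa_endpoint_status_parse() assumes when it walks a received
 * buffer (each packet is preceded by one such status element).
 */
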
/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}

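/* Example: a 32768 byte buffer with a hard limit encodes as
 * 32768 / SZ_1K = 32 (KB).  With a soft limit, IPA_MTU plus
 * IPA_RX_BUFFER_OVERHEAD (roughly 2 KB in total, assuming a 1500 byte
 * IPA_MTU) is deducted first, so the same buffer encodes as 30 KB,
 * leaving room for one more full packet after the boundary is crossed.
 */
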
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		limit = aggr_byte_limit_max(ipa->version);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);

			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	WARN_ON(!(mask & ipa->available));

	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}

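/* Example: sizeof(struct rmnet_map_header) is 4 bytes, as is each of the
 * checksum headers defined in <linux/if_rmnet.h>.  So with checksum
 * offload enabled a QMAP endpoint programs an 8 byte header: MAP header
 * plus UL checksum header (TX only) before IPA v4.5, or MAP header plus
 * the v5 checksum header in both directions starting with IPA v4.5.
 */
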
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->config.rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= HDR_ENDIANNESS_FMASK;	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

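/* Example: if the modem supplies metadata 0x12345678 with a packet, the
 * 0x000000ff mask retains only the low-order byte (0x78), which becomes
 * the mux_id in the QMAP header built for the received packet.  The mask
 * is byte-swapped when written because the hardware interprets header
 * fields as big endian.
 */
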
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		fmask = aggr_time_limit_fmask(true);
		val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     limit, field_max(fmask) * IPA_AGGR_GRANULARITY);

		return u32_encode_bits(val, fmask);
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     limit, field_max(fmask) * 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}

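/* Example: on IPA v4.5+ a 500 microsecond limit encodes as
 * DIV_ROUND_CLOSEST(500, 100) = 5 with pulse generator 0 selected.  A
 * limit too large for the field falls back to pulse generator 1, e.g.
 * 50000 microseconds becomes DIV_ROUND_CLOSEST(50000, 1000) = 50 with
 * AGGR_GRAN_SEL set (the exact cutoff depends on the field width).
 */
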
static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = rx_config->aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to the register,
 * representing the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that the high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

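/* Example: assuming a 5 bit base field, a tick count of 1000 (fls = 10)
 * gives scale = 10 - 5 = 5.  Rounding adds 1 << 4, making ticks 1016,
 * and the encoded base is 1016 >> 5 = 31.  The hardware then counts
 * 31 << 5 = 992 ticks, within one percent of the requested period.
 */
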
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte configures replicated packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
			       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

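/* For example, an skb with a linear area and three page fragments needs
 * four TREs, one per scatter/gather element.  If the channel's TLV FIFO
 * can't hold that many, skb_linearize() above collapses the skb so a
 * single TRE suffices, at the cost of copying the data.
 */
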
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->config.status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

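/* With IPA_REPLENISH_BATCH of 16, the loop above rings the channel
 * doorbell on every 16th committed buffer; the transactions in between
 * are queued without notifying the hardware, cutting doorbell writes.
 */
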
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

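/* Worked example: with no pad alignment and checksum offload enabled, a
 * 1400 byte packet consumes 32 + 1400 + 8 = 1440 bytes of buffer space
 * (status element, packet, rmnet_map_dl_csum_trailer).  If an 8192 byte
 * buffer held 7000 bytes in total, leaving 1192 unused, that packet's
 * truesize would be charged an extra
 * DIV_ROUND_CLOSEST(1192 * 1440, 7000) = 245 bytes.
 */
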
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}

1413 | void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) | |
1414 | { | |
1415 | u32 val; | |
1416 | ||
1417 | /* ROUTE_DIS is 0 */ | |
1418 | val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK); | |
1419 | val |= ROUTE_DEF_HDR_TABLE_FMASK; | |
1420 | val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK); | |
1421 | val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK); | |
1422 | val |= ROUTE_DEF_RETAIN_HDR_FMASK; | |
1423 | ||
1424 | iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); | |
1425 | } | |
1426 | ||
1427 | void ipa_endpoint_default_route_clear(struct ipa *ipa) | |
1428 | { | |
1429 | ipa_endpoint_default_route_set(ipa, 0); | |
1430 | } | |
1431 | ||
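
As a hedged illustration of the field packing used by ipa_endpoint_default_route_set(), the userspace sketch below re-creates GENMASK()- and u32_encode_bits()-style helpers and assembles a route register value. The field positions here are hypothetical stand-ins, not the real IPA ROUTE register layout.

/* Userspace sketch of bitfield packing; field positions hypothetical */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define ROUTE_DEF_PIPE_FMASK		GENMASK(5, 1)	/* hypothetical */
#define ROUTE_DEF_HDR_TABLE_FMASK	GENMASK(6, 6)	/* hypothetical */
#define ROUTE_DEF_HDR_OFST_FMASK	GENMASK(16, 7)	/* hypothetical */

/* Shift a value into the field described by a contiguous bit mask */
static uint32_t u32_encode_bits(uint32_t value, uint32_t mask)
{
	return (value << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t endpoint_id = 9;
	uint32_t val;

	/* ROUTE_DIS stays zero, keeping default routing enabled */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);

	printf("route register value: 0x%08x\n", val);

	return 0;
}
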
84f9bd12 AE |
1432 | /** |
1433 | * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active | |
1434 | * @endpoint: Endpoint to be reset | |
1435 | * | |
1436 | * If aggregation is active on an RX endpoint when a reset is performed | |
1437 | * on its underlying GSI channel, a special sequence of actions must be | |
1438 | * taken to ensure the IPA pipeline is properly cleared. | |
1439 | * | |
e3eea08e | 1440 | * Return: 0 if successful, or a negative error code |
84f9bd12 AE |
1441 | */ |
1442 | static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) | |
1443 | { | |
1444 | struct device *dev = &endpoint->ipa->pdev->dev; | |
1445 | struct ipa *ipa = endpoint->ipa; | |
84f9bd12 | 1446 | struct gsi *gsi = &ipa->gsi; |
4fa95248 | 1447 | bool suspended = false; |
84f9bd12 | 1448 | dma_addr_t addr; |
84f9bd12 AE |
1449 | u32 retries; |
1450 | u32 len = 1; | |
1451 | void *virt; | |
1452 | int ret; | |
1453 | ||
1454 | virt = kzalloc(len, GFP_KERNEL); | |
1455 | if (!virt) | |
1456 | return -ENOMEM; | |
1457 | ||
1458 | addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE); | |
1459 | if (dma_mapping_error(dev, addr)) { | |
1460 | ret = -ENOMEM; | |
1461 | goto out_kfree; | |
1462 | } | |
1463 | ||
1464 | /* Force close aggregation before issuing the reset */ | |
1465 | ipa_endpoint_force_close(endpoint); | |
1466 | ||
1467 | /* Reset and reconfigure the channel with the doorbell engine | |
1468 | * disabled. Then poll until we know aggregation is no longer | |
1469 | * active. We'll re-enable the doorbell (if appropriate) when | |
1470 | * we reset again below. | |
1471 | */ | |
1472 | gsi_channel_reset(gsi, endpoint->channel_id, false); | |
1473 | ||
1474 | /* Make sure the channel isn't suspended */ | |
b07f283e | 1475 | suspended = ipa_endpoint_program_suspend(endpoint, false); |
84f9bd12 AE |
1476 | |
1477 | /* Start the channel and do a 1-byte read */ | |
1478 | ret = gsi_channel_start(gsi, endpoint->channel_id); | |
1479 | if (ret) | |
1480 | goto out_suspend_again; | |
1481 | ||
1482 | ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); | |
1483 | if (ret) | |
1484 | goto err_endpoint_stop; | |
1485 | ||
1486 | /* Wait for aggregation to be closed on the channel */ | |
1487 | retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX; | |
1488 | do { | |
1489 | if (!ipa_endpoint_aggr_active(endpoint)) | |
1490 | break; | |
74401946 | 1491 | usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); |
84f9bd12 AE |
1492 | } while (retries--); |
1493 | ||
1494 | /* Check one last time */ | |
1495 | if (ipa_endpoint_aggr_active(endpoint)) | |
1496 | dev_err(dev, "endpoint %u still active during reset\n", | |
1497 | endpoint->endpoint_id); | |
1498 | ||
1499 | gsi_trans_read_byte_done(gsi, endpoint->channel_id); | |
1500 | ||
f30dcb7d | 1501 | ret = gsi_channel_stop(gsi, endpoint->channel_id); |
84f9bd12 AE |
1502 | if (ret) |
1503 | goto out_suspend_again; | |
1504 | ||
497abc87 | 1505 | /* Finally, reset and reconfigure the channel again (re-enabling |
84f9bd12 AE |
1506 | * the doorbell engine if appropriate). Sleep for 1 millisecond to |
1507 | * complete the channel reset sequence. Finish by suspending the | |
1508 | * channel again (if necessary). | |
1509 | */ | |
ce54993d | 1510 | gsi_channel_reset(gsi, endpoint->channel_id, true); |
84f9bd12 | 1511 | |
74401946 | 1512 | usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); |
84f9bd12 AE |
1513 | |
1514 | goto out_suspend_again; | |
1515 | ||
1516 | err_endpoint_stop: | |
f30dcb7d | 1517 | (void)gsi_channel_stop(gsi, endpoint->channel_id); |
84f9bd12 | 1518 | out_suspend_again: |
4fa95248 AE |
1519 | if (suspended) |
1520 | (void)ipa_endpoint_program_suspend(endpoint, true); | |
84f9bd12 AE |
1521 | dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE); |
1522 | out_kfree: | |
1523 | kfree(virt); | |
1524 | ||
1525 | return ret; | |
1526 | } | |
1527 | ||
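
The wait-for-aggregation-close loop above follows a common bounded-poll idiom: retry a condition check a fixed number of times with a short sleep between attempts, then check once more and report if it never cleared. Below is a minimal userspace sketch of that idiom, where aggr_active() is a hypothetical stand-in for ipa_endpoint_aggr_active().

/* Bounded-poll sketch; aggr_active() is a hypothetical stand-in */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESET_AGGR_RETRY_MAX	3

static int polls_remaining = 3;	/* pretend it closes on the third poll */

static bool aggr_active(void)
{
	return --polls_remaining > 0;
}

int main(void)
{
	unsigned int retries = RESET_AGGR_RETRY_MAX;

	do {
		if (!aggr_active())
			break;
		usleep(1000);	/* like the kernel's usleep_range(1000, 2000) */
	} while (retries--);

	/* Check one last time; the loop can run out of retries */
	if (aggr_active())
		fprintf(stderr, "aggregation still active after reset\n");
	else
		printf("aggregation closed\n");

	return 0;
}
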
1528 | static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) | |
1529 | { | |
1530 | u32 channel_id = endpoint->channel_id; | |
1531 | struct ipa *ipa = endpoint->ipa; | |
84f9bd12 AE |
1532 | bool special; |
1533 | int ret = 0; | |
1534 | ||
1535 | /* Prior to IPA v4.0 (e.g. on IPA v3.5.1), resetting an RX endpoint
1536 | * while aggregation is active requires special handling to recover.
1537 | * All other cases just need to reset the underlying GSI channel.
84f9bd12 | 1538 | */ |
d7f3087b | 1539 | special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && |
660e52d6 | 1540 | endpoint->config.aggregation; |
ce54993d | 1541 | if (special && ipa_endpoint_aggr_active(endpoint)) |
84f9bd12 AE |
1542 | ret = ipa_endpoint_reset_rx_aggr(endpoint); |
1543 | else | |
ce54993d | 1544 | gsi_channel_reset(&ipa->gsi, channel_id, true); |
84f9bd12 AE |
1545 | |
1546 | if (ret) | |
1547 | dev_err(&ipa->pdev->dev, | |
1548 | "error %d resetting channel %u for endpoint %u\n", | |
1549 | ret, endpoint->channel_id, endpoint->endpoint_id); | |
1550 | } | |
1551 | ||
84f9bd12 AE |
1552 | static void ipa_endpoint_program(struct ipa_endpoint *endpoint) |
1553 | { | |
4c9d631a AE |
1554 | if (endpoint->toward_ipa) { |
1555 | /* Newer versions of IPA use GSI channel flow control | |
1556 | * instead of endpoint DELAY mode to prevent sending data. | |
1557 | * Flow control is disabled for newly-allocated channels, | |
1558 | * and we can assume flow control is not (ever) enabled | |
1559 | * for AP TX channels. | |
1560 | */ | |
1561 | if (endpoint->ipa->version < IPA_VERSION_4_2) | |
1562 | ipa_endpoint_program_delay(endpoint, false); | |
1563 | } else { | |
1564 | /* Ensure suspend mode is off on all AP RX endpoints */ | |
b07f283e | 1565 | (void)ipa_endpoint_program_suspend(endpoint, false); |
4c9d631a | 1566 | } |
84f9bd12 | 1567 | ipa_endpoint_init_cfg(endpoint); |
647a05f3 | 1568 | ipa_endpoint_init_nat(endpoint); |
84f9bd12 | 1569 | ipa_endpoint_init_hdr(endpoint); |
fb57c3ea AE |
1570 | ipa_endpoint_init_hdr_ext(endpoint); |
1571 | ipa_endpoint_init_hdr_metadata_mask(endpoint); | |
1572 | ipa_endpoint_init_mode(endpoint); | |
1573 | ipa_endpoint_init_aggr(endpoint); | |
153213f0 AE |
1574 | if (!endpoint->toward_ipa) { |
1575 | if (endpoint->config.rx.holb_drop) | |
1576 | ipa_endpoint_init_hol_block_enable(endpoint, 0); | |
1577 | else | |
1578 | ipa_endpoint_init_hol_block_disable(endpoint); | |
1579 | } | |
fb57c3ea | 1580 | ipa_endpoint_init_deaggr(endpoint); |
2d265342 | 1581 | ipa_endpoint_init_rsrc_grp(endpoint); |
fb57c3ea | 1582 | ipa_endpoint_init_seq(endpoint); |
84f9bd12 AE |
1583 | ipa_endpoint_status(endpoint); |
1584 | } | |
1585 | ||
1586 | int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) | |
1587 | { | |
1588 | struct ipa *ipa = endpoint->ipa; | |
1589 | struct gsi *gsi = &ipa->gsi; | |
1590 | int ret; | |
1591 | ||
1592 | ret = gsi_channel_start(gsi, endpoint->channel_id); | |
1593 | if (ret) { | |
1594 | dev_err(&ipa->pdev->dev, | |
1595 | "error %d starting %cX channel %u for endpoint %u\n", | |
1596 | ret, endpoint->toward_ipa ? 'T' : 'R', | |
1597 | endpoint->channel_id, endpoint->endpoint_id); | |
1598 | return ret; | |
1599 | } | |
1600 | ||
1601 | if (!endpoint->toward_ipa) { | |
1602 | ipa_interrupt_suspend_enable(ipa->interrupt, | |
1603 | endpoint->endpoint_id); | |
1604 | ipa_endpoint_replenish_enable(endpoint); | |
1605 | } | |
1606 | ||
1607 | ipa->enabled |= BIT(endpoint->endpoint_id); | |
1608 | ||
1609 | return 0; | |
1610 | } | |
1611 | ||
1612 | void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) | |
1613 | { | |
1614 | u32 mask = BIT(endpoint->endpoint_id); | |
1615 | struct ipa *ipa = endpoint->ipa; | |
f30dcb7d | 1616 | struct gsi *gsi = &ipa->gsi; |
84f9bd12 AE |
1617 | int ret; |
1618 | ||
f30dcb7d | 1619 | if (!(ipa->enabled & mask)) |
84f9bd12 AE |
1620 | return; |
1621 | ||
f30dcb7d | 1622 | ipa->enabled ^= mask; |
84f9bd12 AE |
1623 | |
1624 | if (!endpoint->toward_ipa) { | |
1625 | ipa_endpoint_replenish_disable(endpoint); | |
1626 | ipa_interrupt_suspend_disable(ipa->interrupt, | |
1627 | endpoint->endpoint_id); | |
1628 | } | |
1629 | ||
1630 | /* Note that if stop fails, the channel's state is not well-defined */ | |
f30dcb7d | 1631 | ret = gsi_channel_stop(gsi, endpoint->channel_id); |
84f9bd12 AE |
1632 | if (ret) |
1633 | dev_err(&ipa->pdev->dev, | |
1634 | "error %d attempting to stop endpoint %u\n", ret, | |
1635 | endpoint->endpoint_id); | |
1636 | } | |
1637 | ||
84f9bd12 AE |
1638 | void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) |
1639 | { | |
1640 | struct device *dev = &endpoint->ipa->pdev->dev; | |
1641 | struct gsi *gsi = &endpoint->ipa->gsi; | |
84f9bd12 AE |
1642 | int ret; |
1643 | ||
1644 | if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) | |
1645 | return; | |
1646 | ||
ab4f71e5 | 1647 | if (!endpoint->toward_ipa) { |
84f9bd12 | 1648 | ipa_endpoint_replenish_disable(endpoint); |
4fa95248 | 1649 | (void)ipa_endpoint_program_suspend(endpoint, true); |
ab4f71e5 | 1650 | } |
84f9bd12 | 1651 | |
decfef0f | 1652 | ret = gsi_channel_suspend(gsi, endpoint->channel_id); |
84f9bd12 AE |
1653 | if (ret) |
1654 | dev_err(dev, "error %d suspending channel %u\n", ret, | |
1655 | endpoint->channel_id); | |
1656 | } | |
1657 | ||
1658 | void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) | |
1659 | { | |
1660 | struct device *dev = &endpoint->ipa->pdev->dev; | |
1661 | struct gsi *gsi = &endpoint->ipa->gsi; | |
84f9bd12 AE |
1662 | int ret; |
1663 | ||
1664 | if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) | |
1665 | return; | |
1666 | ||
b07f283e | 1667 | if (!endpoint->toward_ipa) |
4fa95248 | 1668 | (void)ipa_endpoint_program_suspend(endpoint, false); |
84f9bd12 | 1669 | |
decfef0f | 1670 | ret = gsi_channel_resume(gsi, endpoint->channel_id); |
84f9bd12 AE |
1671 | if (ret) |
1672 | dev_err(dev, "error %d resuming channel %u\n", ret, | |
1673 | endpoint->channel_id); | |
1674 | else if (!endpoint->toward_ipa) | |
1675 | ipa_endpoint_replenish_enable(endpoint); | |
1676 | } | |
1677 | ||
1678 | void ipa_endpoint_suspend(struct ipa *ipa) | |
1679 | { | |
d1704382 AE |
1680 | if (!ipa->setup_complete) |
1681 | return; | |
1682 | ||
84f9bd12 AE |
1683 | if (ipa->modem_netdev) |
1684 | ipa_modem_suspend(ipa->modem_netdev); | |
1685 | ||
1686 | ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); | |
1687 | ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); | |
1688 | } | |
1689 | ||
1690 | void ipa_endpoint_resume(struct ipa *ipa) | |
1691 | { | |
d1704382 AE |
1692 | if (!ipa->setup_complete) |
1693 | return; | |
1694 | ||
84f9bd12 AE |
1695 | ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); |
1696 | ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); | |
1697 | ||
1698 | if (ipa->modem_netdev) | |
1699 | ipa_modem_resume(ipa->modem_netdev); | |
1700 | } | |
1701 | ||
1702 | static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) | |
1703 | { | |
1704 | struct gsi *gsi = &endpoint->ipa->gsi; | |
1705 | u32 channel_id = endpoint->channel_id; | |
1706 | ||
1707 | /* Only AP endpoints get set up */ | |
1708 | if (endpoint->ee_id != GSI_EE_AP) | |
1709 | return; | |
1710 | ||
317595d2 | 1711 | endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; |
84f9bd12 AE |
1712 | if (!endpoint->toward_ipa) { |
1713 | /* RX transactions require a single TRE, so the maximum | |
1714 | * backlog is the same as the maximum outstanding TREs. | |
1715 | */ | |
c1aaa01d | 1716 | clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); |
998c0bd2 | 1717 | clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); |
84f9bd12 AE |
1718 | INIT_DELAYED_WORK(&endpoint->replenish_work, |
1719 | ipa_endpoint_replenish_work); | |
1720 | } | |
1721 | ||
1722 | ipa_endpoint_program(endpoint); | |
1723 | ||
1724 | endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); | |
1725 | } | |
1726 | ||
1727 | static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) | |
1728 | { | |
1729 | endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); | |
1730 | ||
1731 | if (!endpoint->toward_ipa) | |
1732 | cancel_delayed_work_sync(&endpoint->replenish_work); | |
1733 | ||
1734 | ipa_endpoint_reset(endpoint); | |
1735 | } | |
1736 | ||
1737 | void ipa_endpoint_setup(struct ipa *ipa) | |
1738 | { | |
1739 | u32 initialized = ipa->initialized; | |
1740 | ||
1741 | ipa->set_up = 0; | |
1742 | while (initialized) { | |
1743 | u32 endpoint_id = __ffs(initialized); | |
1744 | ||
1745 | initialized ^= BIT(endpoint_id); | |
1746 | ||
1747 | ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); | |
1748 | } | |
1749 | } | |
1750 | ||
1751 | void ipa_endpoint_teardown(struct ipa *ipa) | |
1752 | { | |
1753 | u32 set_up = ipa->set_up; | |
1754 | ||
1755 | while (set_up) { | |
1756 | u32 endpoint_id = __fls(set_up); | |
1757 | ||
1758 | set_up ^= BIT(endpoint_id); | |
1759 | ||
1760 | ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); | |
1761 | } | |
1762 | ipa->set_up = 0; | |
1763 | } | |
1764 | ||
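
ipa_endpoint_setup() walks its bitmap from the lowest set bit up (__ffs()), while ipa_endpoint_teardown() walks from the highest down (__fls()), so endpoints are torn down in the reverse of setup order. A minimal userspace sketch of both walks, using compiler builtins in place of the kernel helpers:

/* Bitmap-walk sketch; builtins stand in for kernel __ffs()/__fls() */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

static unsigned int ffs_bit(uint32_t mask)	/* like kernel __ffs() */
{
	return (unsigned int)__builtin_ctz(mask);
}

static unsigned int fls_bit(uint32_t mask)	/* like kernel __fls() */
{
	return 31u - (unsigned int)__builtin_clz(mask);
}

int main(void)
{
	uint32_t initialized = BIT(1) | BIT(5) | BIT(10);	/* example */
	uint32_t set_up = initialized;
	uint32_t mask = initialized;

	while (mask) {
		unsigned int endpoint_id = ffs_bit(mask);

		mask ^= BIT(endpoint_id);
		printf("set up endpoint %u\n", endpoint_id);	/* 1, 5, 10 */
	}

	while (set_up) {
		unsigned int endpoint_id = fls_bit(set_up);

		set_up ^= BIT(endpoint_id);
		printf("tear down endpoint %u\n", endpoint_id);	/* 10, 5, 1 */
	}

	return 0;
}
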
1765 | int ipa_endpoint_config(struct ipa *ipa) | |
1766 | { | |
1767 | struct device *dev = &ipa->pdev->dev; | |
1768 | u32 initialized; | |
1769 | u32 rx_base; | |
1770 | u32 rx_mask; | |
1771 | u32 tx_mask; | |
1772 | int ret = 0; | |
1773 | u32 max; | |
1774 | u32 val; | |
1775 | ||
110971d1 AE |
1776 | /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1777 | * Furthermore, the endpoints were not grouped such that TX | |
1778 | * endpoint numbers started with 0 and RX endpoints had numbers | |
1779 | * higher than all TX endpoints, so we can't do the simple | |
1780 | * direction check used for newer hardware below. | |
1781 | * | |
1782 | * For hardware that doesn't support the FLAVOR_0 register, | |
1783 | * just set the available mask to support any endpoint, and | |
1784 | * assume the configuration is valid. | |
1785 | */ | |
1786 | if (ipa->version < IPA_VERSION_3_5) { | |
1787 | ipa->available = ~0; | |
1788 | return 0; | |
1789 | } | |
1790 | ||
84f9bd12 AE |
1791 | /* Find out about the endpoints supplied by the hardware, and ensure |
1792 | * the highest one doesn't exceed the number we support. | |
1793 | */ | |
1794 | val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); | |
1795 | ||
1796 | /* Our RX is an IPA producer */ | |
716a115b AE |
1797 | rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); |
1798 | max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); | |
84f9bd12 AE |
1799 | if (max > IPA_ENDPOINT_MAX) { |
1800 | dev_err(dev, "too many endpoints (%u > %u)\n", | |
1801 | max, IPA_ENDPOINT_MAX); | |
1802 | return -EINVAL; | |
1803 | } | |
1804 | rx_mask = GENMASK(max - 1, rx_base); | |
1805 | ||
1806 | /* Our TX is an IPA consumer */ | |
716a115b | 1807 | max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); |
84f9bd12 AE |
1808 | tx_mask = GENMASK(max - 1, 0); |
1809 | ||
1810 | ipa->available = rx_mask | tx_mask; | |
1811 | ||
1812 | /* Check for initialized endpoints not supported by the hardware */ | |
1813 | if (ipa->initialized & ~ipa->available) { | |
1814 | dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", | |
1815 | ipa->initialized & ~ipa->available); | |
1816 | ret = -EINVAL; /* Report other errors too */ | |
1817 | } | |
1818 | ||
1819 | initialized = ipa->initialized; | |
1820 | while (initialized) { | |
1821 | u32 endpoint_id = __ffs(initialized); | |
1822 | struct ipa_endpoint *endpoint; | |
1823 | ||
1824 | initialized ^= BIT(endpoint_id); | |
1825 | ||
1826 | /* Make sure it's pointing in the right direction */ | |
1827 | endpoint = &ipa->endpoint[endpoint_id]; | |
602a1c76 | 1828 | if ((endpoint_id < rx_base) != endpoint->toward_ipa) { |
84f9bd12 AE |
1829 | dev_err(dev, "endpoint id %u wrong direction\n", |
1830 | endpoint_id); | |
1831 | ret = -EINVAL; | |
1832 | } | |
1833 | } | |
1834 | ||
1835 | return ret; | |
1836 | } | |
1837 | ||
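
To make the FLAVOR_0 decoding above concrete, here is a standalone sketch that derives rx_base, the RX and TX endpoint masks, and the final available mask from a register value. The field positions and the sample value are hypothetical; the real definitions live in the IPA register headers.

/* FLAVOR_0 decoding sketch; field positions and value are hypothetical */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define MAX_CONS_PIPES_FMASK	GENMASK(7, 0)	/* hypothetical */
#define MAX_PROD_PIPES_FMASK	GENMASK(15, 8)	/* hypothetical */
#define PROD_LOWEST_FMASK	GENMASK(23, 16)	/* hypothetical */

/* Extract the field described by a contiguous bit mask */
static uint32_t u32_get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t val = (12u << 16) | (8u << 8) | 12u;	/* fake FLAVOR_0 */
	uint32_t rx_base, rx_mask, tx_mask, max;

	/* RX endpoints (IPA producers) start at rx_base */
	rx_base = u32_get_bits(val, PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, MAX_PROD_PIPES_FMASK);
	rx_mask = GENMASK(max - 1, rx_base);

	/* TX endpoints (IPA consumers) start at 0 */
	max = u32_get_bits(val, MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	printf("available endpoints: 0x%08x\n", rx_mask | tx_mask);

	return 0;
}
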
1838 | void ipa_endpoint_deconfig(struct ipa *ipa) | |
1839 | { | |
1840 | ipa->available = 0; /* Nothing more to do */ | |
1841 | } | |
1842 | ||
1843 | static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, | |
1844 | const struct ipa_gsi_endpoint_data *data) | |
1845 | { | |
1846 | struct ipa_endpoint *endpoint; | |
1847 | ||
1848 | endpoint = &ipa->endpoint[data->endpoint_id]; | |
1849 | ||
1850 | if (data->ee_id == GSI_EE_AP) | |
1851 | ipa->channel_map[data->channel_id] = endpoint; | |
1852 | ipa->name_map[name] = endpoint; | |
1853 | ||
1854 | endpoint->ipa = ipa; | |
1855 | endpoint->ee_id = data->ee_id; | |
84f9bd12 AE |
1856 | endpoint->channel_id = data->channel_id; |
1857 | endpoint->endpoint_id = data->endpoint_id; | |
1858 | endpoint->toward_ipa = data->toward_ipa; | |
660e52d6 | 1859 | endpoint->config = data->endpoint.config; |
84f9bd12 AE |
1860 | |
1861 | ipa->initialized |= BIT(endpoint->endpoint_id); | |
1862 | } | |
1863 | ||
602a1c76 | 1864 | static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) |
84f9bd12 AE |
1865 | { |
1866 | endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); | |
1867 | ||
1868 | memset(endpoint, 0, sizeof(*endpoint)); | |
1869 | } | |
1870 | ||
1871 | void ipa_endpoint_exit(struct ipa *ipa) | |
1872 | { | |
1873 | u32 initialized = ipa->initialized; | |
1874 | ||
1875 | while (initialized) { | |
1876 | u32 endpoint_id = __fls(initialized); | |
1877 | ||
1878 | initialized ^= BIT(endpoint_id); | |
1879 | ||
1880 | ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); | |
1881 | } | |
1882 | memset(ipa->name_map, 0, sizeof(ipa->name_map)); | |
1883 | memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); | |
1884 | } | |
1885 | ||
1886 | /* Returns a bitmask of endpoints that support filtering, or 0 on error */ | |
1887 | u32 ipa_endpoint_init(struct ipa *ipa, u32 count, | |
1888 | const struct ipa_gsi_endpoint_data *data) | |
1889 | { | |
1890 | enum ipa_endpoint_name name; | |
1891 | u32 filter_map; | |
1892 | ||
9654d8c4 AE |
1893 | BUILD_BUG_ON(!IPA_REPLENISH_BATCH); |
1894 | ||
84f9bd12 AE |
1895 | if (!ipa_endpoint_data_valid(ipa, count, data)) |
1896 | return 0; /* Error */ | |
1897 | ||
1898 | ipa->initialized = 0; | |
1899 | ||
1900 | filter_map = 0; | |
1901 | for (name = 0; name < count; name++, data++) { | |
1902 | if (ipa_gsi_endpoint_data_empty(data)) | |
1903 | continue; /* Skip over empty slots */ | |
1904 | ||
1905 | ipa_endpoint_init_one(ipa, name, data); | |
1906 | ||
1907 | if (data->endpoint.filter_support) | |
1908 | filter_map |= BIT(data->endpoint_id); | |
2091c79a AE |
1909 | if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) |
1910 | ipa->modem_tx_count++; | |
84f9bd12 AE |
1911 | } |
1912 | ||
1913 | if (!ipa_filter_map_valid(ipa, filter_map)) | |
1914 | goto err_endpoint_exit; | |
1915 | ||
1916 | return filter_map; /* Non-zero bitmask */ | |
1917 | ||
1918 | err_endpoint_exit: | |
1919 | ipa_endpoint_exit(ipa); | |
1920 | ||
1921 | return 0; /* Error */ | |
1922 | } |