// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/usb/xhci-sideband.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (!td || !td->start_seg)
		return false;

	xhci_for_each_ring_seg(ring->first_seg, seg) {
		if (seg == td->start_seg)
			return true;
	}

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

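/*
 * Illustrative use of the handshake above (it mirrors xhci_halt() below):
 * spin until the HCHalted status bit asserts, getting -ETIMEDOUT on hardware
 * flakeout or -ENODEV if the host was removed:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status,
 *			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */
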
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman |= IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	/* Read operation to guarantee the write has been flushed from posted buffers */
	readl(&ir->ir_set->iman);
	return 0;
}

int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	iman = readl(&ir->ir_set->iman);
	if (iman & IMAN_IP)
		xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);

	return 0;
}

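/*
 * Worked example for the function below (illustrative): a requested
 * imod_interval of 5000 ns is programmed as IMODI = 5000 / 250 = 20, i.e. at
 * most one interrupt every 5 us; requests above IMODI_MASK * 250 ns are
 * clamped by the umin() below.
 */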
/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
				    u32 imod_interval)
{
	u32 imod;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	/* IMODI value in IMOD register is in 250ns increments */
	imod_interval = umin(imod_interval / 250, IMODI_MASK);

	imod = readl(&ir->ir_set->imod);
	imod &= ~IMODI_MASK;
	imod |= imod_interval;
	writel(imod, &ir->ir_set->imod);

	return 0;
}

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that sometimes causes ports behind that hardware to enter compliance mode.
 * The quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers the port by issuing a Warm reset if
 * Compliance mode is detected; otherwise the port becomes "dead" (no device
 * connections or disconnections will be detected anymore). Because no status
 * event is generated when entering compliance mode (per the xhci spec), this
 * quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

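/*
 * Note on the PAGESIZE encoding below (from the power-of-two check and the
 * "<< 12" shift; the 2^(n+12) rule itself is an assumption from the xHCI
 * spec): the register reports the supported size as a single set bit n,
 * meaning 2^(n+12) bytes. E.g. a raw value of 0x1 selects 4096-byte pages
 * (1 << 12), which is why a non-power-of-two read is treated as invalid and
 * 4K is used as the fallback.
 */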
static void xhci_hcd_page_size(struct xhci_hcd *xhci)
{
	u32 page_size;

	page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
	if (!is_power_of_2(page_size)) {
		xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
		/* Fallback to 4K page size, since that's common */
		page_size = 1;
	}

	xhci->page_size = page_size << 12;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
		       xhci->page_size >> 10);
}

static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
{
	u32 config_reg;
	u32 max_slots;

	max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
		       max_slots);

	config_reg = readl(&xhci->op_regs->config_reg);
	config_reg &= ~HCS_SLOTS_MASK;
	config_reg |= max_slots;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
		       config_reg);
	writel(config_reg, &xhci->op_regs->config_reg);
}

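/*
 * The function below programs the Command Ring Control Register (CRCR)
 * dequeue pointer while preserving the consumer cycle state. The low CRCR
 * bits are flags rather than address bits (see CMD_RING_PTR_MASK and
 * CMD_RING_CYCLE), which is why the command ring must be 64-byte aligned,
 * as the comment above xhci_clear_command_ring() further below notes.
 */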
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	dma_addr_t deq_dma;
	u64 crcr;

	deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
	deq_dma &= CMD_RING_PTR_MASK;

	crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	crcr &= ~CMD_RING_PTR_MASK;
	crcr |= deq_dma;

	crcr &= ~CMD_RING_CYCLE;
	crcr |= xhci->cmd_ring->cycle_state;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
	xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
}

static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
{
	u32 offset;

	offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
	xhci->dba = (void __iomem *)xhci->cap_regs + offset;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
}

/*
 * Enable USB 3.0 device notifications for function remote wake, which is
 * necessary for allowing USB 3.0 devices to do remote wakeup from U3
 * (device suspend).
 */
static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
{
	u32 dev_notf;

	dev_notf = readl(&xhci->op_regs->dev_notification);
	dev_notf &= ~DEV_NOTE_MASK;
	dev_notf |= DEV_NOTE_FWAKE;
	writel(dev_notf, &xhci->op_regs->dev_notification);
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
	spin_lock_init(&xhci->lock);

	INIT_LIST_HEAD(&xhci->cmd_list);
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);
	xhci_hcd_page_size(xhci);
	memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));

	retval = xhci_mem_init(xhci, GFP_KERNEL);
	if (retval)
		return retval;

	/* Set the Number of Device Slots Enabled to the maximum supported value */
	xhci_enable_max_dev_slots(xhci);

	/* Set the address in the Command Ring Control register */
	xhci_set_cmd_ring_deq(xhci);

	/* Set Device Context Base Address Array pointer */
	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

	/* Set Doorbell array pointer */
	xhci_set_doorbell_ptr(xhci);

	/* Set USB 3.0 device notifications for function remote wake */
	xhci_set_dev_notifications(xhci);

	/* Initialize the Primary interrupter */
	xhci_add_interrupter(xhci, 0);
	xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before the host is running with a lock.
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
	hcd->uses_new_polling = 1;
	if (hcd->msi_enabled)
		ir->ip_autoclear = true;

	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_set_interrupter_moderation(ir, xhci->imod_interval);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(xhci, ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* Save both the primary and all secondary interrupters. */
	/* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_iman = readl(&ir->ir_set->iman);
		ir->s3_imod = readl(&ir->ir_set->imod);
	}
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME: should we lock to protect against freeing of interrupters? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_iman, &ir->ir_set->iman);
		writel(ir->s3_imod, &ir->ir_set->imod);
	}
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	xhci_for_each_ring_seg(ring->first_seg, seg) {
		/* erase all TRBs before the link */
		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		/* clear link cycle bit */
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
	}

	xhci_initialize_ring_info(ring);
	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected a termination but never successfully enumerated (trained to U0).
 * Internal wake causes an immediate xHCI wake after suspend. The PORT_CSC
 * write done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wakeup is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so the driver polls for BIT(8)
		 * to clear, which never happens, and then assumes the
		 * controller is not responding and times out. To work
		 * around this, check that the SRE and HCE bits are not
		 * set (as per xhci Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start the xHC (not bus-specific).
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;

	if (!hcd->state)
		return 0;

	/*
	 * Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		power_lost = true;

	if (!power_lost) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for the controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms; the xhci specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		if (!power_lost)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
		power_lost = true;
	}

	if (power_lost) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			timer_delete_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		if (xhci->xhc_state & XHCI_STATE_REMOVING)
			retval = -ENODEV;
		else
			retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci, xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/*
		 * The USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}
		if (retval)
			return retval;
		/*
		 * Resume roothubs unconditionally as PORTSC change bits are not
		 * immediately visible after xHC reset
		 */
		hcd->state = HC_STATE_SUSPENDED;

		if (xhci->shared_hcd) {
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
		usb_hcd_resume_root_hub(hcd);

		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/*
	 * step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/*
	 * step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend a U3 LFPS wake after a 100 ms delay if
		 * the first wake signalling failed; give them that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
done:
	/*
	 * If the system is subject to the Quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since ports
	 * may suffer the Compliance Mode issue again. It doesn't matter if
	 * ports previously entered U0 before the system's suspension.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

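/*
 * Rationale for the temporary (bounce) buffer above and the check below (a
 * reading of the code, not normative): hosts with
 * XHCI_SG_TRB_CACHE_SIZE_QUIRK only cache a limited number of TRBs per TD,
 * so a scatter-gather list whose entries beyond that cache together hold
 * less than one wMaxPacketSize packet cannot be queued as-is and is instead
 * consolidated into a single contiguous buffer by xhci_map_temp_buffer().
 */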
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

33e39350 NSJ |
1396 | /* |
1397 | * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT); |
1398 | * in that case we copy the actual data into the TRB address register instead. |
1399 | * This is limited to transfers of up to 8 bytes on output endpoints of any kind |
1400 | * with wMaxPacketSize >= 8 bytes. If IDT is used, only one Transfer TRB per TD is allowed. | |
1401 | */ | |
1402 | static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, | |
1403 | gfp_t mem_flags) | |
1404 | { | |
2017a1e5 TJ |
1405 | struct xhci_hcd *xhci; |
1406 | ||
1407 | xhci = hcd_to_xhci(hcd); | |
1408 | ||
33e39350 NSJ |
1409 | if (xhci_urb_suitable_for_idt(urb)) |
1410 | return 0; | |
1411 | ||
2017a1e5 TJ |
1412 | if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) { |
1413 | if (xhci_urb_temp_buffer_required(hcd, urb)) | |
1414 | return xhci_map_temp_buffer(hcd, urb); | |
1415 | } | |
33e39350 NSJ |
1416 | return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
1417 | } | |
1418 | ||
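/*
 * Illustrative sketch only: roughly what an IDT suitability test looks like,
 * based on the limits described in the comment above (small OUT transfer,
 * wMaxPacketSize >= 8, no scatterlist). The real check is
 * xhci_urb_suitable_for_idt() in xhci.h; its exact conditions may differ.
 */
static inline bool example_urb_suitable_for_idt(struct urb *urb)
{
	return usb_urb_dir_out(urb) &&			/* OUT endpoints only */
	       !usb_endpoint_xfer_isoc(&urb->ep->desc) &&
	       usb_endpoint_maxp(&urb->ep->desc) >= 8 &&
	       urb->transfer_buffer_length <= 8 &&	/* fits in the TRB field */
	       !urb->num_sgs;
}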
2017a1e5 TJ |
1419 | static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
1420 | { | |
1421 | struct xhci_hcd *xhci; | |
1422 | bool unmap_temp_buf = false; | |
1423 | ||
1424 | xhci = hcd_to_xhci(hcd); | |
1425 | ||
1426 | if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE)) | |
1427 | unmap_temp_buf = true; | |
1428 | ||
1429 | if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf) | |
1430 | xhci_unmap_temp_buf(hcd, urb); | |
1431 | else | |
1432 | usb_hcd_unmap_urb_for_dma(hcd, urb); | |
1433 | } | |
1434 | ||
1435 | /** | |
d0e96f5a SS |
1436 | * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and |
1437 | * HCDs. Find the index for an endpoint given its descriptor. Use the return | |
1438 | * value (plus one) as the amount to left shift 1 to build the bitmask. |
85c4aa0a | 1439 | * @desc: USB endpoint descriptor to determine index for |
d0e96f5a SS |
1440 | * |
1441 | * Index = (epnum * 2) + direction - 1, | |
1442 | * where direction = 0 for OUT, 1 for IN. | |
1443 | * For control endpoints, the IN index is used (OUT index is unused), so | |
1444 | * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) | |
1445 | */ | |
1446 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) | |
1447 | { | |
1448 | unsigned int index; | |
1449 | if (usb_endpoint_xfer_control(desc)) | |
1450 | index = (unsigned int) (usb_endpoint_num(desc)*2); | |
1451 | else | |
1452 | index = (unsigned int) (usb_endpoint_num(desc)*2) + | |
1453 | (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; | |
1454 | return index; | |
1455 | } | |
14295a15 | 1456 | EXPORT_SYMBOL_GPL(xhci_get_endpoint_index); |
d0e96f5a | 1457 | |
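/*
 * Worked example (illustrative, not part of the driver): for interrupt IN
 * endpoint 0x81, index = (1 * 2) + 1 - 1 = 2; for bulk OUT endpoint 0x02,
 * index = (2 * 2) + 0 - 1 = 3; for control endpoint 0, index = (0 * 2) = 0.
 */
static void __maybe_unused example_endpoint_index(void)
{
	struct usb_endpoint_descriptor desc = {
		.bEndpointAddress = USB_DIR_IN | 1,	/* ep 1 IN (0x81) */
		.bmAttributes = USB_ENDPOINT_XFER_INT,
	};

	/* (1 * 2) + 1 - 1 == 2 for this descriptor */
	WARN_ON(xhci_get_endpoint_index(&desc) != 2);
}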
01c5f447 JW |
1458 | /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint |
1459 | * address from the XHCI endpoint index. | |
1460 | */ | |
d017aeaf | 1461 | static unsigned int xhci_get_endpoint_address(unsigned int ep_index) |
01c5f447 JW |
1462 | { |
1463 | unsigned int number = DIV_ROUND_UP(ep_index, 2); | |
1464 | unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; | |
1465 | return direction | number; | |
1466 | } | |
1467 | ||
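/*
 * Round-trip example (illustrative): ep_index 2 from above maps back as
 * number = DIV_ROUND_UP(2, 2) = 1 with direction = (2 % 2) ? OUT : IN = IN,
 * giving USB_DIR_IN | 1 = 0x81 again. ep_index 0 yields number 0 with
 * direction IN, matching the control-endpoint convention noted earlier.
 */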
f94e0186 SS |
1468 | /* Find the flag for this endpoint (for use in the control context). Use the |
1469 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | |
1470 | * bit 1, etc. | |
1471 | */ | |
3969384c | 1472 | static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) |
f94e0186 SS |
1473 | { |
1474 | return 1 << (xhci_get_endpoint_index(desc) + 1); | |
1475 | } | |
1476 | ||
1477 | /* Compute the last valid endpoint context index. Basically, this is the | |
1478 | * endpoint index plus one. For slot contexts with more than one valid endpoint, |
1479 | * we find the most significant bit set in the added contexts flags. | |
1480 | * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 | |
1481 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. | |
1482 | */ | |
ac9d8fe7 | 1483 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs) |
f94e0186 SS |
1484 | { |
1485 | return fls(added_ctxs) - 1; | |
1486 | } | |
1487 | ||
d0e96f5a SS |
1488 | /* Returns 1 if the arguments are OK; |
1489 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. |
1490 | */ | |
8212a49d | 1491 | static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, |
64927730 AX |
1492 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, |
1493 | const char *func) { | |
1494 | struct xhci_hcd *xhci; | |
1495 | struct xhci_virt_device *virt_dev; | |
1496 | ||
d0e96f5a | 1497 | if (!hcd || (check_ep && !ep) || !udev) { |
5c1127d3 | 1498 | pr_debug("xHCI %s called with invalid args\n", func); |
d0e96f5a SS |
1499 | return -EINVAL; |
1500 | } | |
1501 | if (!udev->parent) { | |
5c1127d3 | 1502 | pr_debug("xHCI %s called for root hub\n", func); |
d0e96f5a SS |
1503 | return 0; |
1504 | } | |
64927730 | 1505 | |
7bd89b40 | 1506 | xhci = hcd_to_xhci(hcd); |
64927730 | 1507 | if (check_virt_dev) { |
73ddc247 | 1508 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { |
5c1127d3 XR |
1509 | xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", |
1510 | func); | |
64927730 AX |
1511 | return -EINVAL; |
1512 | } | |
1513 | ||
1514 | virt_dev = xhci->devs[udev->slot_id]; | |
1515 | if (virt_dev->udev != udev) { | |
5c1127d3 | 1516 | xhci_dbg(xhci, "xHCI %s called with udev and " |
64927730 AX |
1517 | "virt_dev does not match\n", func); |
1518 | return -EINVAL; | |
1519 | } | |
d0e96f5a | 1520 | } |
64927730 | 1521 | |
203a8661 SS |
1522 | if (xhci->xhc_state & XHCI_STATE_HALTED) |
1523 | return -ENODEV; | |
1524 | ||
d0e96f5a SS |
1525 | return 1; |
1526 | } | |
1527 | ||
2d3f1fac | 1528 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
913a8a34 SS |
1529 | struct usb_device *udev, struct xhci_command *command, |
1530 | bool ctx_change, bool must_succeed); | |
2d3f1fac SS |
1531 | |
1532 | /* | |
1533 | * Full speed devices may have a max packet size greater than 8 bytes, but the | |
1534 | * USB core doesn't know that until it reads the first 8 bytes of the | |
1535 | * descriptor. If the usb_device's max packet size changes after that point, | |
1536 | * we need to issue an evaluate context command and wait on it. | |
1537 | */ | |
e34900f4 | 1538 | static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev) |
2d3f1fac | 1539 | { |
2d3f1fac SS |
1540 | struct xhci_input_control_ctx *ctrl_ctx; |
1541 | struct xhci_ep_ctx *ep_ctx; | |
ddba5cd0 | 1542 | struct xhci_command *command; |
2d3f1fac SS |
1543 | int max_packet_size; |
1544 | int hw_max_packet_size; | |
1545 | int ret = 0; | |
1546 | ||
e34900f4 | 1547 | ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0); |
28ccd296 | 1548 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); |
e34900f4 MN |
1549 | max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc); |
1550 | ||
1551 | if (hw_max_packet_size == max_packet_size) | |
1552 | return 0; | |
1553 | ||
1554 | switch (max_packet_size) { | |
1555 | case 8: case 16: case 32: case 64: case 9: | |
3a7fa5be XR |
1556 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1557 | "Max Packet Size for ep 0 changed."); | |
1558 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | |
1559 | "Max packet size in usb_device = %d", | |
2d3f1fac | 1560 | max_packet_size); |
3a7fa5be XR |
1561 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1562 | "Max packet size in xHCI HW = %d", | |
2d3f1fac | 1563 | hw_max_packet_size); |
3a7fa5be XR |
1564 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
1565 | "Issuing evaluate context command."); | |
2d3f1fac | 1566 | |
e34900f4 | 1567 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
ddba5cd0 MN |
1568 | if (!command) |
1569 | return -ENOMEM; | |
1570 | ||
e34900f4 | 1571 | command->in_ctx = vdev->in_ctx; |
4daf9df5 | 1572 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
92f8e767 SS |
1573 | if (!ctrl_ctx) { |
1574 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
1575 | __func__); | |
ddba5cd0 | 1576 | ret = -ENOMEM; |
e34900f4 | 1577 | break; |
92f8e767 | 1578 | } |
2d3f1fac | 1579 | /* Set up the modified control endpoint 0 */ |
e34900f4 | 1580 | xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0); |
92f8e767 | 1581 | |
e34900f4 | 1582 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0); |
a73d9d9c | 1583 | ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ |
28ccd296 ME |
1584 | ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); |
1585 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); | |
2d3f1fac | 1586 | |
28ccd296 | 1587 | ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); |
2d3f1fac SS |
1588 | ctrl_ctx->drop_flags = 0; |
1589 | ||
e34900f4 MN |
1590 | ret = xhci_configure_endpoint(xhci, vdev->udev, command, |
1591 | true, false); | |
1592 | /* Clean up the input context for later use by bandwidth functions */ | |
28ccd296 | 1593 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); |
e34900f4 MN |
1594 | break; |
1595 | default: | |
1596 | dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n", | |
1597 | max_packet_size); | |
1598 | return -EINVAL; | |
2d3f1fac | 1599 | } |
e34900f4 MN |
1600 | |
1601 | kfree(command->completion); | |
1602 | kfree(command); | |
1603 | ||
2d3f1fac SS |
1604 | return ret; |
1605 | } | |
1606 | ||
d0e96f5a SS |
1607 | /* |
1608 | * Non-error returns are a promise to giveback() the urb later; |
1609 | * we drop ownership so the next owner (or urb unlink) can get it. |
1610 | */ | |
3969384c | 1611 | static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
d0e96f5a SS |
1612 | { |
1613 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
1614 | unsigned long flags; | |
1615 | int ret = 0; | |
15febf5e MN |
1616 | unsigned int slot_id, ep_index; |
1617 | unsigned int *ep_state; | |
8e51adcc | 1618 | struct urb_priv *urb_priv; |
7e64b037 | 1619 | int num_tds; |
2d3f1fac | 1620 | |
d0e96f5a | 1621 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
8e51adcc AX |
1622 | |
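	/*
	 * One TD per isoc packet; a bulk OUT URB whose length is an exact
	 * multiple of wMaxPacketSize and that sets URB_ZERO_PACKET needs a
	 * second TD for the trailing zero-length packet; all else is one TD.
	 */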
1623 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) | |
e6f7caa3 | 1624 | num_tds = urb->number_of_packets; |
4758dcd1 RA |
1625 | else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && |
1626 | urb->transfer_buffer_length > 0 && | |
1627 | urb->transfer_flags & URB_ZERO_PACKET && | |
1628 | !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) | |
e6f7caa3 | 1629 | num_tds = 2; |
8e51adcc | 1630 | else |
e6f7caa3 | 1631 | num_tds = 1; |
8e51adcc | 1632 | |
da79ff6e | 1633 | urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); |
8e51adcc AX |
1634 | if (!urb_priv) |
1635 | return -ENOMEM; | |
1636 | ||
9ef7fbbb MN |
1637 | urb_priv->num_tds = num_tds; |
1638 | urb_priv->num_tds_done = 0; | |
8e51adcc AX |
1639 | urb->hcpriv = urb_priv; |
1640 | ||
5abdc2e6 FB |
1641 | trace_xhci_urb_enqueue(urb); |
1642 | ||
6969408d MN |
1643 | spin_lock_irqsave(&xhci->lock, flags); |
1644 | ||
e2e2aacf MN |
1645 | ret = xhci_check_args(hcd, urb->dev, urb->ep, |
1646 | true, true, __func__); | |
1647 | if (ret <= 0) { | |
1648 | ret = ret ? ret : -EINVAL; | |
1649 | goto free_priv; | |
1650 | } | |
1651 | ||
1652 | slot_id = urb->dev->slot_id; | |
1653 | ||
1654 | if (!HCD_HW_ACCESSIBLE(hcd)) { | |
1655 | ret = -ESHUTDOWN; | |
1656 | goto free_priv; | |
1657 | } | |
1658 | ||
1659 | if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { | |
1660 | xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); | |
1661 | ret = -ENODEV; | |
1662 | goto free_priv; | |
1663 | } | |
1664 | ||
6969408d MN |
1665 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
1666 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", | |
1667 | urb->ep->desc.bEndpointAddress, urb); | |
1668 | ret = -ESHUTDOWN; | |
1669 | goto free_priv; | |
1670 | } | |
e2e2aacf MN |
1671 | |
1672 | ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; | |
1673 | ||
15febf5e MN |
1674 | if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { |
1675 | xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", | |
1676 | *ep_state); | |
1677 | ret = -EINVAL; | |
1678 | goto free_priv; | |
1679 | } | |
f5249461 MN |
1680 | if (*ep_state & EP_SOFT_CLEAR_TOGGLE) { |
1681 | xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n"); | |
1682 | ret = -EINVAL; | |
1683 | goto free_priv; | |
1684 | } | |
6969408d MN |
1685 | |
1686 | switch (usb_endpoint_type(&urb->ep->desc)) { | |
1687 | ||
1688 | case USB_ENDPOINT_XFER_CONTROL: | |
b11069f5 | 1689 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
6969408d MN |
1690 | slot_id, ep_index); |
1691 | break; | |
1692 | case USB_ENDPOINT_XFER_BULK: | |
6969408d MN |
1693 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
1694 | slot_id, ep_index); | |
1695 | break; | |
6969408d | 1696 | case USB_ENDPOINT_XFER_INT: |
624defa1 SS |
1697 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
1698 | slot_id, ep_index); | |
6969408d | 1699 | break; |
6969408d | 1700 | case USB_ENDPOINT_XFER_ISOC: |
787f4e5a AX |
1701 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
1702 | slot_id, ep_index); | |
2d3f1fac | 1703 | } |
6969408d MN |
1704 | |
1705 | if (ret) { | |
d13565c1 | 1706 | free_priv: |
6969408d MN |
1707 | xhci_urb_free_priv(urb_priv); |
1708 | urb->hcpriv = NULL; | |
1709 | } | |
6f5165cf | 1710 | spin_unlock_irqrestore(&xhci->lock, flags); |
d13565c1 | 1711 | return ret; |
d0e96f5a SS |
1712 | } |
1713 | ||
ae636747 SS |
1714 | /* |
1715 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop | |
1716 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC | |
1717 | * should pick up where it left off in the TD, unless a Set Transfer Ring | |
1718 | * Dequeue Pointer is issued. | |
1719 | * | |
1720 | * The TRBs that make up the buffers for the canceled URB will be "removed" from | |
1721 | * the ring. Since the ring is a contiguous structure, they can't be physically | |
1722 | * removed. Instead, there are three cases to handle: |
1723 | * | |
1724 | * 1) If the HC is in the middle of processing the URB to be canceled, we | |
1725 | * simply move the ring's dequeue pointer past those TRBs using the Set | |
1726 | * Transfer Ring Dequeue Pointer command. This will be the common case, | |
1727 | * when drivers timeout on the last submitted URB and attempt to cancel. | |
1728 | * | |
1729 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a | |
1730 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The | |
1731 | * HC will need to invalidate any TRBs it has cached after the stop |
1732 | * endpoint command, as noted in the xHCI 0.95 errata. | |
1733 | * | |
1734 | * 3) The TD may have completed by the time the Stop Endpoint Command | |
1735 | * completes, so software needs to handle that case too. | |
1736 | * | |
1737 | * This function should protect against the TD enqueueing code ringing the | |
1738 | * doorbell while this code is waiting for a Stop Endpoint command to complete. | |
1739 | * It also needs to account for multiple cancellations happening at the same |
1740 | * time for the same endpoint. | |
1741 | * | |
1742 | * Note that this function can be called in any context, or so says | |
1743 | * usb_hcd_unlink_urb() | |
d0e96f5a | 1744 | */ |
3969384c | 1745 | static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
d0e96f5a | 1746 | { |
ae636747 | 1747 | unsigned long flags; |
8e51adcc | 1748 | int ret, i; |
e34b2fbf | 1749 | u32 temp; |
ae636747 | 1750 | struct xhci_hcd *xhci; |
8e51adcc | 1751 | struct urb_priv *urb_priv; |
ae636747 SS |
1752 | struct xhci_td *td; |
1753 | unsigned int ep_index; | |
1754 | struct xhci_ring *ep_ring; | |
63a0d9ab | 1755 | struct xhci_virt_ep *ep; |
ddba5cd0 | 1756 | struct xhci_command *command; |
d3519b9d | 1757 | struct xhci_virt_device *vdev; |
ae636747 SS |
1758 | |
1759 | xhci = hcd_to_xhci(hcd); | |
1760 | spin_lock_irqsave(&xhci->lock, flags); | |
5abdc2e6 FB |
1761 | |
1762 | trace_xhci_urb_dequeue(urb); | |
1763 | ||
ae636747 SS |
1764 | /* Make sure the URB hasn't completed or been unlinked already */ |
1765 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | |
d3519b9d | 1766 | if (ret) |
ae636747 | 1767 | goto done; |
d3519b9d MN |
1768 | |
1769 | /* give back URB now if we can't queue it for cancel */ | |
1770 | vdev = xhci->devs[urb->dev->slot_id]; | |
1771 | urb_priv = urb->hcpriv; | |
1772 | if (!vdev || !urb_priv) | |
1773 | goto err_giveback; | |
1774 | ||
1775 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | |
1776 | ep = &vdev->eps[ep_index]; | |
1777 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); | |
1778 | if (!ep || !ep_ring) | |
1779 | goto err_giveback; | |
1780 | ||
d9f11ba9 | 1781 | /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */ |
b0ba9720 | 1782 | temp = readl(&xhci->op_regs->status); |
d9f11ba9 MN |
1783 | if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) { |
1784 | xhci_hc_died(xhci); | |
1785 | goto done; | |
1786 | } | |
1787 | ||
4937213b MN |
1788 | /* |
1789 | * check ring is not re-allocated since URB was enqueued. If it is, then | |
1790 | * make sure none of the ring related pointers in this URB private data | |
1791 | * are touched, such as td_list, otherwise we overwrite freed data | |
1792 | */ | |
1793 | if (!td_on_ring(&urb_priv->td[0], ep_ring)) { | |
1794 | xhci_err(xhci, "Canceled URB td not found on endpoint ring"); | |
1795 | for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { | |
1796 | td = &urb_priv->td[i]; | |
1797 | if (!list_empty(&td->cancelled_td_list)) | |
1798 | list_del_init(&td->cancelled_td_list); | |
1799 | } | |
1800 | goto err_giveback; | |
1801 | } | |
1802 | ||
d9f11ba9 | 1803 | if (xhci->xhc_state & XHCI_STATE_HALTED) { |
aa50b290 | 1804 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
d9f11ba9 | 1805 | "HC halted, freeing TD manually."); |
9ef7fbbb | 1806 | for (i = urb_priv->num_tds_done; |
d3519b9d | 1807 | i < urb_priv->num_tds; |
5c821711 | 1808 | i++) { |
7e64b037 | 1809 | td = &urb_priv->td[i]; |
585df1d9 SS |
1810 | if (!list_empty(&td->td_list)) |
1811 | list_del_init(&td->td_list); | |
1812 | if (!list_empty(&td->cancelled_td_list)) | |
1813 | list_del_init(&td->cancelled_td_list); | |
1814 | } | |
d3519b9d | 1815 | goto err_giveback; |
e34b2fbf | 1816 | } |
ae636747 | 1817 | |
9ef7fbbb MN |
1818 | i = urb_priv->num_tds_done; |
1819 | if (i < urb_priv->num_tds) | |
aa50b290 XR |
1820 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1821 | "Cancel URB %p, dev %s, ep 0x%x, " | |
1822 | "starting at offset 0x%llx", | |
79688acf SS |
1823 | urb, urb->dev->devpath, |
1824 | urb->ep->desc.bEndpointAddress, | |
1825 | (unsigned long long) xhci_trb_virt_to_dma( | |
7e64b037 | 1826 | urb_priv->td[i].start_seg, |
39b52aae | 1827 | urb_priv->td[i].start_trb)); |
79688acf | 1828 | |
9ef7fbbb | 1829 | for (; i < urb_priv->num_tds; i++) { |
7e64b037 | 1830 | td = &urb_priv->td[i]; |
674f8438 MN |
1831 | /* TD can already be on cancelled list if ep halted on it */ |
1832 | if (list_empty(&td->cancelled_td_list)) { | |
1833 | td->cancel_status = TD_DIRTY; | |
1834 | list_add_tail(&td->cancelled_td_list, | |
1835 | &ep->cancelled_td_list); | |
1836 | } | |
8e51adcc AX |
1837 | } |
1838 | ||
474538b8 MP |
1839 | /* These completion handlers will sort out cancelled TDs for us */ |
1840 | if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) { | |
1841 | xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n", | |
1842 | urb->dev->slot_id, ep_index, ep->ep_state); | |
1843 | goto done; | |
1844 | } | |
1845 | ||
af1352f8 MN |
1846 | /* In this case no commands are pending but the endpoint is stopped */ |
1847 | if (ep->ep_state & EP_CLEARING_TT) { | |
474538b8 MP |
1848 | /* and cancelled TDs can be given back right away */ |
1849 | xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", | |
1850 | urb->dev->slot_id, ep_index, ep->ep_state); | |
1851 | xhci_process_cancelled_tds(ep); | |
1852 | } else { | |
1853 | /* Otherwise, queue a new Stop Endpoint command */ | |
103afda0 | 1854 | command = xhci_alloc_command(xhci, false, GFP_ATOMIC); |
a0ee619f HG |
1855 | if (!command) { |
1856 | ret = -ENOMEM; | |
1857 | goto done; | |
1858 | } | |
42b75813 | 1859 | ep->stop_time = jiffies; |
9983a5fc | 1860 | ep->ep_state |= EP_STOP_CMD_PENDING; |
ddba5cd0 MN |
1861 | xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, |
1862 | ep_index, 0); | |
23e3be11 | 1863 | xhci_ring_cmd_db(xhci); |
ae636747 SS |
1864 | } |
1865 | done: | |
1866 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1867 | return ret; | |
d3519b9d MN |
1868 | |
1869 | err_giveback: | |
1870 | if (urb_priv) | |
1871 | xhci_urb_free_priv(urb_priv); | |
1872 | usb_hcd_unlink_urb_from_ep(hcd, urb); | |
1873 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1874 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); | |
1875 | return ret; | |
d0e96f5a SS |
1876 | } |
1877 | ||
f94e0186 SS |
1878 | /* Drop an endpoint from a new bandwidth configuration for this device. |
1879 | * Only one call to this function is allowed per endpoint before | |
1880 | * check_bandwidth() or reset_bandwidth() must be called. | |
1881 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1882 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1883 | * different endpoint descriptor in usb_host_endpoint. | |
1884 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1885 | * not allowed. | |
f88ba78d SS |
1886 | * |
1887 | * The USB core will not allow URBs to be queued to an endpoint that is being | |
1888 | * disabled, so there's no need for mutual exclusion to protect | |
1889 | * the xhci->devs[slot_id] structure. | |
f94e0186 | 1890 | */ |
14295a15 CY |
1891 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1892 | struct usb_host_endpoint *ep) | |
f94e0186 | 1893 | { |
f94e0186 | 1894 | struct xhci_hcd *xhci; |
d115b048 JY |
1895 | struct xhci_container_ctx *in_ctx, *out_ctx; |
1896 | struct xhci_input_control_ctx *ctrl_ctx; | |
f94e0186 SS |
1897 | unsigned int ep_index; |
1898 | struct xhci_ep_ctx *ep_ctx; | |
1899 | u32 drop_flag; | |
d6759133 | 1900 | u32 new_add_flags, new_drop_flags; |
f94e0186 SS |
1901 | int ret; |
1902 | ||
64927730 | 1903 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
f94e0186 SS |
1904 | if (ret <= 0) |
1905 | return ret; | |
1906 | xhci = hcd_to_xhci(hcd); | |
fe6c6c13 SS |
1907 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1908 | return -ENODEV; | |
f94e0186 | 1909 | |
fe6c6c13 | 1910 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
1911 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
1912 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { | |
1913 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", | |
1914 | __func__, drop_flag); | |
1915 | return 0; | |
1916 | } | |
1917 | ||
f94e0186 | 1918 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
d115b048 | 1919 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
4daf9df5 | 1920 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
92f8e767 SS |
1921 | if (!ctrl_ctx) { |
1922 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
1923 | __func__); | |
1924 | return 0; | |
1925 | } | |
1926 | ||
f94e0186 | 1927 | ep_index = xhci_get_endpoint_index(&ep->desc); |
d115b048 | 1928 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
f94e0186 SS |
1929 | /* If the HC already knows the endpoint is disabled, |
1930 | * or the HCD has noted it is disabled, ignore this request | |
1931 | */ | |
5071e6b2 | 1932 | if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || |
28ccd296 ME |
1933 | le32_to_cpu(ctrl_ctx->drop_flags) & |
1934 | xhci_get_endpoint_flag(&ep->desc)) { | |
a6134136 HG |
1935 | /* Do not warn when called after a usb_device_reset */ |
1936 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) | |
1937 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", | |
1938 | __func__, ep); | |
f94e0186 SS |
1939 | return 0; |
1940 | } | |
1941 | ||
28ccd296 ME |
1942 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1943 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); | |
f94e0186 | 1944 | |
28ccd296 ME |
1945 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1946 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
f94e0186 | 1947 | |
02b6fdc2 LB |
1948 | xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); |
1949 | ||
f94e0186 SS |
1950 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
1951 | ||
d6759133 | 1952 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
f94e0186 SS |
1953 | (unsigned int) ep->desc.bEndpointAddress, |
1954 | udev->slot_id, | |
1955 | (unsigned int) new_drop_flags, | |
d6759133 | 1956 | (unsigned int) new_add_flags); |
f94e0186 SS |
1957 | return 0; |
1958 | } | |
14295a15 | 1959 | EXPORT_SYMBOL_GPL(xhci_drop_endpoint); |
f94e0186 SS |
1960 | |
1961 | /* Add an endpoint to a new possible bandwidth configuration for this device. | |
1962 | * Only one call to this function is allowed per endpoint before | |
1963 | * check_bandwidth() or reset_bandwidth() must be called. | |
1964 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1965 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1966 | * different endpoint descriptor in usb_host_endpoint. | |
1967 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1968 | * not allowed. | |
f88ba78d SS |
1969 | * |
1970 | * The USB core will not allow URBs to be queued to an endpoint until the | |
1971 | * configuration or alt setting is installed in the device, so there's no need | |
1972 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. | |
f94e0186 | 1973 | */ |
14295a15 CY |
1974 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
1975 | struct usb_host_endpoint *ep) | |
f94e0186 | 1976 | { |
f94e0186 | 1977 | struct xhci_hcd *xhci; |
92c9691b | 1978 | struct xhci_container_ctx *in_ctx; |
f94e0186 | 1979 | unsigned int ep_index; |
d115b048 | 1980 | struct xhci_input_control_ctx *ctrl_ctx; |
5afa0a5e | 1981 | struct xhci_ep_ctx *ep_ctx; |
f94e0186 | 1982 | u32 added_ctxs; |
d6759133 | 1983 | u32 new_add_flags, new_drop_flags; |
fa75ac37 | 1984 | struct xhci_virt_device *virt_dev; |
f94e0186 SS |
1985 | int ret = 0; |
1986 | ||
64927730 | 1987 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
a1587d97 SS |
1988 | if (ret <= 0) { |
1989 | /* So we won't queue a reset ep command for a root hub */ | |
1990 | ep->hcpriv = NULL; | |
f94e0186 | 1991 | return ret; |
a1587d97 | 1992 | } |
f94e0186 | 1993 | xhci = hcd_to_xhci(hcd); |
fe6c6c13 SS |
1994 | if (xhci->xhc_state & XHCI_STATE_DYING) |
1995 | return -ENODEV; | |
f94e0186 SS |
1996 | |
1997 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | |
f94e0186 SS |
1998 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
1999 | /* FIXME when we have to issue an evaluate endpoint command to | |
2000 | * deal with ep0 max packet size changing once we get the | |
2001 | * descriptors | |
2002 | */ | |
2003 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", | |
2004 | __func__, added_ctxs); | |
2005 | return 0; | |
2006 | } | |
2007 | ||
fa75ac37 SS |
2008 | virt_dev = xhci->devs[udev->slot_id]; |
2009 | in_ctx = virt_dev->in_ctx; | |
4daf9df5 | 2010 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
92f8e767 SS |
2011 | if (!ctrl_ctx) { |
2012 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2013 | __func__); | |
2014 | return 0; | |
2015 | } | |
fa75ac37 | 2016 | |
92f8e767 | 2017 | ep_index = xhci_get_endpoint_index(&ep->desc); |
fa75ac37 SS |
2018 | /* If this endpoint is already in use, and the upper layers are trying |
2019 | * to add it again without dropping it, reject the addition. | |
2020 | */ | |
2021 | if (virt_dev->eps[ep_index].ring && | |
92c9691b | 2022 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { |
fa75ac37 SS |
2023 | xhci_warn(xhci, "Trying to add endpoint 0x%x " |
2024 | "without dropping it.\n", | |
2025 | (unsigned int) ep->desc.bEndpointAddress); | |
2026 | return -EINVAL; | |
2027 | } | |
2028 | ||
f94e0186 SS |
2029 | /* If the HCD has already noted the endpoint is enabled, |
2030 | * ignore this request. | |
2031 | */ | |
92c9691b | 2032 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { |
700e2052 GKH |
2033 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
2034 | __func__, ep); | |
f94e0186 SS |
2035 | return 0; |
2036 | } | |
2037 | ||
f88ba78d SS |
2038 | /* |
2039 | * Configuration and alternate setting changes must be done in | |
2040 | * process context, not interrupt context (or so the documentation |
2041 | * for usb_set_interface() and usb_set_configuration() claims). |
2042 | */ | |
fa75ac37 | 2043 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
f94e0186 SS |
2044 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
2045 | __func__, ep->desc.bEndpointAddress); | |
f94e0186 SS |
2046 | return -ENOMEM; |
2047 | } | |
2048 | ||
28ccd296 ME |
2049 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
2050 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
f94e0186 SS |
2051 | |
2052 | /* If xhci_endpoint_disable() was called for this endpoint, but the | |
2053 | * xHC hasn't been notified yet through the check_bandwidth() call, | |
2054 | * this re-adds a new state for the endpoint from the new endpoint | |
2055 | * descriptors. We must drop and re-add this endpoint, so we leave the | |
2056 | * drop flags alone. | |
2057 | */ | |
28ccd296 | 2058 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
f94e0186 | 2059 | |
a1587d97 SS |
2060 | /* Store the usb_device pointer for later use */ |
2061 | ep->hcpriv = udev; | |
2062 | ||
5afa0a5e MN |
2063 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
2064 | trace_xhci_add_endpoint(ep_ctx); | |
2065 | ||
d6759133 | 2066 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
f94e0186 SS |
2067 | (unsigned int) ep->desc.bEndpointAddress, |
2068 | udev->slot_id, | |
2069 | (unsigned int) new_drop_flags, | |
d6759133 | 2070 | (unsigned int) new_add_flags); |
f94e0186 SS |
2071 | return 0; |
2072 | } | |
14295a15 | 2073 | EXPORT_SYMBOL_GPL(xhci_add_endpoint); |
f94e0186 | 2074 | |
d115b048 | 2075 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
f94e0186 | 2076 | { |
d115b048 | 2077 | struct xhci_input_control_ctx *ctrl_ctx; |
f94e0186 | 2078 | struct xhci_ep_ctx *ep_ctx; |
d115b048 | 2079 | struct xhci_slot_ctx *slot_ctx; |
f94e0186 SS |
2080 | int i; |
2081 | ||
4daf9df5 | 2082 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
92f8e767 SS |
2083 | if (!ctrl_ctx) { |
2084 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2085 | __func__); | |
2086 | return; | |
2087 | } | |
2088 | ||
f94e0186 SS |
2089 | /* When a device's add flag and drop flag are zero, any subsequent |
2090 | * configure endpoint command will leave that endpoint's state | |
2091 | * untouched. Make sure we don't leave any old state in the input | |
2092 | * endpoint contexts. | |
2093 | */ | |
d115b048 JY |
2094 | ctrl_ctx->drop_flags = 0; |
2095 | ctrl_ctx->add_flags = 0; | |
2096 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
28ccd296 | 2097 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
f94e0186 | 2098 | /* Endpoint 0 is always valid */ |
28ccd296 | 2099 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
98871e94 | 2100 | for (i = 1; i < 31; i++) { |
d115b048 | 2101 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
f94e0186 SS |
2102 | ep_ctx->ep_info = 0; |
2103 | ep_ctx->ep_info2 = 0; | |
8e595a5d | 2104 | ep_ctx->deq = 0; |
f94e0186 SS |
2105 | ep_ctx->tx_info = 0; |
2106 | } | |
2107 | } | |
2108 | ||
f2217e8e | 2109 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
00161f7d | 2110 | struct usb_device *udev, u32 *cmd_status) |
f2217e8e SS |
2111 | { |
2112 | int ret; | |
2113 | ||
913a8a34 | 2114 | switch (*cmd_status) { |
0b7c105a | 2115 | case COMP_COMMAND_ABORTED: |
604d02a2 | 2116 | case COMP_COMMAND_RING_STOPPED: |
c311e391 MN |
2117 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); |
2118 | ret = -ETIME; | |
2119 | break; | |
0b7c105a | 2120 | case COMP_RESOURCE_ERROR: |
288c0f44 ON |
2121 | dev_warn(&udev->dev, |
2122 | "Not enough host controller resources for new device state.\n"); | |
f2217e8e SS |
2123 | ret = -ENOMEM; |
2124 | /* FIXME: can we allocate more resources for the HC? */ | |
2125 | break; | |
0b7c105a FB |
2126 | case COMP_BANDWIDTH_ERROR: |
2127 | case COMP_SECONDARY_BANDWIDTH_ERROR: | |
288c0f44 ON |
2128 | dev_warn(&udev->dev, |
2129 | "Not enough bandwidth for new device state.\n"); | |
f2217e8e SS |
2130 | ret = -ENOSPC; |
2131 | /* FIXME: can we go back to the old state? */ | |
2132 | break; | |
0b7c105a | 2133 | case COMP_TRB_ERROR: |
f2217e8e SS |
2134 | /* the HCD set up something wrong */ |
2135 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " | |
2136 | "add flag = 1, " | |
2137 | "and endpoint is not disabled.\n"); | |
2138 | ret = -EINVAL; | |
2139 | break; | |
0b7c105a | 2140 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
288c0f44 ON |
2141 | dev_warn(&udev->dev, |
2142 | "ERROR: Incompatible device for endpoint configure command.\n"); | |
f6ba6fe2 AH |
2143 | ret = -ENODEV; |
2144 | break; | |
f2217e8e | 2145 | case COMP_SUCCESS: |
3a7fa5be XR |
2146 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
2147 | "Successful Endpoint Configure command"); | |
f2217e8e SS |
2148 | ret = 0; |
2149 | break; | |
2150 | default: | |
288c0f44 ON |
2151 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
2152 | *cmd_status); | |
f2217e8e SS |
2153 | ret = -EINVAL; |
2154 | break; | |
2155 | } | |
2156 | return ret; | |
2157 | } | |
2158 | ||
2159 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |
00161f7d | 2160 | struct usb_device *udev, u32 *cmd_status) |
f2217e8e SS |
2161 | { |
2162 | int ret; | |
2163 | ||
913a8a34 | 2164 | switch (*cmd_status) { |
0b7c105a | 2165 | case COMP_COMMAND_ABORTED: |
604d02a2 | 2166 | case COMP_COMMAND_RING_STOPPED: |
c311e391 MN |
2167 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); |
2168 | ret = -ETIME; | |
2169 | break; | |
0b7c105a | 2170 | case COMP_PARAMETER_ERROR: |
288c0f44 ON |
2171 | dev_warn(&udev->dev, |
2172 | "WARN: xHCI driver setup invalid evaluate context command.\n"); | |
f2217e8e SS |
2173 | ret = -EINVAL; |
2174 | break; | |
0b7c105a | 2175 | case COMP_SLOT_NOT_ENABLED_ERROR: |
288c0f44 ON |
2176 | dev_warn(&udev->dev, |
2177 | "WARN: slot not enabled for evaluate context command.\n"); | |
b8031342 SS |
2178 | ret = -EINVAL; |
2179 | break; | |
0b7c105a | 2180 | case COMP_CONTEXT_STATE_ERROR: |
288c0f44 ON |
2181 | dev_warn(&udev->dev, |
2182 | "WARN: invalid context state for evaluate context command.\n"); | |
f2217e8e SS |
2183 | ret = -EINVAL; |
2184 | break; | |
0b7c105a | 2185 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
288c0f44 ON |
2186 | dev_warn(&udev->dev, |
2187 | "ERROR: Incompatible device for evaluate context command.\n"); | |
f6ba6fe2 AH |
2188 | ret = -ENODEV; |
2189 | break; | |
0b7c105a | 2190 | case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: |
1bb73a88 AH |
2191 | /* Max Exit Latency too large error */ |
2192 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); | |
2193 | ret = -EINVAL; | |
2194 | break; | |
f2217e8e | 2195 | case COMP_SUCCESS: |
3a7fa5be XR |
2196 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
2197 | "Successful evaluate context command"); | |
f2217e8e SS |
2198 | ret = 0; |
2199 | break; | |
2200 | default: | |
288c0f44 ON |
2201 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
2202 | *cmd_status); | |
f2217e8e SS |
2203 | ret = -EINVAL; |
2204 | break; | |
2205 | } | |
2206 | return ret; | |
2207 | } | |
2208 | ||
2cf95c18 | 2209 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
92f8e767 | 2210 | struct xhci_input_control_ctx *ctrl_ctx) |
2cf95c18 | 2211 | { |
2cf95c18 SS |
2212 | u32 valid_add_flags; |
2213 | u32 valid_drop_flags; | |
2214 | ||
2cf95c18 SS |
2215 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
2216 | * (bit 1). The default control endpoint is added during the Address | |
2217 | * Device command and is never removed until the slot is disabled. | |
2218 | */ | |
ef73400c XR |
2219 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2220 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; | |
2cf95c18 SS |
2221 | |
2222 | /* Use hweight32 to count the number of ones in the add flags, or | |
2223 | * number of endpoints added. Don't count endpoints that are changed | |
2224 | * (both added and dropped). | |
2225 | */ | |
2226 | return hweight32(valid_add_flags) - | |
2227 | hweight32(valid_add_flags & valid_drop_flags); | |
2228 | } | |
2229 | ||
2230 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, | |
92f8e767 | 2231 | struct xhci_input_control_ctx *ctrl_ctx) |
2cf95c18 | 2232 | { |
2cf95c18 SS |
2233 | u32 valid_add_flags; |
2234 | u32 valid_drop_flags; | |
2235 | ||
78d1ff02 XR |
2236 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
2237 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; | |
2cf95c18 SS |
2238 | |
2239 | return hweight32(valid_drop_flags) - | |
2240 | hweight32(valid_add_flags & valid_drop_flags); | |
2241 | } | |
2242 | ||
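/*
 * Worked example (illustrative): with add_flags = 0b11100 (endpoint flag
 * bits 2..4) and drop_flags = 0b00100 (bit 2 both added and dropped, i.e. a
 * changed endpoint), shifting out the slot and ep0 bits gives
 * valid_add_flags = 0b111 and valid_drop_flags = 0b001, so
 * new endpoints = hweight32(0b111) - hweight32(0b001) = 3 - 1 = 2 and
 * dropped endpoints = hweight32(0b001) - hweight32(0b001) = 0.
 */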
2243 | /* | |
2244 | * We need to reserve the new number of endpoints before the configure endpoint | |
2245 | * command completes. We can't subtract the dropped endpoints from the number | |
2246 | * of active endpoints until the command completes because we can oversubscribe | |
2247 | * the host in this case: | |
2248 | * | |
2249 | * - the first configure endpoint command drops more endpoints than it adds | |
2250 | * - a second configure endpoint command that adds more endpoints is queued | |
2251 | * - the first configure endpoint command fails, so the config is unchanged | |
2252 | * - the second command may succeed, even though there isn't enough resources | |
2253 | * | |
2254 | * Must be called with xhci->lock held. | |
2255 | */ | |
2256 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, | |
92f8e767 | 2257 | struct xhci_input_control_ctx *ctrl_ctx) |
2cf95c18 SS |
2258 | { |
2259 | u32 added_eps; | |
2260 | ||
92f8e767 | 2261 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2cf95c18 | 2262 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
4bdfe4c3 XR |
2263 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2264 | "Not enough ep ctxs: " | |
2265 | "%u active, need to add %u, limit is %u.", | |
2cf95c18 SS |
2266 | xhci->num_active_eps, added_eps, |
2267 | xhci->limit_active_eps); | |
2268 | return -ENOMEM; | |
2269 | } | |
2270 | xhci->num_active_eps += added_eps; | |
4bdfe4c3 XR |
2271 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2272 | "Adding %u ep ctxs, %u now active.", added_eps, | |
2cf95c18 SS |
2273 | xhci->num_active_eps); |
2274 | return 0; | |
2275 | } | |
2276 | ||
2277 | /* | |
2278 | * The configure endpoint was failed by the xHC for some other reason, so we | |
2279 | * need to revert the resources that failed configuration would have used. | |
2280 | * | |
2281 | * Must be called with xhci->lock held. | |
2282 | */ | |
2283 | static void xhci_free_host_resources(struct xhci_hcd *xhci, | |
92f8e767 | 2284 | struct xhci_input_control_ctx *ctrl_ctx) |
2cf95c18 SS |
2285 | { |
2286 | u32 num_failed_eps; | |
2287 | ||
92f8e767 | 2288 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
2cf95c18 | 2289 | xhci->num_active_eps -= num_failed_eps; |
4bdfe4c3 XR |
2290 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2291 | "Removing %u failed ep ctxs, %u now active.", | |
2cf95c18 SS |
2292 | num_failed_eps, |
2293 | xhci->num_active_eps); | |
2294 | } | |
2295 | ||
2296 | /* | |
2297 | * Now that the command has completed, clean up the active endpoint count by | |
2298 | * subtracting out the endpoints that were dropped (but not changed). | |
2299 | * | |
2300 | * Must be called with xhci->lock held. | |
2301 | */ | |
2302 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, | |
92f8e767 | 2303 | struct xhci_input_control_ctx *ctrl_ctx) |
2cf95c18 SS |
2304 | { |
2305 | u32 num_dropped_eps; | |
2306 | ||
92f8e767 | 2307 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); |
2cf95c18 SS |
2308 | xhci->num_active_eps -= num_dropped_eps; |
2309 | if (num_dropped_eps) | |
4bdfe4c3 XR |
2310 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2311 | "Removing %u dropped ep ctxs, %u now active.", | |
2cf95c18 SS |
2312 | num_dropped_eps, |
2313 | xhci->num_active_eps); | |
2314 | } | |
2315 | ||
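/*
 * Sketch (illustrative) of how the three helpers above pair around a
 * Configure Endpoint command, all under xhci->lock: reserve before the
 * command is queued, then on completion either roll back or settle up.
 *
 *	if (xhci_reserve_host_resources(xhci, ctrl_ctx))
 *		return -ENOMEM;
 *	...queue Configure Endpoint, wait for completion...
 *	if (the command failed)
 *		xhci_free_host_resources(xhci, ctrl_ctx);
 *	else
 *		xhci_finish_resource_reservation(xhci, ctrl_ctx);
 */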
ed384bd3 | 2316 | static unsigned int xhci_get_block_size(struct usb_device *udev) |
c29eea62 SS |
2317 | { |
2318 | switch (udev->speed) { | |
2319 | case USB_SPEED_LOW: | |
2320 | case USB_SPEED_FULL: | |
2321 | return FS_BLOCK; | |
2322 | case USB_SPEED_HIGH: | |
2323 | return HS_BLOCK; | |
2324 | case USB_SPEED_SUPER: | |
0caf6b33 | 2325 | case USB_SPEED_SUPER_PLUS: |
c29eea62 SS |
2326 | return SS_BLOCK; |
2327 | case USB_SPEED_UNKNOWN: | |
c29eea62 SS |
2328 | default: |
2329 | /* Should never happen */ | |
2330 | return 1; | |
2331 | } | |
2332 | } | |
2333 | ||
ed384bd3 FB |
2334 | static unsigned int |
2335 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) | |
c29eea62 SS |
2336 | { |
2337 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) | |
2338 | return LS_OVERHEAD; | |
2339 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) | |
2340 | return FS_OVERHEAD; | |
2341 | return HS_OVERHEAD; | |
2342 | } | |
2343 | ||
2344 | /* If we are changing a LS/FS device under a HS hub, | |
2345 | * make sure (if we are activating a new TT) that the HS bus has enough | |
2346 | * bandwidth for this new TT. | |
2347 | */ | |
2348 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, | |
2349 | struct xhci_virt_device *virt_dev, | |
2350 | int old_active_eps) | |
2351 | { | |
2352 | struct xhci_interval_bw_table *bw_table; | |
2353 | struct xhci_tt_bw_info *tt_info; | |
2354 | ||
2355 | /* Find the bandwidth table for the root port this TT is attached to. */ | |
06790c19 | 2356 | bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table; |
c29eea62 SS |
2357 | tt_info = virt_dev->tt_info; |
2358 | /* If this TT already had active endpoints, the bandwidth for this TT | |
2359 | * has already been added. Removing all periodic endpoints (and thus | |
2360 | * making the TT inactive) will only decrease the bandwidth used. |
2361 | */ | |
2362 | if (old_active_eps) | |
2363 | return 0; | |
2364 | if (old_active_eps == 0 && tt_info->active_eps != 0) { | |
2365 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) | |
2366 | return -ENOMEM; | |
2367 | return 0; | |
2368 | } | |
2369 | /* Not sure why we would have no new active endpoints... | |
2370 | * | |
2371 | * Maybe because of an Evaluate Context change for a hub update or a | |
2372 | * control endpoint 0 max packet size change? | |
2373 | * FIXME: skip the bandwidth calculation in that case. | |
2374 | */ | |
2375 | return 0; | |
2376 | } | |
2377 | ||
2b698999 SS |
2378 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
2379 | struct xhci_virt_device *virt_dev) | |
2380 | { | |
2381 | unsigned int bw_reserved; | |
2382 | ||
2383 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); | |
2384 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) | |
2385 | return -ENOMEM; | |
2386 | ||
2387 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); | |
2388 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) | |
2389 | return -ENOMEM; | |
2390 | ||
2391 | return 0; | |
2392 | } | |
2393 | ||
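/*
 * Worked example (illustrative): with SS_BW_RESERVED percent held back,
 * bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100), so the
 * IN direction only admits periodic traffic up to SS_BW_LIMIT_IN -
 * bw_reserved; OUT is checked the same way against SS_BW_LIMIT_OUT.
 * Each direction is budgeted independently.
 */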
c29eea62 SS |
2394 | /* |
2395 | * This algorithm is a very conservative estimate of the worst-case scheduling | |
2396 | * scenario for any one interval. The hardware dynamically schedules the | |
2397 | * packets, so we can't tell which microframe could be the limiting factor in | |
2398 | * the bandwidth scheduling. This only takes into account periodic endpoints. | |
2399 | * | |
2400 | * Obviously, we can't solve an NP-complete problem to find the minimum worst |
2401 | * case scenario. Instead, we come up with an estimate that is no less than | |
2402 | * the worst case bandwidth used for any one microframe, but may be an | |
2403 | * over-estimate. | |
2404 | * | |
2405 | * We walk the requirements for each endpoint by interval, starting with the | |
2406 | * smallest interval, and place packets in the schedule where there is only one | |
2407 | * possible way to schedule packets for that interval. In order to simplify | |
2408 | * this algorithm, we record the largest max packet size for each interval, and | |
2409 | * assume all packets will be that size. | |
2410 | * | |
2411 | * For interval 0, we obviously must schedule all packets for each interval. | |
2412 | * The bandwidth for interval 0 is just the amount of data to be transmitted | |
2413 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times | |
2414 | * the number of packets). | |
2415 | * | |
2416 | * For interval 1, we have two possible microframes to schedule those packets | |
2417 | * in. For this algorithm, if we can schedule the same number of packets for | |
2418 | * each possible scheduling opportunity (each microframe), we will do so. The | |
2419 | * remaining number of packets will be saved to be transmitted in the gaps in | |
2420 | * the next interval's scheduling sequence. | |
2421 | * | |
2422 | * As we move those remaining packets to be scheduled with interval 2 packets, | |
2423 | * we have to double the number of remaining packets to transmit. This is | |
2424 | * because the intervals are actually powers of 2, and we would be transmitting | |
2425 | * the previous interval's packets twice in this interval. We also have to be | |
2426 | * sure that when we look at the largest max packet size for this interval, we | |
2427 | * also look at the largest max packet size for the remaining packets and take | |
2428 | * the greater of the two. | |
2429 | * | |
2430 | * The algorithm continues to evenly distribute packets in each scheduling | |
2431 | * opportunity, and push the remaining packets out, until we get to the last | |
2432 | * interval. Then those packets and their associated overhead are just added | |
2433 | * to the bandwidth used. | |
2e27980e SS |
2434 | */ |
2435 | static int xhci_check_bw_table(struct xhci_hcd *xhci, | |
2436 | struct xhci_virt_device *virt_dev, | |
2437 | int old_active_eps) | |
2438 | { | |
c29eea62 SS |
2439 | unsigned int bw_reserved; |
2440 | unsigned int max_bandwidth; | |
2441 | unsigned int bw_used; | |
2442 | unsigned int block_size; | |
2443 | struct xhci_interval_bw_table *bw_table; | |
2444 | unsigned int packet_size = 0; | |
2445 | unsigned int overhead = 0; | |
2446 | unsigned int packets_transmitted = 0; | |
2447 | unsigned int packets_remaining = 0; | |
2448 | unsigned int i; | |
2449 | ||
0caf6b33 | 2450 | if (virt_dev->udev->speed >= USB_SPEED_SUPER) |
2b698999 SS |
2451 | return xhci_check_ss_bw(xhci, virt_dev); |
2452 | ||
c29eea62 SS |
2453 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
2454 | max_bandwidth = HS_BW_LIMIT; | |
2455 | /* Convert percent of bus BW reserved to blocks reserved */ | |
2456 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); | |
2457 | } else { | |
2458 | max_bandwidth = FS_BW_LIMIT; | |
2459 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); | |
2460 | } | |
2461 | ||
2462 | bw_table = virt_dev->bw_table; | |
2463 | /* We need to translate the max packet size and max ESIT payloads into | |
2464 | * the units the hardware uses. | |
2465 | */ | |
2466 | block_size = xhci_get_block_size(virt_dev->udev); | |
2467 | ||
2468 | /* If we are manipulating a LS/FS device under a HS hub, double check | |
2469 | * that the HS bus has enough bandwidth if we are activating a new TT. |
2470 | */ | |
2471 | if (virt_dev->tt_info) { | |
4bdfe4c3 XR |
2472 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2473 | "Recalculating BW for rootport %u", | |
06790c19 | 2474 | virt_dev->rhub_port->hw_portnum + 1); |
c29eea62 SS |
2475 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
2476 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " | |
2477 | "newly activated TT.\n"); | |
2478 | return -ENOMEM; | |
2479 | } | |
4bdfe4c3 XR |
2480 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2481 | "Recalculating BW for TT slot %u port %u", | |
c29eea62 SS |
2482 | virt_dev->tt_info->slot_id, |
2483 | virt_dev->tt_info->ttport); | |
2484 | } else { | |
4bdfe4c3 XR |
2485 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2486 | "Recalculating BW for rootport %u", | |
06790c19 | 2487 | virt_dev->rhub_port->hw_portnum + 1); |
c29eea62 SS |
2488 | } |
2489 | ||
2490 | /* Add in how much bandwidth will be used for interval zero, or the | |
2491 | * rounded max ESIT payload + number of packets * largest overhead. | |
2492 | */ | |
2493 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + | |
2494 | bw_table->interval_bw[0].num_packets * | |
2495 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); | |
2496 | ||
2497 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { | |
2498 | unsigned int bw_added; | |
2499 | unsigned int largest_mps; | |
2500 | unsigned int interval_overhead; | |
2501 | ||
2502 | /* | |
2503 | * How many packets could we transmit in this interval? | |
2504 | * If packets didn't fit in the previous interval, we will need | |
2505 | * to transmit that many packets twice within this interval. | |
2506 | */ | |
2507 | packets_remaining = 2 * packets_remaining + | |
2508 | bw_table->interval_bw[i].num_packets; | |
2509 | ||
2510 | /* Find the largest max packet size of this or the previous | |
2511 | * interval. | |
2512 | */ | |
2513 | if (list_empty(&bw_table->interval_bw[i].endpoints)) | |
2514 | largest_mps = 0; | |
2515 | else { | |
2516 | struct xhci_virt_ep *virt_ep; | |
2517 | struct list_head *ep_entry; | |
2518 | ||
2519 | ep_entry = bw_table->interval_bw[i].endpoints.next; | |
2520 | virt_ep = list_entry(ep_entry, | |
2521 | struct xhci_virt_ep, bw_endpoint_list); | |
2522 | /* Convert to blocks, rounding up */ | |
2523 | largest_mps = DIV_ROUND_UP( | |
2524 | virt_ep->bw_info.max_packet_size, | |
2525 | block_size); | |
2526 | } | |
2527 | if (largest_mps > packet_size) | |
2528 | packet_size = largest_mps; | |
2529 | ||
2530 | /* Use the larger overhead of this or the previous interval. */ | |
2531 | interval_overhead = xhci_get_largest_overhead( | |
2532 | &bw_table->interval_bw[i]); | |
2533 | if (interval_overhead > overhead) | |
2534 | overhead = interval_overhead; | |
2535 | ||
2536 | /* How many packets can we evenly distribute across | |
2537 | * (1 << (i + 1)) possible scheduling opportunities? | |
2538 | */ | |
2539 | packets_transmitted = packets_remaining >> (i + 1); | |
2540 | ||
2541 | /* Add in the bandwidth used for those scheduled packets */ | |
2542 | bw_added = packets_transmitted * (overhead + packet_size); | |
2543 | ||
2544 | /* How many packets do we have remaining to transmit? */ | |
2545 | packets_remaining = packets_remaining % (1 << (i + 1)); | |
2546 | ||
2547 | /* What largest max packet size should those packets have? */ | |
2548 | /* If we've transmitted all packets, don't carry over the | |
2549 | * largest packet size. | |
2550 | */ | |
2551 | if (packets_remaining == 0) { | |
2552 | packet_size = 0; | |
2553 | overhead = 0; | |
2554 | } else if (packets_transmitted > 0) { | |
2555 | /* Otherwise if we do have remaining packets, and we've | |
2556 | * scheduled some packets in this interval, take the | |
2557 | * largest max packet size from endpoints with this | |
2558 | * interval. | |
2559 | */ | |
2560 | packet_size = largest_mps; | |
2561 | overhead = interval_overhead; | |
2562 | } | |
2563 | /* Otherwise carry over packet_size and overhead from the last | |
2564 | * time we had a remainder. | |
2565 | */ | |
2566 | bw_used += bw_added; | |
2567 | if (bw_used > max_bandwidth) { | |
2568 | xhci_warn(xhci, "Not enough bandwidth. " | |
2569 | "Proposed: %u, Max: %u\n", | |
2570 | bw_used, max_bandwidth); | |
2571 | return -ENOMEM; | |
2572 | } | |
2573 | } | |
2574 | /* | |
2575 | * Ok, we know we have some packets left over after even-handedly | |
2576 | * scheduling interval 15. We don't know which microframes they will | |
2577 | * fit into, so we over-schedule and say they will be scheduled every | |
2578 | * microframe. | |
2579 | */ | |
2580 | if (packets_remaining > 0) | |
2581 | bw_used += overhead + packet_size; | |
2582 | ||
2583 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { | |
c29eea62 SS |
2584 | /* OK, we're manipulating a HS device attached to a |
2585 | * root port bandwidth domain. Include the number of active TTs | |
2586 | * in the bandwidth used. | |
2587 | */ | |
2588 | bw_used += TT_HS_OVERHEAD * | |
06790c19 | 2589 | xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts; |
c29eea62 SS |
2590 | } |
2591 | ||
4bdfe4c3 XR |
2592 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2593 | "Final bandwidth: %u, Limit: %u, Reserved: %u, " | |
2594 | "Available: %u " "percent", | |
c29eea62 SS |
2595 | bw_used, max_bandwidth, bw_reserved, |
2596 | (max_bandwidth - bw_used - bw_reserved) * 100 / | |
2597 | max_bandwidth); | |
2598 | ||
2599 | bw_used += bw_reserved; | |
2600 | if (bw_used > max_bandwidth) { | |
2601 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", | |
2602 | bw_used, max_bandwidth); | |
2603 | return -ENOMEM; | |
2604 | } | |
2605 | ||
2606 | bw_table->bw_used = bw_used; | |
2e27980e SS |
2607 | return 0; |
2608 | } | |
2609 | ||
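/*
 * Worked example of the distribution loop above (illustrative numbers):
 * suppose 5 packets are pending when i = 1. There are 1 << (1 + 1) = 4
 * scheduling opportunities, so packets_transmitted = 5 >> 2 = 1 packet's
 * worth of (overhead + packet size) is charged to bw_used, and
 * packets_remaining = 5 % 4 = 1 is carried forward, doubled to 2 when
 * interval 2 is processed. Anything still left after interval 15 is
 * over-scheduled as if it ran every microframe.
 */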
2610 | static bool xhci_is_async_ep(unsigned int ep_type) | |
2611 | { | |
2612 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && | |
2613 | ep_type != ISOC_IN_EP && | |
2614 | ep_type != INT_IN_EP); | |
2615 | } | |
2616 | ||
2b698999 SS |
2617 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
2618 | { | |
392a07ae | 2619 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
2b698999 SS |
2620 | } |
2621 | ||
2622 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) | |
2623 | { | |
2624 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); | |
2625 | ||
2626 | if (ep_bw->ep_interval == 0) | |
2627 | return SS_OVERHEAD_BURST + | |
2628 | (ep_bw->mult * ep_bw->num_packets * | |
2629 | (SS_OVERHEAD + mps)); | |
2630 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * | |
2631 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), | |
2632 | 1 << ep_bw->ep_interval); | |
2633 | ||
2634 | } | |
2635 | ||
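/*
 * Worked example (illustrative): a SuperSpeed periodic endpoint with
 * mult = 1, num_packets = 2 and ep_interval = 3 services once per
 * 1 << 3 = 8 microframes, so its cost per microframe is
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 8),
 * where mps is the max packet size rounded up to SS_BLOCK units. The
 * ep_interval == 0 branch instead charges the full burst every microframe.
 */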
3969384c | 2636 | static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
2e27980e SS |
2637 | struct xhci_bw_info *ep_bw, |
2638 | struct xhci_interval_bw_table *bw_table, | |
2639 | struct usb_device *udev, | |
2640 | struct xhci_virt_ep *virt_ep, | |
2641 | struct xhci_tt_bw_info *tt_info) | |
2642 | { | |
2643 | struct xhci_interval_bw *interval_bw; | |
2644 | int normalized_interval; | |
2645 | ||
2b698999 | 2646 | if (xhci_is_async_ep(ep_bw->type)) |
2e27980e SS |
2647 | return; |
2648 | ||
0caf6b33 | 2649 | if (udev->speed >= USB_SPEED_SUPER) { |
2b698999 SS |
2650 | if (xhci_is_sync_in_ep(ep_bw->type)) |
2651 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= | |
2652 | xhci_get_ss_bw_consumed(ep_bw); | |
2653 | else | |
2654 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= | |
2655 | xhci_get_ss_bw_consumed(ep_bw); | |
2656 | return; | |
2657 | } | |
2658 | ||
2659 | /* SuperSpeed endpoints never get added to intervals in the table, so | |
2660 | * this check is only valid for HS/FS/LS devices. | |
2661 | */ | |
2662 | if (list_empty(&virt_ep->bw_endpoint_list)) | |
2663 | return; | |
2e27980e SS |
2664 | /* For LS/FS devices, translate the interval from microframe order to | |
2665 | * frame order by subtracting 3 (2^3 microframes per 1 ms frame). | |
2666 | */ | |
2667 | if (udev->speed == USB_SPEED_HIGH) | |
2668 | normalized_interval = ep_bw->ep_interval; | |
2669 | else | |
2670 | normalized_interval = ep_bw->ep_interval - 3; | |
2671 | ||
2672 | if (normalized_interval == 0) | |
2673 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; | |
2674 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2675 | interval_bw->num_packets -= ep_bw->num_packets; | |
2676 | switch (udev->speed) { | |
2677 | case USB_SPEED_LOW: | |
2678 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; | |
2679 | break; | |
2680 | case USB_SPEED_FULL: | |
2681 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; | |
2682 | break; | |
2683 | case USB_SPEED_HIGH: | |
2684 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; | |
2685 | break; | |
1e4c5742 | 2686 | default: |
2e27980e SS |
2687 | /* Should never happen because only LS/FS/HS endpoints will get |
2688 | * added to the endpoint list. | |
2689 | */ | |
2690 | return; | |
2691 | } | |
2692 | if (tt_info) | |
2693 | tt_info->active_eps -= 1; | |
2694 | list_del_init(&virt_ep->bw_endpoint_list); | |
2695 | } | |
2696 | ||
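The interval normalization used in both the drop path above and the add path below, as a standalone sketch: HS intervals are already in microframe order, while FS/LS intervals are converted to frame order by subtracting 3, since 2^3 microframes make one 1 ms frame. The speed enum below is a stand-in for the kernel's USB_SPEED_* values:

#include <stdio.h>

enum speed { SPEED_LOW, SPEED_FULL, SPEED_HIGH };	/* stand-in enum */

static int normalize_interval(enum speed s, int ep_interval)
{
	/*
	 * HS table entries are indexed in microframe order; FS/LS entries
	 * are indexed in frame order (1 frame == 2^3 microframes).
	 */
	if (s == SPEED_HIGH)
		return ep_interval;
	return ep_interval - 3;
}

int main(void)
{
	/* FS endpoint with a 1 ms period: interval 3 in microframe order */
	printf("FS interval 3 -> table index %d\n",
	       normalize_interval(SPEED_FULL, 3));
	/* HS endpoint polled every 4 microframes: interval 2 stays 2 */
	printf("HS interval 2 -> table index %d\n",
	       normalize_interval(SPEED_HIGH, 2));
	return 0;
}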
2697 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, | |
2698 | struct xhci_bw_info *ep_bw, | |
2699 | struct xhci_interval_bw_table *bw_table, | |
2700 | struct usb_device *udev, | |
2701 | struct xhci_virt_ep *virt_ep, | |
2702 | struct xhci_tt_bw_info *tt_info) | |
2703 | { | |
2704 | struct xhci_interval_bw *interval_bw; | |
2705 | struct xhci_virt_ep *smaller_ep; | |
2706 | int normalized_interval; | |
2707 | ||
2708 | if (xhci_is_async_ep(ep_bw->type)) | |
2709 | return; | |
2710 | ||
2b698999 SS |
2711 | if (udev->speed >= USB_SPEED_SUPER) { | |
2712 | if (xhci_is_sync_in_ep(ep_bw->type)) | |
2713 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += | |
2714 | xhci_get_ss_bw_consumed(ep_bw); | |
2715 | else | |
2716 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += | |
2717 | xhci_get_ss_bw_consumed(ep_bw); | |
2718 | return; | |
2719 | } | |
2720 | ||
2e27980e SS |
2721 | /* For LS/FS devices, translate the interval from microframe order to | |
2722 | * frame order by subtracting 3 (2^3 microframes per 1 ms frame). | |
2723 | */ | |
2724 | if (udev->speed == USB_SPEED_HIGH) | |
2725 | normalized_interval = ep_bw->ep_interval; | |
2726 | else | |
2727 | normalized_interval = ep_bw->ep_interval - 3; | |
2728 | ||
2729 | if (normalized_interval == 0) | |
2730 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; | |
2731 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2732 | interval_bw->num_packets += ep_bw->num_packets; | |
2733 | switch (udev->speed) { | |
2734 | case USB_SPEED_LOW: | |
2735 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; | |
2736 | break; | |
2737 | case USB_SPEED_FULL: | |
2738 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; | |
2739 | break; | |
2740 | case USB_SPEED_HIGH: | |
2741 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; | |
2742 | break; | |
1e4c5742 | 2743 | default: |
2e27980e SS |
2744 | /* Should never happen because only LS/FS/HS endpoints will get |
2745 | * added to the endpoint list. | |
2746 | */ | |
2747 | return; | |
2748 | } | |
2749 | ||
2750 | if (tt_info) | |
2751 | tt_info->active_eps += 1; | |
2752 | /* Insert the endpoint into the list, largest max packet size first. */ | |
2753 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, | |
2754 | bw_endpoint_list) { | |
2755 | if (ep_bw->max_packet_size >= | |
2756 | smaller_ep->bw_info.max_packet_size) { | |
2757 | /* Add the new ep before the smaller endpoint */ | |
2758 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2759 | &smaller_ep->bw_endpoint_list); | |
2760 | return; | |
2761 | } | |
2762 | } | |
2763 | /* Add the new endpoint at the end of the list. */ | |
2764 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2765 | &interval_bw->endpoints); | |
2766 | } | |
2767 | ||
2768 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, | |
2769 | struct xhci_virt_device *virt_dev, | |
2770 | int old_active_eps) | |
2771 | { | |
2772 | struct xhci_root_port_bw_info *rh_bw_info; | |
2773 | if (!virt_dev->tt_info) | |
2774 | return; | |
2775 | ||
06790c19 | 2776 | rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum]; |
2e27980e SS |
2777 | if (old_active_eps == 0 && |
2778 | virt_dev->tt_info->active_eps != 0) { | |
2779 | rh_bw_info->num_active_tts += 1; | |
c29eea62 | 2780 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
2e27980e SS |
2781 | } else if (old_active_eps != 0 && |
2782 | virt_dev->tt_info->active_eps == 0) { | |
2783 | rh_bw_info->num_active_tts -= 1; | |
c29eea62 | 2784 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
2e27980e SS |
2785 | } |
2786 | } | |
2787 | ||
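A sketch of the 0 <-> non-zero transition accounting done in xhci_update_tt_active_eps() above: the root port pays TT_HS_OVERHEAD exactly once per TT, when the TT's first periodic endpoint appears, and gets it back when the last one goes away. The TT_HS_OVERHEAD value below is assumed for illustration only:

#include <stdio.h>

#define TT_HS_OVERHEAD	125	/* assumed value, for illustration only */

struct rh_bw { int num_active_tts; unsigned int bw_used; };

static void update_tt(struct rh_bw *rh, int old_eps, int new_eps)
{
	if (old_eps == 0 && new_eps != 0) {		/* TT became active */
		rh->num_active_tts += 1;
		rh->bw_used += TT_HS_OVERHEAD;
	} else if (old_eps != 0 && new_eps == 0) {	/* TT went idle */
		rh->num_active_tts -= 1;
		rh->bw_used -= TT_HS_OVERHEAD;
	}
}

int main(void)
{
	struct rh_bw rh = { 0, 0 };

	update_tt(&rh, 0, 1);	/* first ep behind the TT pays the overhead */
	update_tt(&rh, 1, 2);	/* second ep: no extra TT overhead */
	printf("tts=%d bw=%u\n", rh.num_active_tts, rh.bw_used);
	update_tt(&rh, 2, 0);	/* all eps gone: overhead returned */
	printf("tts=%d bw=%u\n", rh.num_active_tts, rh.bw_used);
	return 0;
}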
2788 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, | |
2789 | struct xhci_virt_device *virt_dev, | |
2790 | struct xhci_container_ctx *in_ctx) | |
2791 | { | |
2792 | struct xhci_bw_info ep_bw_info[31]; | |
2793 | int i; | |
2794 | struct xhci_input_control_ctx *ctrl_ctx; | |
2795 | int old_active_eps = 0; | |
2796 | ||
2e27980e SS |
2797 | if (virt_dev->tt_info) |
2798 | old_active_eps = virt_dev->tt_info->active_eps; | |
2799 | ||
4daf9df5 | 2800 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
92f8e767 SS |
2801 | if (!ctrl_ctx) { |
2802 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2803 | __func__); | |
2804 | return -ENOMEM; | |
2805 | } | |
2e27980e SS |
2806 | |
2807 | for (i = 0; i < 31; i++) { | |
2808 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2809 | continue; | |
2810 | ||
2811 | /* Make a copy of the BW info in case we need to revert this */ | |
2812 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, | |
2813 | sizeof(ep_bw_info[i])); | |
2814 | /* Drop the endpoint from the interval table if the endpoint is | |
2815 | * being dropped or changed. | |
2816 | */ | |
2817 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2818 | xhci_drop_ep_from_interval_table(xhci, | |
2819 | &virt_dev->eps[i].bw_info, | |
2820 | virt_dev->bw_table, | |
2821 | virt_dev->udev, | |
2822 | &virt_dev->eps[i], | |
2823 | virt_dev->tt_info); | |
2824 | } | |
2825 | /* Overwrite the information stored in the endpoints' bw_info */ | |
2826 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); | |
2827 | for (i = 0; i < 31; i++) { | |
2828 | /* Add any changed or added endpoints to the interval table */ | |
2829 | if (EP_IS_ADDED(ctrl_ctx, i)) | |
2830 | xhci_add_ep_to_interval_table(xhci, | |
2831 | &virt_dev->eps[i].bw_info, | |
2832 | virt_dev->bw_table, | |
2833 | virt_dev->udev, | |
2834 | &virt_dev->eps[i], | |
2835 | virt_dev->tt_info); | |
2836 | } | |
2837 | ||
2838 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { | |
2839 | /* Ok, this fits in the bandwidth we have. | |
2840 | * Update the number of active TTs. | |
2841 | */ | |
2842 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
2843 | return 0; | |
2844 | } | |
2845 | ||
2846 | /* We don't have enough bandwidth for this, revert the stored info. */ | |
2847 | for (i = 0; i < 31; i++) { | |
2848 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2849 | continue; | |
2850 | ||
2851 | /* Drop the new copies of any added or changed endpoints from | |
2852 | * the interval table. | |
2853 | */ | |
2854 | if (EP_IS_ADDED(ctrl_ctx, i)) { | |
2855 | xhci_drop_ep_from_interval_table(xhci, | |
2856 | &virt_dev->eps[i].bw_info, | |
2857 | virt_dev->bw_table, | |
2858 | virt_dev->udev, | |
2859 | &virt_dev->eps[i], | |
2860 | virt_dev->tt_info); | |
2861 | } | |
2862 | /* Revert the endpoint back to its old information */ | |
2863 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], | |
2864 | sizeof(ep_bw_info[i])); | |
2865 | /* Add any changed or dropped endpoints back into the table */ | |
2866 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2867 | xhci_add_ep_to_interval_table(xhci, | |
2868 | &virt_dev->eps[i].bw_info, | |
2869 | virt_dev->bw_table, | |
2870 | virt_dev->udev, | |
2871 | &virt_dev->eps[i], | |
2872 | virt_dev->tt_info); | |
2873 | } | |
2874 | return -ENOMEM; | |
2875 | } | |
2876 | ||
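xhci_reserve_bandwidth() above is a small transaction: snapshot the per-endpoint state, apply the change, check the table, and roll back on failure. A generic standalone sketch of that shape (nothing xHCI-specific in it):

#include <stdio.h>
#include <string.h>

struct state { int value; };

/* Returns 0 on success, -1 after rolling back a change that didn't fit. */
static int try_change(struct state *s, int delta, int limit)
{
	struct state saved;

	memcpy(&saved, s, sizeof(saved));	/* snapshot */
	s->value += delta;			/* apply */
	if (s->value <= limit)			/* check */
		return 0;
	memcpy(s, &saved, sizeof(saved));	/* revert */
	return -1;
}

int main(void)
{
	struct state s = { 90 };

	printf("%d (value=%d)\n", try_change(&s, 5, 100), s.value);	/* fits */
	printf("%d (value=%d)\n", try_change(&s, 20, 100), s.value);	/* reverted */
	return 0;
}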
07b887f8 MN |
2877 | /* |
2878 | * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and | |
2879 | * waits for the command completion before returning. This does not call | |
2880 | * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error' | |
2881 | * cases, along with transfer ring cleanup. | |
2882 | * | |
2883 | * xhci_stop_endpoint_sync() is intended for clients that manage their | |
2884 | * own transfer rings, such as offload drivers. | |
2885 | */ | |
2886 | int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, | |
2887 | gfp_t gfp_flags) | |
2888 | { | |
2889 | struct xhci_command *command; | |
2890 | unsigned long flags; | |
2891 | int ret; | |
2892 | ||
2893 | command = xhci_alloc_command(xhci, true, gfp_flags); | |
2894 | if (!command) | |
2895 | return -ENOMEM; | |
2896 | ||
2897 | spin_lock_irqsave(&xhci->lock, flags); | |
2898 | ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id, | |
2899 | ep->ep_index, suspend); | |
2900 | if (ret < 0) { | |
2901 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2902 | goto out; | |
2903 | } | |
2904 | ||
2905 | xhci_ring_cmd_db(xhci); | |
2906 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2907 | ||
2908 | wait_for_completion(command->completion); | |
2909 | ||
2910 | /* No handling for COMP_CONTEXT_STATE_ERROR is done at command completion */ | |
2911 | if (command->status == COMP_COMMAND_ABORTED || | |
2912 | command->status == COMP_COMMAND_RING_STOPPED) { | |
2913 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); | |
2914 | ret = -ETIME; | |
2915 | } | |
2916 | out: | |
2917 | xhci_free_command(xhci, command); | |
2918 | ||
2919 | return ret; | |
2920 | } | |
2921 | EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync); | |
2e27980e | 2922 | |
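A hedged usage sketch for the exported xhci_stop_endpoint_sync() above: an offload client that already holds a struct xhci_virt_ep (how it obtained xhci and ep is assumed here) stops the endpoint without setting the suspend flag. Kernel-context code, shown for shape only:

/* Sketch only: assumes the caller already resolved xhci and ep. */
static int offload_quiesce_ep(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	int ret;

	/* suspend == 0: plain stop, no suspend (SP) flag in the command */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
	if (ret)
		pr_warn("stop endpoint failed: %d\n", ret);
	return ret;
}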
f2217e8e SS |
2923 | /* Issue a configure endpoint command or evaluate context command |
2924 | * and wait for it to finish. | |
2925 | */ | |
2926 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |
913a8a34 SS |
2927 | struct usb_device *udev, |
2928 | struct xhci_command *command, | |
2929 | bool ctx_change, bool must_succeed) | |
f2217e8e SS |
2930 | { |
2931 | int ret; | |
f2217e8e | 2932 | unsigned long flags; |
92f8e767 | 2933 | struct xhci_input_control_ctx *ctrl_ctx; |
913a8a34 | 2934 | struct xhci_virt_device *virt_dev; |
e3a78ff0 | 2935 | struct xhci_slot_ctx *slot_ctx; |
ddba5cd0 MN |
2936 | |
2937 | if (!command) | |
2938 | return -EINVAL; | |
f2217e8e SS |
2939 | |
2940 | spin_lock_irqsave(&xhci->lock, flags); | |
d9f11ba9 MN |
2941 | |
2942 | if (xhci->xhc_state & XHCI_STATE_DYING) { | |
2943 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2944 | return -ESHUTDOWN; | |
2945 | } | |
2946 | ||
913a8a34 | 2947 | virt_dev = xhci->devs[udev->slot_id]; |
750645f8 | 2948 | |
4daf9df5 | 2949 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
92f8e767 | 2950 | if (!ctrl_ctx) { |
1f21569c | 2951 | spin_unlock_irqrestore(&xhci->lock, flags); |
92f8e767 SS |
2952 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
2953 | __func__); | |
2954 | return -ENOMEM; | |
2955 | } | |
2cf95c18 | 2956 | |
750645f8 | 2957 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
92f8e767 | 2958 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { |
750645f8 SS |
2959 | spin_unlock_irqrestore(&xhci->lock, flags); |
2960 | xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n", | |
2962 | xhci->num_active_eps); | |
2963 | return -ENOMEM; | |
2964 | } | |
af8e119f | 2965 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && |
ddba5cd0 | 2966 | xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { |
2e27980e | 2967 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
92f8e767 | 2968 | xhci_free_host_resources(xhci, ctrl_ctx); |
2e27980e SS |
2969 | spin_unlock_irqrestore(&xhci->lock, flags); |
2970 | xhci_warn(xhci, "Not enough bandwidth\n"); | |
2971 | return -ENOMEM; | |
2972 | } | |
750645f8 | 2973 | |
e3a78ff0 | 2974 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
90d6d573 MN |
2975 | |
2976 | trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); | |
e3a78ff0 MN |
2977 | trace_xhci_configure_endpoint(slot_ctx); |
2978 | ||
f2217e8e | 2979 | if (!ctx_change) |
ddba5cd0 MN |
2980 | ret = xhci_queue_configure_endpoint(xhci, command, |
2981 | command->in_ctx->dma, | |
913a8a34 | 2982 | udev->slot_id, must_succeed); |
f2217e8e | 2983 | else |
ddba5cd0 MN |
2984 | ret = xhci_queue_evaluate_context(xhci, command, |
2985 | command->in_ctx->dma, | |
4b266541 | 2986 | udev->slot_id, must_succeed); |
f2217e8e | 2987 | if (ret < 0) { |
2cf95c18 | 2988 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
92f8e767 | 2989 | xhci_free_host_resources(xhci, ctrl_ctx); |
f2217e8e | 2990 | spin_unlock_irqrestore(&xhci->lock, flags); |
3a7fa5be XR |
2991 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
2992 | "FIXME allocate a new ring segment"); | |
f2217e8e SS |
2993 | return -ENOMEM; |
2994 | } | |
2995 | xhci_ring_cmd_db(xhci); | |
2996 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2997 | ||
2998 | /* Wait for the configure endpoint command to complete */ | |
c311e391 | 2999 | wait_for_completion(command->completion); |
f2217e8e SS |
3000 | |
3001 | if (!ctx_change) | |
ddba5cd0 MN |
3002 | ret = xhci_configure_endpoint_result(xhci, udev, |
3003 | &command->status); | |
2cf95c18 | 3004 | else |
ddba5cd0 MN |
3005 | ret = xhci_evaluate_context_result(xhci, udev, |
3006 | &command->status); | |
2cf95c18 SS |
3007 | |
3008 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
3009 | spin_lock_irqsave(&xhci->lock, flags); | |
3010 | /* If the command failed, remove the reserved resources. | |
3011 | * Otherwise, clean up the estimate to include dropped eps. | |
3012 | */ | |
3013 | if (ret) | |
92f8e767 | 3014 | xhci_free_host_resources(xhci, ctrl_ctx); |
2cf95c18 | 3015 | else |
92f8e767 | 3016 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
2cf95c18 SS |
3017 | spin_unlock_irqrestore(&xhci->lock, flags); |
3018 | } | |
3019 | return ret; | |
f2217e8e SS |
3020 | } |
3021 | ||
df613834 HG |
3022 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
3023 | struct xhci_virt_device *vdev, int i) | |
3024 | { | |
3025 | struct xhci_virt_ep *ep = &vdev->eps[i]; | |
3026 | ||
3027 | if (ep->ep_state & EP_HAS_STREAMS) { | |
3028 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", | |
3029 | xhci_get_endpoint_address(i)); | |
3030 | xhci_free_stream_info(xhci, ep->stream_info); | |
3031 | ep->stream_info = NULL; | |
3032 | ep->ep_state &= ~EP_HAS_STREAMS; | |
3033 | } | |
3034 | } | |
3035 | ||
f88ba78d SS |
3036 | /* Called after one or more calls to xhci_add_endpoint() or |
3037 | * xhci_drop_endpoint(). If this call fails, the USB core is expected | |
3038 | * to call xhci_reset_bandwidth(). | |
3039 | * | |
3040 | * Since we are in the middle of changing either configuration or | |
3041 | * installing a new alt setting, the USB core won't allow URBs to be | |
3042 | * enqueued for any endpoint on the old config or interface. Nothing | |
3043 | * else should be touching the xhci->devs[slot_id] structure, so we | |
3044 | * don't need to take the xhci->lock for manipulating that. | |
3045 | */ | |
1d69f9d9 | 3046 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
f94e0186 SS |
3047 | { |
3048 | int i; | |
3049 | int ret = 0; | |
f94e0186 SS |
3050 | struct xhci_hcd *xhci; |
3051 | struct xhci_virt_device *virt_dev; | |
d115b048 JY |
3052 | struct xhci_input_control_ctx *ctrl_ctx; |
3053 | struct xhci_slot_ctx *slot_ctx; | |
ddba5cd0 | 3054 | struct xhci_command *command; |
f94e0186 | 3055 | |
64927730 | 3056 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
f94e0186 SS |
3057 | if (ret <= 0) |
3058 | return ret; | |
3059 | xhci = hcd_to_xhci(hcd); | |
98d74f9c MN |
3060 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
3061 | (xhci->xhc_state & XHCI_STATE_REMOVING)) | |
fe6c6c13 | 3062 | return -ENODEV; |
f94e0186 | 3063 | |
700e2052 | 3064 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
3065 | virt_dev = xhci->devs[udev->slot_id]; |
3066 | ||
103afda0 | 3067 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
ddba5cd0 MN |
3068 | if (!command) |
3069 | return -ENOMEM; | |
3070 | ||
3071 | command->in_ctx = virt_dev->in_ctx; | |
3072 | ||
f94e0186 | 3073 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
4daf9df5 | 3074 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
92f8e767 SS |
3075 | if (!ctrl_ctx) { |
3076 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3077 | __func__); | |
ddba5cd0 MN |
3078 | ret = -ENOMEM; |
3079 | goto command_cleanup; | |
92f8e767 | 3080 | } |
28ccd296 ME |
3081 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
3082 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); | |
3083 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); | |
2dc37539 SS |
3084 | |
3085 | /* Don't issue the command if there's no endpoints to update. */ | |
3086 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && | |
ddba5cd0 MN |
3087 | ctrl_ctx->drop_flags == 0) { |
3088 | ret = 0; | |
3089 | goto command_cleanup; | |
3090 | } | |
d6759133 | 3091 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ |
d115b048 | 3092 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
d6759133 JW |
3093 | for (i = 31; i >= 1; i--) { |
3094 | __le32 le32 = cpu_to_le32(BIT(i)); | |
3095 | ||
3096 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) | |
3097 | || (ctrl_ctx->add_flags & le32) || i == 1) { | |
3098 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); | |
3099 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); | |
3100 | break; | |
3101 | } | |
3102 | } | |
f94e0186 | 3103 | |
ddba5cd0 | 3104 | ret = xhci_configure_endpoint(xhci, udev, command, |
913a8a34 | 3105 | false, false); |
ddba5cd0 | 3106 | if (ret) |
f94e0186 | 3107 | /* Callee should call reset_bandwidth() */ |
ddba5cd0 | 3108 | goto command_cleanup; |
f94e0186 | 3109 | |
834cb0fc | 3110 | /* Free any rings that were dropped, but not changed. */ |
98871e94 | 3111 | for (i = 1; i < 31; i++) { |
4819fef5 | 3112 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
df613834 | 3113 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { |
c5628a2a | 3114 | xhci_free_endpoint_ring(xhci, virt_dev, i); |
df613834 HG |
3115 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
3116 | } | |
834cb0fc | 3117 | } |
d115b048 | 3118 | xhci_zero_in_ctx(xhci, virt_dev); |
834cb0fc SS |
3119 | /* |
3120 | * Install any rings for completely new endpoints or changed endpoints, | |
c5628a2a | 3121 | * and free any old rings from changed endpoints. |
834cb0fc | 3122 | */ |
98871e94 | 3123 | for (i = 1; i < 31; i++) { |
74f9fe21 SS |
3124 | if (!virt_dev->eps[i].new_ring) |
3125 | continue; | |
c5628a2a | 3126 | /* Only free the old ring if it exists. |
74f9fe21 SS |
3127 | * It may not if this is the first add of an endpoint. |
3128 | */ | |
3129 | if (virt_dev->eps[i].ring) { | |
c5628a2a | 3130 | xhci_free_endpoint_ring(xhci, virt_dev, i); |
f94e0186 | 3131 | } |
df613834 | 3132 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
74f9fe21 SS |
3133 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
3134 | virt_dev->eps[i].new_ring = NULL; | |
167657a1 | 3135 | xhci_debugfs_create_endpoint(xhci, virt_dev, i); |
f94e0186 | 3136 | } |
ddba5cd0 MN |
3137 | command_cleanup: |
3138 | kfree(command->completion); | |
3139 | kfree(command); | |
f94e0186 | 3140 | |
f94e0186 SS |
3141 | return ret; |
3142 | } | |
14295a15 | 3143 | EXPORT_SYMBOL_GPL(xhci_check_bandwidth); |
f94e0186 | 3144 | |
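The Context Entries fix-up loop in xhci_check_bandwidth() above scans from the highest endpoint context down and records the last valid one in the slot context. A standalone sketch of just that scan, with the add/drop flags modeled as plain bitmasks and ring presence as a bool array:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Find the highest context entry still in use; minimum is 1 (EP0). */
static int last_ctx_entry(const bool has_ring[32], uint32_t add_flags,
			  uint32_t drop_flags)
{
	int i;

	for (i = 31; i >= 1; i--) {
		uint32_t bit = 1u << i;

		if ((has_ring[i - 1] && !(drop_flags & bit)) ||
		    (add_flags & bit) || i == 1)
			return i;
	}
	return 1;
}

int main(void)
{
	bool has_ring[32] = { false };

	has_ring[2] = true;	/* endpoint context 3 has a ring */
	printf("%d\n", last_ctx_entry(has_ring, 0, 0));		/* -> 3 */
	printf("%d\n", last_ctx_entry(has_ring, 0, 1u << 3));	/* dropped -> 1 */
	return 0;
}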
1d69f9d9 | 3145 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
f94e0186 | 3146 | { |
f94e0186 SS |
3147 | struct xhci_hcd *xhci; |
3148 | struct xhci_virt_device *virt_dev; | |
3149 | int i, ret; | |
3150 | ||
64927730 | 3151 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
f94e0186 SS |
3152 | if (ret <= 0) |
3153 | return; | |
3154 | xhci = hcd_to_xhci(hcd); | |
3155 | ||
700e2052 | 3156 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
3157 | virt_dev = xhci->devs[udev->slot_id]; |
3158 | /* Free any rings allocated for added endpoints */ | |
98871e94 | 3159 | for (i = 0; i < 31; i++) { |
63a0d9ab | 3160 | if (virt_dev->eps[i].new_ring) { |
02b6fdc2 | 3161 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
63a0d9ab SS |
3162 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
3163 | virt_dev->eps[i].new_ring = NULL; | |
f94e0186 SS |
3164 | } |
3165 | } | |
d115b048 | 3166 | xhci_zero_in_ctx(xhci, virt_dev); |
f94e0186 | 3167 | } |
14295a15 | 3168 | EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); |
f94e0186 | 3169 | |
59d50e53 XR |
3170 | /* Get the available bandwidth of the ports under the xhci roothub */ |
3171 | int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, | |
3172 | u8 dev_speed) | |
3173 | { | |
3174 | struct xhci_command *cmd; | |
3175 | unsigned long flags; | |
3176 | int ret; | |
3177 | ||
3178 | if (!ctx || !xhci) | |
3179 | return -EINVAL; | |
3180 | ||
3181 | cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); | |
3182 | if (!cmd) | |
3183 | return -ENOMEM; | |
3184 | ||
3185 | cmd->in_ctx = ctx; | |
3186 | ||
3187 | /* get xhci port bandwidth; refer to xhci rev1_2 spec, section 4.6.15 */ | |
3188 | spin_lock_irqsave(&xhci->lock, flags); | |
3189 | ||
3190 | ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); | |
3191 | if (ret) { | |
3192 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3193 | goto err_out; | |
3194 | } | |
3195 | xhci_ring_cmd_db(xhci); | |
3196 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3197 | ||
3198 | wait_for_completion(cmd->completion); | |
3199 | err_out: | |
3200 | kfree(cmd->completion); | |
3201 | kfree(cmd); | |
3202 | ||
3203 | return ret; | |
3204 | } | |
3205 | ||
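After xhci_get_port_bandwidth() completes, the context the caller passed in holds a Port Bandwidth Context: per the xHCI spec (rev 1.2, section 6.2.6), one byte per root-hub port giving the percentage of total bandwidth still available at the requested speed. A hedged kernel-context sketch of reading it (the buffer layout, with ports starting at offset 1, is assumed as described; error handling elided):

/*
 * Sketch only: assumes ctx->bytes points at the Port Bandwidth Context
 * and that ports are reported starting at offset 1, one byte per port.
 */
static void dump_port_bandwidth(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx,
				unsigned int num_ports)
{
	u8 *bw = (u8 *)ctx->bytes;
	unsigned int i;

	for (i = 1; i <= num_ports; i++)
		xhci_dbg(xhci, "port %u: %u%% bandwidth available\n",
			 i, bw[i]);
}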
5270b951 | 3206 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
913a8a34 SS |
3207 | struct xhci_container_ctx *in_ctx, |
3208 | struct xhci_container_ctx *out_ctx, | |
92f8e767 | 3209 | struct xhci_input_control_ctx *ctrl_ctx, |
913a8a34 | 3210 | u32 add_flags, u32 drop_flags) |
5270b951 | 3211 | { |
28ccd296 ME |
3212 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
3213 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); | |
913a8a34 | 3214 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
28ccd296 | 3215 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
5270b951 SS |
3216 | } |
3217 | ||
18b74067 MN |
3218 | static void xhci_endpoint_disable(struct usb_hcd *hcd, |
3219 | struct usb_host_endpoint *host_ep) | |
3220 | { | |
3221 | struct xhci_hcd *xhci; | |
3222 | struct xhci_virt_device *vdev; | |
3223 | struct xhci_virt_ep *ep; | |
3224 | struct usb_device *udev; | |
3225 | unsigned long flags; | |
3226 | unsigned int ep_index; | |
3227 | ||
3228 | xhci = hcd_to_xhci(hcd); | |
3229 | rescan: | |
3230 | spin_lock_irqsave(&xhci->lock, flags); | |
3231 | ||
3232 | udev = (struct usb_device *)host_ep->hcpriv; | |
3233 | if (!udev || !udev->slot_id) | |
3234 | goto done; | |
3235 | ||
3236 | vdev = xhci->devs[udev->slot_id]; | |
3237 | if (!vdev) | |
3238 | goto done; | |
3239 | ||
3240 | ep_index = xhci_get_endpoint_index(&host_ep->desc); | |
3241 | ep = &vdev->eps[ep_index]; | |
18b74067 MN |
3242 | |
3243 | /* wait for hub_tt_work to finish clearing hub TT */ | |
3244 | if (ep->ep_state & EP_CLEARING_TT) { | |
3245 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3246 | schedule_timeout_uninterruptible(1); | |
3247 | goto rescan; | |
3248 | } | |
3249 | ||
3250 | if (ep->ep_state) | |
3251 | xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", | |
3252 | ep->ep_state); | |
3253 | done: | |
3254 | host_ep->hcpriv = NULL; | |
3255 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3256 | } | |
3257 | ||
f5249461 MN |
3258 | /* |
3259 | * Called after usb core issues a clear halt control message. | |
3260 | * The host side of the halt should already be cleared by a reset endpoint | |
3261 | * command issued when the STALL event was received. | |
d0167ad2 | 3262 | * |
f5249461 MN |
3263 | * The reset endpoint command may only be issued to endpoints in the halted |
3264 | * state. For software that wishes to reset the data toggle or sequence number | |
3265 | * of an endpoint that isn't in the halted state this function will issue a | |
3266 | * configure endpoint command with the Drop and Add bits set for the target | |
3267 | * endpoint. Refer to the additional note in xhci specification section 4.6.8. | |
80602b6b MN |
3268 | * |
3269 | * vdev may be lost due to xHC restore error and re-initialization during S3/S4 | |
3270 | * resume. A new vdev will be allocated later by xhci_discover_or_reset_device() | |
a1587d97 | 3271 | */ |
8e71a322 | 3272 | |
3969384c | 3273 | static void xhci_endpoint_reset(struct usb_hcd *hcd, |
f5249461 | 3274 | struct usb_host_endpoint *host_ep) |
a1587d97 SS |
3275 | { |
3276 | struct xhci_hcd *xhci; | |
f5249461 MN |
3277 | struct usb_device *udev; |
3278 | struct xhci_virt_device *vdev; | |
3279 | struct xhci_virt_ep *ep; | |
3280 | struct xhci_input_control_ctx *ctrl_ctx; | |
3bf0514d | 3281 | struct xhci_command *stop_cmd, *cfg_cmd; |
f5249461 MN |
3282 | unsigned int ep_index; |
3283 | unsigned long flags; | |
3284 | u32 ep_flag; | |
8de66b0e | 3285 | int err; |
a1587d97 SS |
3286 | |
3287 | xhci = hcd_to_xhci(hcd); | |
e34900f4 MN |
3288 | ep_index = xhci_get_endpoint_index(&host_ep->desc); |
3289 | ||
3290 | /* | |
3291 | * USB core assumes a max packet value for ep0 on FS devices until the | |
3292 | * real value is read from the descriptor. Core resets ep0 if the values | |
3293 | * mismatch. Reconfigure the xhci ep0 endpoint context here in that case. | |
3294 | */ | |
3295 | if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) { | |
80602b6b | 3296 | |
e34900f4 | 3297 | udev = container_of(host_ep, struct usb_device, ep0); |
80602b6b MN |
3298 | if (udev->speed != USB_SPEED_FULL || !udev->slot_id) |
3299 | return; | |
3300 | ||
3301 | vdev = xhci->devs[udev->slot_id]; | |
3302 | if (!vdev || vdev->udev != udev) | |
3303 | return; | |
3304 | ||
3305 | xhci_check_ep0_maxpacket(xhci, vdev); | |
3306 | ||
e34900f4 MN |
3307 | /* Nothing else should be done here for ep0 during ep reset */ |
3308 | return; | |
3309 | } | |
3310 | ||
f5249461 MN |
3311 | if (!host_ep->hcpriv) |
3312 | return; | |
3313 | udev = (struct usb_device *) host_ep->hcpriv; | |
3314 | vdev = xhci->devs[udev->slot_id]; | |
cb53c517 | 3315 | |
cb53c517 MN |
3316 | if (!udev->slot_id || !vdev) |
3317 | return; | |
e34900f4 | 3318 | |
f5249461 | 3319 | ep = &vdev->eps[ep_index]; |
0c74d232 | 3320 | |
f5249461 | 3321 | /* Bail out if toggle is already being cleared by an endpoint reset */ | |
af1352f8 | 3322 | spin_lock_irqsave(&xhci->lock, flags); |
f5249461 MN |
3323 | if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { |
3324 | ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; | |
a01ba2a3 | 3325 | spin_unlock_irqrestore(&xhci->lock, flags); |
f5249461 MN |
3326 | return; |
3327 | } | |
a01ba2a3 | 3328 | spin_unlock_irqrestore(&xhci->lock, flags); |
f5249461 MN |
3329 | /* Only interrupt and bulk endpoints use data toggle; see USB2 spec 5.5.4 */ | |
3330 | if (usb_endpoint_xfer_control(&host_ep->desc) || | |
3331 | usb_endpoint_xfer_isoc(&host_ep->desc)) | |
3332 | return; | |
3333 | ||
3334 | ep_flag = xhci_get_endpoint_flag(&host_ep->desc); | |
3335 | ||
3336 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) | |
3337 | return; | |
3338 | ||
3bf0514d GKH |
3339 | stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT); |
3340 | if (!stop_cmd) | |
3341 | return; | |
3342 | ||
f5249461 MN |
3343 | cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT); |
3344 | if (!cfg_cmd) | |
3345 | goto cleanup; | |
3346 | ||
3347 | spin_lock_irqsave(&xhci->lock, flags); | |
3348 | ||
3349 | /* block queuing new trbs and ringing ep doorbell */ | |
3350 | ep->ep_state |= EP_SOFT_CLEAR_TOGGLE; | |
ddba5cd0 | 3351 | |
c92bcfa7 | 3352 | /* |
f5249461 MN |
3353 | * Make sure endpoint ring is empty before resetting the toggle/seq. |
3354 | * The driver is required to synchronously cancel all transfer requests. | |
3355 | * Stop the endpoint to force the xHC to update the output context. | |
c92bcfa7 | 3356 | */ |
a1587d97 | 3357 | |
f5249461 MN |
3358 | if (!list_empty(&ep->ring->td_list)) { |
3359 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); | |
3360 | spin_unlock_irqrestore(&xhci->lock, flags); | |
d89b7664 | 3361 | xhci_free_command(xhci, cfg_cmd); |
f5249461 MN |
3362 | goto cleanup; |
3363 | } | |
8de66b0e | 3364 | |
3bf0514d GKH |
3365 | err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, |
3366 | ep_index, 0); | |
8de66b0e | 3367 | if (err < 0) { |
3bf0514d GKH |
3368 | spin_unlock_irqrestore(&xhci->lock, flags); |
3369 | xhci_free_command(xhci, cfg_cmd); | |
8de66b0e BK |
3370 | xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n", | |
3371 | __func__, err); | |
3372 | goto cleanup; | |
3373 | } | |
3374 | ||
3bf0514d GKH |
3375 | xhci_ring_cmd_db(xhci); |
3376 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3377 | ||
3378 | wait_for_completion(stop_cmd->completion); | |
3379 | ||
f5249461 | 3380 | spin_lock_irqsave(&xhci->lock, flags); |
3bf0514d | 3381 | |
f5249461 MN |
3382 | /* config ep command clears toggle if add and drop ep flags are set */ |
3383 | ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); | |
597899d2 MN |
3384 | if (!ctrl_ctx) { |
3385 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3386 | xhci_free_command(xhci, cfg_cmd); | |
3387 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3388 | __func__); | |
3389 | goto cleanup; | |
3390 | } | |
3391 | ||
f5249461 MN |
3392 | xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, |
3393 | ctrl_ctx, ep_flag, ep_flag); | |
3394 | xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); | |
3395 | ||
8de66b0e | 3396 | err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, |
f5249461 | 3397 | udev->slot_id, false); |
8de66b0e BK |
3398 | if (err < 0) { |
3399 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3400 | xhci_free_command(xhci, cfg_cmd); | |
3401 | xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n", | |
3402 | __func__, err); | |
3403 | goto cleanup; | |
3404 | } | |
3405 | ||
f5249461 MN |
3406 | xhci_ring_cmd_db(xhci); |
3407 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3408 | ||
3409 | wait_for_completion(cfg_cmd->completion); | |
3410 | ||
f5249461 MN |
3411 | xhci_free_command(xhci, cfg_cmd); |
3412 | cleanup: | |
3bf0514d | 3413 | xhci_free_command(xhci, stop_cmd); |
a01ba2a3 | 3414 | spin_lock_irqsave(&xhci->lock, flags); |
f1ec7ae6 DH |
3415 | if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) |
3416 | ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; | |
a01ba2a3 | 3417 | spin_unlock_irqrestore(&xhci->lock, flags); |
a1587d97 SS |
3418 | } |
3419 | ||
8df75f42 SS |
3420 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
3421 | struct usb_device *udev, struct usb_host_endpoint *ep, | |
3422 | unsigned int slot_id) | |
3423 | { | |
3424 | int ret; | |
3425 | unsigned int ep_index; | |
3426 | unsigned int ep_state; | |
3427 | ||
3428 | if (!ep) | |
3429 | return -EINVAL; | |
64927730 | 3430 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
8df75f42 | 3431 | if (ret <= 0) |
243a1dd7 | 3432 | return ret ? ret : -EINVAL; |
a3901538 | 3433 | if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { |
8df75f42 SS |
3434 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion descriptor for ep 0x%x does not support streams\n", | |
3436 | ep->desc.bEndpointAddress); | |
3437 | return -EINVAL; | |
3438 | } | |
3439 | ||
3440 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
3441 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
3442 | if (ep_state & EP_HAS_STREAMS || | |
3443 | ep_state & EP_GETTING_STREAMS) { | |
3444 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x already has streams set up.\n", | |
3446 | ep->desc.bEndpointAddress); | |
3447 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for dynamic stream context array reallocation.\n"); | |
3449 | return -EINVAL; | |
3450 | } | |
3451 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { | |
3452 | xhci_warn(xhci, "Cannot set up streams for SuperSpeed bulk endpoint 0x%x; URBs are pending.\n", | |
3454 | ep->desc.bEndpointAddress); | |
3455 | return -EINVAL; | |
3456 | } | |
3457 | return 0; | |
3458 | } | |
3459 | ||
3460 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, | |
3461 | unsigned int *num_streams, unsigned int *num_stream_ctxs) | |
3462 | { | |
3463 | unsigned int max_streams; | |
3464 | ||
3465 | /* The stream context array size must be a power of two */ | |
3466 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); | |
3467 | /* | |
3468 | * Find out how many primary stream array entries the host controller | |
3469 | * supports. Later we may use secondary stream arrays (similar to 2nd | |
3470 | * level page entries), but that's an optional feature for xHCI host | |
3471 | * controllers. xHCs must support at least 4 stream IDs. | |
3472 | */ | |
3473 | max_streams = HCC_MAX_PSA(xhci->hcc_params); | |
3474 | if (*num_stream_ctxs > max_streams) { | |
3475 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", | |
3476 | max_streams); | |
3477 | *num_stream_ctxs = max_streams; | |
3478 | *num_streams = max_streams; | |
3479 | } | |
3480 | } | |
3481 | ||
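A standalone sketch of xhci_calculate_streams_entries()'s sizing rule: round the requested stream count up to a power of two (the stream context array size must be one), then clamp both figures to what the controller's MaxPSA field allows. roundup_pow_of_two() is reimplemented here for userspace:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static void calc_stream_entries(unsigned int max_psa,
				unsigned int *num_streams,
				unsigned int *num_stream_ctxs)
{
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	if (*num_stream_ctxs > max_psa) {
		*num_stream_ctxs = max_psa;
		*num_streams = max_psa;
	}
}

int main(void)
{
	unsigned int streams = 17, ctxs;

	calc_stream_entries(32, &streams, &ctxs);	/* host allows 32 */
	printf("streams=%u ctxs=%u\n", streams, ctxs);	/* -> 17, 32 */

	streams = 100;
	calc_stream_entries(32, &streams, &ctxs);	/* clamped */
	printf("streams=%u ctxs=%u\n", streams, ctxs);	/* -> 32, 32 */
	return 0;
}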
3482 | /* Returns an error code if one of the endpoints already has streams. | |
3483 | * This does not change any data structures, it only checks and gathers | |
3484 | * information. | |
3485 | */ | |
3486 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, | |
3487 | struct usb_device *udev, | |
3488 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
3489 | unsigned int *num_streams, u32 *changed_ep_bitmask) | |
3490 | { | |
8df75f42 SS |
3491 | unsigned int max_streams; |
3492 | unsigned int endpoint_flag; | |
3493 | int i; | |
3494 | int ret; | |
3495 | ||
3496 | for (i = 0; i < num_eps; i++) { | |
3497 | ret = xhci_check_streams_endpoint(xhci, udev, | |
3498 | eps[i], udev->slot_id); | |
3499 | if (ret < 0) | |
3500 | return ret; | |
3501 | ||
18b7ede5 | 3502 | max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); |
8df75f42 SS |
3503 | if (max_streams < (*num_streams - 1)) { |
3504 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", | |
3505 | eps[i]->desc.bEndpointAddress, | |
3506 | max_streams); | |
3507 | *num_streams = max_streams + 1; | |
3508 | } | |
3509 | ||
3510 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); | |
3511 | if (*changed_ep_bitmask & endpoint_flag) | |
3512 | return -EINVAL; | |
3513 | *changed_ep_bitmask |= endpoint_flag; | |
3514 | } | |
3515 | return 0; | |
3516 | } | |
3517 | ||
3518 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, | |
3519 | struct usb_device *udev, | |
3520 | struct usb_host_endpoint **eps, unsigned int num_eps) | |
3521 | { | |
3522 | u32 changed_ep_bitmask = 0; | |
3523 | unsigned int slot_id; | |
3524 | unsigned int ep_index; | |
3525 | unsigned int ep_state; | |
3526 | int i; | |
3527 | ||
3528 | slot_id = udev->slot_id; | |
3529 | if (!xhci->devs[slot_id]) | |
3530 | return 0; | |
3531 | ||
3532 | for (i = 0; i < num_eps; i++) { | |
3533 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3534 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
3535 | /* Are streams already being freed for the endpoint? */ | |
3536 | if (ep_state & EP_GETTING_NO_STREAMS) { | |
3537 | xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are being disabled already\n", | |
8df75f42 SS |
3540 | eps[i]->desc.bEndpointAddress); |
3541 | return 0; | |
3542 | } | |
3543 | /* Are there actually any streams to free? */ | |
3544 | if (!(ep_state & EP_HAS_STREAMS) && | |
3545 | !(ep_state & EP_GETTING_STREAMS)) { | |
3546 | xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are already disabled!\n", | |
8df75f42 SS |
3549 | eps[i]->desc.bEndpointAddress); |
3550 | xhci_warn(xhci, "WARN xhci_free_streams() called with non-streams endpoint\n"); | |
3552 | return 0; | |
3553 | } | |
3554 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); | |
3555 | } | |
3556 | return changed_ep_bitmask; | |
3557 | } | |
3558 | ||
3559 | /* | |
c2a298d9 | 3560 | * The USB device drivers use this function (through the HCD interface in USB |
8df75f42 SS |
3561 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
3562 | * coordinate mass storage command queueing across multiple endpoints (basically | |
3563 | * a stream ID == a task ID). | |
3564 | * | |
3565 | * Setting up streams involves allocating the same size stream context array | |
3566 | * for each endpoint and issuing a configure endpoint command for all endpoints. | |
3567 | * | |
3568 | * Don't allow the call to succeed if one endpoint only supports one stream | |
3569 | * (which means it doesn't support streams at all). | |
3570 | * | |
3571 | * Drivers may get less stream IDs than they asked for, if the host controller | |
3572 | * hardware or endpoints claim they can't support the number of requested | |
3573 | * stream IDs. | |
3574 | */ | |
3969384c | 3575 | static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
8df75f42 SS |
3576 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3577 | unsigned int num_streams, gfp_t mem_flags) | |
3578 | { | |
3579 | int i, ret; | |
3580 | struct xhci_hcd *xhci; | |
3581 | struct xhci_virt_device *vdev; | |
3582 | struct xhci_command *config_cmd; | |
92f8e767 | 3583 | struct xhci_input_control_ctx *ctrl_ctx; |
8df75f42 SS |
3584 | unsigned int ep_index; |
3585 | unsigned int num_stream_ctxs; | |
f9c589e1 | 3586 | unsigned int max_packet; |
8df75f42 SS |
3587 | unsigned long flags; |
3588 | u32 changed_ep_bitmask = 0; | |
3589 | ||
3590 | if (!eps) | |
3591 | return -EINVAL; | |
3592 | ||
3593 | /* Add one to the number of streams requested to account for | |
3594 | * stream 0 that is reserved for xHCI usage. | |
3595 | */ | |
3596 | num_streams += 1; | |
3597 | xhci = hcd_to_xhci(hcd); | |
3598 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", | |
3599 | num_streams); | |
3600 | ||
f7920884 | 3601 | /* MaxPSASize value 0 (2 streams) means streams are not supported */ |
8f873c1f HG |
3602 | if ((xhci->quirks & XHCI_BROKEN_STREAMS) || |
3603 | HCC_MAX_PSA(xhci->hcc_params) < 4) { | |
f7920884 HG |
3604 | xhci_dbg(xhci, "xHCI controller does not support streams.\n"); |
3605 | return -ENOSYS; | |
3606 | } | |
3607 | ||
14d49b7a | 3608 | config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); |
74e0b564 | 3609 | if (!config_cmd) |
8df75f42 | 3610 | return -ENOMEM; |
74e0b564 | 3611 | |
4daf9df5 | 3612 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); |
92f8e767 SS |
3613 | if (!ctrl_ctx) { |
3614 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3615 | __func__); | |
3616 | xhci_free_command(xhci, config_cmd); | |
3617 | return -ENOMEM; | |
3618 | } | |
8df75f42 SS |
3619 | |
3620 | /* Check to make sure all endpoints are not already configured for | |
3621 | * streams. While we're at it, find the maximum number of streams that | |
3622 | * all the endpoints will support and check for duplicate endpoints. | |
3623 | */ | |
3624 | spin_lock_irqsave(&xhci->lock, flags); | |
3625 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, | |
3626 | num_eps, &num_streams, &changed_ep_bitmask); | |
3627 | if (ret < 0) { | |
3628 | xhci_free_command(xhci, config_cmd); | |
3629 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3630 | return ret; | |
3631 | } | |
3632 | if (num_streams <= 1) { | |
3633 | xhci_warn(xhci, "WARN: endpoints can't handle more than one stream.\n"); | |
3635 | xhci_free_command(xhci, config_cmd); | |
3636 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3637 | return -EINVAL; | |
3638 | } | |
3639 | vdev = xhci->devs[udev->slot_id]; | |
25985edc | 3640 | /* Mark each endpoint as being in transition, so |
8df75f42 SS |
3641 | * xhci_urb_enqueue() will reject all URBs. |
3642 | */ | |
3643 | for (i = 0; i < num_eps; i++) { | |
3644 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3645 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; | |
3646 | } | |
3647 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3648 | ||
3649 | /* Setup internal data structures and allocate HW data structures for | |
3650 | * streams (but don't install the HW structures in the input context | |
3651 | * until we're sure all memory allocation succeeded). | |
3652 | */ | |
3653 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); | |
3654 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", | |
3655 | num_stream_ctxs, num_streams); | |
3656 | ||
3657 | for (i = 0; i < num_eps; i++) { | |
3658 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
734d3ddd | 3659 | max_packet = usb_endpoint_maxp(&eps[i]->desc); |
8df75f42 SS |
3660 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, |
3661 | num_stream_ctxs, | |
f9c589e1 MN |
3662 | num_streams, |
3663 | max_packet, mem_flags); | |
8df75f42 SS |
3664 | if (!vdev->eps[ep_index].stream_info) |
3665 | goto cleanup; | |
3666 | /* Set maxPstreams in endpoint context and update deq ptr to | |
3667 | * point to stream context array. FIXME | |
3668 | */ | |
3669 | } | |
3670 | ||
3671 | /* Set up the input context for a configure endpoint command. */ | |
3672 | for (i = 0; i < num_eps; i++) { | |
3673 | struct xhci_ep_ctx *ep_ctx; | |
3674 | ||
3675 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3676 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); | |
3677 | ||
3678 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, | |
3679 | vdev->out_ctx, ep_index); | |
3680 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, | |
3681 | vdev->eps[ep_index].stream_info); | |
3682 | } | |
3683 | /* Tell the HW to drop its old copy of the endpoint context info | |
3684 | * and add the updated copy from the input context. | |
3685 | */ | |
3686 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, | |
92f8e767 SS |
3687 | vdev->out_ctx, ctrl_ctx, |
3688 | changed_ep_bitmask, changed_ep_bitmask); | |
8df75f42 SS |
3689 | |
3690 | /* Issue and wait for the configure endpoint command */ | |
3691 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, | |
3692 | false, false); | |
3693 | ||
3694 | /* xHC rejected the configure endpoint command for some reason, so we | |
3695 | * leave the old ring intact and free our internal streams data | |
3696 | * structure. | |
3697 | */ | |
3698 | if (ret < 0) | |
3699 | goto cleanup; | |
3700 | ||
3701 | spin_lock_irqsave(&xhci->lock, flags); | |
3702 | for (i = 0; i < num_eps; i++) { | |
3703 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3704 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
3705 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", | |
3706 | udev->slot_id, ep_index); | |
3707 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; | |
3708 | } | |
3709 | xhci_free_command(xhci, config_cmd); | |
3710 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3711 | ||
712da5fc MN |
3712 | for (i = 0; i < num_eps; i++) { |
3713 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3714 | xhci_debugfs_create_stream_files(xhci, vdev, ep_index); | |
3715 | } | |
8df75f42 SS |
3716 | /* Subtract 1 for stream 0, which drivers can't use */ |
3717 | return num_streams - 1; | |
3718 | ||
3719 | cleanup: | |
3720 | /* If it didn't work, free the streams! */ | |
3721 | for (i = 0; i < num_eps; i++) { | |
3722 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3723 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 3724 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
3725 | /* FIXME Unset maxPstreams in endpoint context and |
3726 | * update deq ptr to point to the normal ring. | |
3727 | */ | |
3728 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
3729 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
3730 | xhci_endpoint_zero(xhci, vdev, eps[i]); | |
3731 | } | |
3732 | xhci_free_command(xhci, config_cmd); | |
3733 | return -ENOMEM; | |
3734 | } | |
3735 | ||
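From the class-driver side, this path is reached through usb_alloc_streams() in the USB core; a hedged sketch of a caller follows (uas-style mass storage is the classic user; gathering the bulk endpoints into eps[] is assumed done elsewhere, and the stream count of 16 is arbitrary):

#include <linux/usb.h>

/* Sketch: eps[] was filled from the interface's bulk endpoints. */
static int setup_task_streams(struct usb_interface *intf,
			      struct usb_host_endpoint **eps,
			      unsigned int num_eps)
{
	int streams;

	/* Ask for 16 stream IDs; the core may grant fewer than requested. */
	streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
	if (streams < 0)
		return streams;	/* host or endpoints lack stream support */

	pr_info("got %d usable stream IDs\n", streams);
	return 0;
}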
3736 | /* Transition the endpoint from using streams to being a "normal" endpoint | |
3737 | * without streams. | |
3738 | * | |
3739 | * Modify the endpoint context state, submit a configure endpoint command, | |
3740 | * and free all endpoint rings for streams if that completes successfully. | |
3741 | */ | |
3969384c | 3742 | static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
8df75f42 SS |
3743 | struct usb_host_endpoint **eps, unsigned int num_eps, |
3744 | gfp_t mem_flags) | |
3745 | { | |
3746 | int i, ret; | |
3747 | struct xhci_hcd *xhci; | |
3748 | struct xhci_virt_device *vdev; | |
3749 | struct xhci_command *command; | |
92f8e767 | 3750 | struct xhci_input_control_ctx *ctrl_ctx; |
8df75f42 SS |
3751 | unsigned int ep_index; |
3752 | unsigned long flags; | |
3753 | u32 changed_ep_bitmask; | |
3754 | ||
3755 | xhci = hcd_to_xhci(hcd); | |
3756 | vdev = xhci->devs[udev->slot_id]; | |
3757 | ||
3758 | /* Set up a configure endpoint command to remove the streams rings */ | |
3759 | spin_lock_irqsave(&xhci->lock, flags); | |
3760 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, | |
3761 | udev, eps, num_eps); | |
3762 | if (changed_ep_bitmask == 0) { | |
3763 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3764 | return -EINVAL; | |
3765 | } | |
3766 | ||
3767 | /* Use the xhci_command structure from the first endpoint. We may have | |
3768 | * allocated too many, but the driver may call xhci_free_streams() for | |
3769 | * each endpoint it grouped into one call to xhci_alloc_streams(). | |
3770 | */ | |
3771 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); | |
3772 | command = vdev->eps[ep_index].stream_info->free_streams_command; | |
4daf9df5 | 3773 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
92f8e767 | 3774 | if (!ctrl_ctx) { |
1f21569c | 3775 | spin_unlock_irqrestore(&xhci->lock, flags); |
92f8e767 SS |
3776 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
3777 | __func__); | |
3778 | return -EINVAL; | |
3779 | } | |
3780 | ||
8df75f42 SS |
3781 | for (i = 0; i < num_eps; i++) { |
3782 | struct xhci_ep_ctx *ep_ctx; | |
3783 | ||
3784 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3785 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); | |
3786 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= | |
3787 | EP_GETTING_NO_STREAMS; | |
3788 | ||
3789 | xhci_endpoint_copy(xhci, command->in_ctx, | |
3790 | vdev->out_ctx, ep_index); | |
4daf9df5 | 3791 | xhci_setup_no_streams_ep_input_ctx(ep_ctx, |
8df75f42 SS |
3792 | &vdev->eps[ep_index]); |
3793 | } | |
3794 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | |
92f8e767 SS |
3795 | vdev->out_ctx, ctrl_ctx, |
3796 | changed_ep_bitmask, changed_ep_bitmask); | |
8df75f42 SS |
3797 | spin_unlock_irqrestore(&xhci->lock, flags); |
3798 | ||
3799 | /* Issue and wait for the configure endpoint command, | |
3800 | * which must succeed. | |
3801 | */ | |
3802 | ret = xhci_configure_endpoint(xhci, udev, command, | |
3803 | false, true); | |
3804 | ||
3805 | /* xHC rejected the configure endpoint command for some reason, so we | |
3806 | * leave the streams rings intact. | |
3807 | */ | |
3808 | if (ret < 0) | |
3809 | return ret; | |
3810 | ||
3811 | spin_lock_irqsave(&xhci->lock, flags); | |
3812 | for (i = 0; i < num_eps; i++) { | |
3813 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3814 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 3815 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
3816 | /* FIXME Unset maxPstreams in endpoint context and |
3817 | * update deq ptr to point to normal string ring. | |
3818 | */ | |
3819 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; | |
3820 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
3821 | } | |
3822 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3823 | ||
3824 | return 0; | |
3825 | } | |
3826 | ||
2cf95c18 SS |
3827 | /* |
3828 | * Deletes endpoint resources for endpoints that were active before a Reset | |
3829 | * Device command, or a Disable Slot command. The Reset Device command leaves | |
3830 | * the control endpoint intact, whereas the Disable Slot command deletes it. | |
3831 | * | |
3832 | * Must be called with xhci->lock held. | |
3833 | */ | |
3834 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | |
3835 | struct xhci_virt_device *virt_dev, bool drop_control_ep) | |
3836 | { | |
3837 | int i; | |
3838 | unsigned int num_dropped_eps = 0; | |
3839 | unsigned int drop_flags = 0; | |
3840 | ||
3841 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { | |
3842 | if (virt_dev->eps[i].ring) { | |
3843 | drop_flags |= 1 << i; | |
3844 | num_dropped_eps++; | |
3845 | } | |
3846 | } | |
3847 | xhci->num_active_eps -= num_dropped_eps; | |
3848 | if (num_dropped_eps) | |
4bdfe4c3 XR |
3849 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
3850 | "Dropped %u ep ctxs, flags = 0x%x, " | |
3851 | "%u now active.", | |
2cf95c18 SS |
3852 | num_dropped_eps, drop_flags, |
3853 | xhci->num_active_eps); | |
3854 | } | |
3855 | ||
76d98856 KC |
3856 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); |
3857 | ||
2a8f82c4 SS |
3858 | /* |
3859 | * This submits a Reset Device Command, which will set the device state to 0, | |
3860 | * set the device address to 0, and disable all the endpoints except the default | |
3861 | * control endpoint. The USB core should come back and call | |
3862 | * xhci_address_device(), and then re-set up the configuration. If this is | |
3863 | * called because of a usb_reset_and_verify_device(), then the old alternate | |
3864 | * settings will be re-installed through the normal bandwidth allocation | |
3865 | * functions. | |
3866 | * | |
3867 | * Wait for the Reset Device command to finish. Remove all structures | |
3868 | * associated with the endpoints that were disabled. Clear the input device | |
c5628a2a | 3869 | * structure? Reset the control endpoint 0 max packet size? |
f0615c45 AX |
3870 | * |
3871 | * If the virt_dev to be reset does not exist or does not match the udev, | |
3872 | * it means the device is lost, possibly due to the xHC restore error and | |
3873 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to | |
3874 | * re-allocate the device. | |
2a8f82c4 | 3875 | */ |
3969384c LB |
3876 | static int xhci_discover_or_reset_device(struct usb_hcd *hcd, |
3877 | struct usb_device *udev) | |
2a8f82c4 SS |
3878 | { |
3879 | int ret, i; | |
3880 | unsigned long flags; | |
3881 | struct xhci_hcd *xhci; | |
3882 | unsigned int slot_id; | |
3883 | struct xhci_virt_device *virt_dev; | |
3884 | struct xhci_command *reset_device_cmd; | |
001fd382 | 3885 | struct xhci_slot_ctx *slot_ctx; |
2e27980e | 3886 | int old_active_eps = 0; |
2a8f82c4 | 3887 | |
f0615c45 | 3888 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
2a8f82c4 SS |
3889 | if (ret <= 0) |
3890 | return ret; | |
3891 | xhci = hcd_to_xhci(hcd); | |
3892 | slot_id = udev->slot_id; | |
3893 | virt_dev = xhci->devs[slot_id]; | |
f0615c45 AX |
3894 | if (!virt_dev) { |
3895 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | |
3896 | "not exist. Re-allocate the device\n", slot_id); | |
3897 | ret = xhci_alloc_dev(hcd, udev); | |
3898 | if (ret == 1) | |
3899 | return 0; | |
3900 | else | |
3901 | return -EINVAL; | |
3902 | } | |
3903 | ||
326124a0 BC |
3904 | if (virt_dev->tt_info) |
3905 | old_active_eps = virt_dev->tt_info->active_eps; | |
3906 | ||
f0615c45 AX |
3907 | if (virt_dev->udev != udev) { |
3908 | /* If the virt_dev and the udev do not match, this virt_dev | |
3909 | * may belong to another udev. | |
3910 | * Re-allocate the device. | |
3911 | */ | |
3912 | xhci_dbg(xhci, "The device to be reset with slot ID %u does not match the udev. Re-allocate the device\n", | |
3914 | slot_id); | |
3915 | ret = xhci_alloc_dev(hcd, udev); | |
3916 | if (ret == 1) | |
3917 | return 0; | |
3918 | else | |
3919 | return -EINVAL; | |
3920 | } | |
2a8f82c4 | 3921 | |
001fd382 ML |
3922 | /* If device is not setup, there is no point in resetting it */ |
3923 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | |
3924 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == | |
3925 | SLOT_STATE_DISABLED) | |
3926 | return 0; | |
3927 | ||
76d98856 KC |
3928 | if (xhci->quirks & XHCI_ETRON_HOST) { |
3929 | /* | |
3930 | * Obtain a new device slot to inform the xHCI host that | |
3931 | * the USB device has been reset. | |
3932 | */ | |
3933 | ret = xhci_disable_slot(xhci, udev->slot_id); | |
3934 | xhci_free_virt_device(xhci, udev->slot_id); | |
3935 | if (!ret) { | |
3936 | ret = xhci_alloc_dev(hcd, udev); | |
3937 | if (ret == 1) | |
3938 | ret = 0; | |
3939 | else | |
3940 | ret = -EINVAL; | |
3941 | } | |
3942 | return ret; | |
3943 | } | |
3944 | ||
19a7d0d6 FB |
3945 | trace_xhci_discover_or_reset_device(slot_ctx); |
3946 | ||
2a8f82c4 SS |
3947 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
3948 | /* Allocate the command structure that holds the struct completion. | |
3949 | * Assume we're in process context, since the normal device reset | |
3950 | * process has to wait for the device anyway. Storage devices are | |
3951 | * reset as part of error handling, so use GFP_NOIO instead of | |
3952 | * GFP_KERNEL. | |
3953 | */ | |
103afda0 | 3954 | reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); |
2a8f82c4 SS |
3955 | if (!reset_device_cmd) { |
3956 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | |
3957 | return -ENOMEM; | |
3958 | } | |
3959 | ||
3960 | /* Attempt to submit the Reset Device command to the command ring */ | |
3961 | spin_lock_irqsave(&xhci->lock, flags); | |
7a3783ef | 3962 | |
ddba5cd0 | 3963 | ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); |
2a8f82c4 SS |
3964 | if (ret) { |
3965 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
2a8f82c4 SS |
3966 | spin_unlock_irqrestore(&xhci->lock, flags); |
3967 | goto command_cleanup; | |
3968 | } | |
3969 | xhci_ring_cmd_db(xhci); | |
3970 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3971 | ||
3972 | /* Wait for the Reset Device command to finish */ | |
c311e391 | 3973 | wait_for_completion(reset_device_cmd->completion); |
2a8f82c4 SS |
3974 | |
3975 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, | |
3976 | * unless we tried to reset a slot ID that wasn't enabled, | |
3977 | * or the device wasn't in the addressed or configured state. | |
3978 | */ | |
3979 | ret = reset_device_cmd->status; | |
3980 | switch (ret) { | |
0b7c105a | 3981 | case COMP_COMMAND_ABORTED: |
604d02a2 | 3982 | case COMP_COMMAND_RING_STOPPED: |
c311e391 MN |
3983 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); |
3984 | ret = -ETIME; | |
3985 | goto command_cleanup; | |
0b7c105a FB |
3986 | case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ |
3987 | case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ | |
38a532a6 | 3988 | xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", |
2a8f82c4 SS |
3989 | slot_id, |
3990 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); | |
38a532a6 | 3991 | xhci_dbg(xhci, "Not freeing device rings.\n"); |
2a8f82c4 SS |
3992 | /* Don't treat this as an error. May change my mind later. */ |
3993 | ret = 0; | |
3994 | goto command_cleanup; | |
3995 | case COMP_SUCCESS: | |
3996 | xhci_dbg(xhci, "Successful reset device command.\n"); | |
3997 | break; | |
3998 | default: | |
3999 | if (xhci_is_vendor_info_code(xhci, ret)) | |
4000 | break; | |
4001 | xhci_warn(xhci, "Unknown completion code %u for reset device command.\n", |
4002 | ret); |
4003 | ret = -EINVAL; | |
4004 | goto command_cleanup; | |
4005 | } | |
4006 | ||
2cf95c18 SS |
4007 | /* Free up host controller endpoint resources */ |
4008 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
4009 | spin_lock_irqsave(&xhci->lock, flags); | |
4010 | /* Don't delete the default control endpoint resources */ | |
4011 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); | |
4012 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4013 | } | |
4014 | ||
c5628a2a | 4015 | /* Everything but endpoint 0 is disabled, so free the rings. */ |
98871e94 | 4016 | for (i = 1; i < 31; i++) { |
2dea75d9 DT |
4017 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
4018 | ||
4019 | if (ep->ep_state & EP_HAS_STREAMS) { | |
df613834 HG |
4020 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", |
4021 | xhci_get_endpoint_address(i)); | |
2dea75d9 DT |
4022 | xhci_free_stream_info(xhci, ep->stream_info); |
4023 | ep->stream_info = NULL; | |
4024 | ep->ep_state &= ~EP_HAS_STREAMS; | |
4025 | } | |
4026 | ||
4027 | if (ep->ring) { | |
b85a2ebd WC |
4028 | if (ep->sideband) |
4029 | xhci_sideband_notify_ep_ring_free(ep->sideband, i); | |
02b6fdc2 | 4030 | xhci_debugfs_remove_endpoint(xhci, virt_dev, i); |
c5628a2a | 4031 | xhci_free_endpoint_ring(xhci, virt_dev, i); |
2dea75d9 | 4032 | } |
2e27980e SS |
4033 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
4034 | xhci_drop_ep_from_interval_table(xhci, | |
4035 | &virt_dev->eps[i].bw_info, | |
4036 | virt_dev->bw_table, | |
4037 | udev, | |
4038 | &virt_dev->eps[i], | |
4039 | virt_dev->tt_info); | |
9af5d71d | 4040 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); |
2a8f82c4 | 4041 | } |
2e27980e SS |
4042 | /* If necessary, update the number of active TTs on this root port */ |
4043 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
b8c3b718 | 4044 | virt_dev->flags = 0; |
2a8f82c4 SS |
4045 | ret = 0; |
4046 | ||
4047 | command_cleanup: | |
4048 | xhci_free_command(xhci, reset_device_cmd); | |
4049 | return ret; | |
4050 | } | |
4051 | ||
3ffbba95 SS |
4052 | /* |
4053 | * At this point, the struct usb_device is about to go away, the device has | |
4054 | * disconnected, all traffic has been stopped, and the endpoints have been |
4055 | * disabled. Free any HC data structures associated with that device. | |
4056 | */ | |
3969384c | 4057 | static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
3ffbba95 SS |
4058 | { |
4059 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
6f5165cf | 4060 | struct xhci_virt_device *virt_dev; |
19a7d0d6 | 4061 | struct xhci_slot_ctx *slot_ctx; |
a2bc47c4 | 4062 | unsigned long flags; |
64927730 | 4063 | int i, ret; |
ddba5cd0 | 4064 | |
c8476fb8 SN |
4065 | /* |
4066 | * We called pm_runtime_get_noresume when the device was attached. | |
4067 | * Decrement the counter here to allow the controller to runtime suspend |
4068 | * if no devices remain. | |
4069 | */ | |
4070 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | |
e7ecf069 | 4071 | pm_runtime_put_noidle(hcd->self.controller); |
c8476fb8 | 4072 | |
64927730 | 4073 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
7bd89b40 SS |
4074 | /* If the host is halted due to driver unload, we still need to free the |
4075 | * device. | |
4076 | */ | |
cd3f1790 | 4077 | if (ret <= 0 && ret != -ENODEV) |
3ffbba95 | 4078 | return; |
64927730 | 4079 | |
6f5165cf | 4080 | virt_dev = xhci->devs[udev->slot_id]; |
19a7d0d6 FB |
4081 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4082 | trace_xhci_free_dev(slot_ctx); | |
6f5165cf SS |
4083 | |
4084 | /* Stop any wayward timer functions (which may grab the lock) */ | |
25355e04 | 4085 | for (i = 0; i < 31; i++) |
9983a5fc | 4086 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
44a182b9 | 4087 | virt_dev->udev = NULL; |
7faac195 | 4088 | xhci_disable_slot(xhci, udev->slot_id); |
a2bc47c4 MN |
4089 | |
4090 | spin_lock_irqsave(&xhci->lock, flags); | |
7faac195 | 4091 | xhci_free_virt_device(xhci, udev->slot_id); |
a2bc47c4 MN |
4092 | spin_unlock_irqrestore(&xhci->lock, flags); |
4093 | ||
f9e609b8 GZ |
4094 | } |
4095 | ||
cd3f1790 | 4096 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
f9e609b8 | 4097 | { |
cd3f1790 | 4098 | struct xhci_command *command; |
f9e609b8 GZ |
4099 | unsigned long flags; |
4100 | u32 state; | |
98d107b8 | 4101 | int ret; |
f9e609b8 | 4102 | |
7faac195 | 4103 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
f9e609b8 GZ |
4104 | if (!command) |
4105 | return -ENOMEM; | |
4106 | ||
9334367c IJ |
4107 | xhci_debugfs_remove_slot(xhci, slot_id); |
4108 | ||
3ffbba95 | 4109 | spin_lock_irqsave(&xhci->lock, flags); |
c526d0d4 | 4110 | /* Don't disable the slot if the host controller is dead. */ |
b0ba9720 | 4111 | state = readl(&xhci->op_regs->status); |
7bd89b40 SS |
4112 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
4113 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | |
c526d0d4 | 4114 | spin_unlock_irqrestore(&xhci->lock, flags); |
ddba5cd0 | 4115 | kfree(command); |
dcabc76f | 4116 | return -ENODEV; |
c526d0d4 SS |
4117 | } |
4118 | ||
f9e609b8 GZ |
4119 | ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, |
4120 | slot_id); | |
4121 | if (ret) { | |
3ffbba95 | 4122 | spin_unlock_irqrestore(&xhci->lock, flags); |
cd3f1790 | 4123 | kfree(command); |
f9e609b8 | 4124 | return ret; |
3ffbba95 | 4125 | } |
23e3be11 | 4126 | xhci_ring_cmd_db(xhci); |
3ffbba95 | 4127 | spin_unlock_irqrestore(&xhci->lock, flags); |
7faac195 MN |
4128 | |
4129 | wait_for_completion(command->completion); | |
4130 | ||
4131 | if (command->status != COMP_SUCCESS) | |
4132 | xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", | |
4133 | slot_id, command->status); | |
4134 | ||
4135 | xhci_free_command(xhci, command); | |
4136 | ||
98d107b8 | 4137 | return 0; |
3ffbba95 SS |
4138 | } |
4139 | ||
2cf95c18 SS |
4140 | /* |
4141 | * Checks if we have enough host controller resources for the default control | |
4142 | * endpoint. | |
4143 | * | |
4144 | * Must be called with xhci->lock held. | |
4145 | */ | |
4146 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) | |
4147 | { | |
4148 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { | |
4bdfe4c3 XR |
4149 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
4150 | "Not enough ep ctxs: " | |
4151 | "%u active, need to add 1, limit is %u.", | |
2cf95c18 SS |
4152 | xhci->num_active_eps, xhci->limit_active_eps); |
4153 | return -ENOMEM; | |
4154 | } | |
4155 | xhci->num_active_eps += 1; | |
4bdfe4c3 XR |
4156 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
4157 | "Adding 1 ep ctx, %u now active.", | |
2cf95c18 SS |
4158 | xhci->num_active_eps); |
4159 | return 0; | |
4160 | } | |
4161 | ||
4162 | ||
3ffbba95 SS |
4163 | /* |
4164 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | |
4165 | * timed out, or allocating memory failed. Returns 1 on success. | |
4166 | */ | |
4167 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
4168 | { | |
4169 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
19a7d0d6 FB |
4170 | struct xhci_virt_device *vdev; |
4171 | struct xhci_slot_ctx *slot_ctx; | |
3ffbba95 | 4172 | unsigned long flags; |
a00918d0 | 4173 | int ret, slot_id; |
ddba5cd0 MN |
4174 | struct xhci_command *command; |
4175 | ||
103afda0 | 4176 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
ddba5cd0 MN |
4177 | if (!command) |
4178 | return 0; | |
3ffbba95 SS |
4179 | |
4180 | spin_lock_irqsave(&xhci->lock, flags); | |
ddba5cd0 | 4181 | ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); |
3ffbba95 SS |
4182 | if (ret) { |
4183 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4184 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
87e44f2a | 4185 | xhci_free_command(xhci, command); |
3ffbba95 SS |
4186 | return 0; |
4187 | } | |
23e3be11 | 4188 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
4189 | spin_unlock_irqrestore(&xhci->lock, flags); |
4190 | ||
c311e391 | 4191 | wait_for_completion(command->completion); |
c2d3d49b | 4192 | slot_id = command->slot_id; |
3ffbba95 | 4193 | |
a00918d0 | 4194 | if (!slot_id || command->status != COMP_SUCCESS) { |
e11487f1 MN |
4195 | xhci_err(xhci, "Error while assigning device slot ID: %s\n", |
4196 | xhci_trb_comp_code_string(command->status)); | |
be982038 SS |
4197 | xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", |
4198 | HCS_MAX_SLOTS( | |
4199 | readl(&xhci->cap_regs->hcs_params1))); | |
87e44f2a | 4200 | xhci_free_command(xhci, command); |
3ffbba95 SS |
4201 | return 0; |
4202 | } | |
2cf95c18 | 4203 | |
cd3f1790 LB |
4204 | xhci_free_command(xhci, command); |
4205 | ||
2cf95c18 SS |
4206 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
4207 | spin_lock_irqsave(&xhci->lock, flags); | |
4208 | ret = xhci_reserve_host_control_ep_resources(xhci); | |
4209 | if (ret) { | |
4210 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4211 | xhci_warn(xhci, "Not enough host resources, " | |
4212 | "active endpoint contexts = %u\n", | |
4213 | xhci->num_active_eps); | |
4214 | goto disable_slot; | |
4215 | } | |
4216 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4217 | } | |
4218 | /* Use GFP_NOIO, since this function can be called from | |
a6d940dd SS |
4219 | * xhci_discover_or_reset_device(), which may be called as part of |
4220 | * mass storage driver error handling. | |
4221 | */ | |
a00918d0 | 4222 | if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { |
3ffbba95 | 4223 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
2cf95c18 | 4224 | goto disable_slot; |
3ffbba95 | 4225 | } |
19a7d0d6 FB |
4226 | vdev = xhci->devs[slot_id]; |
4227 | slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); | |
4228 | trace_xhci_alloc_dev(slot_ctx); | |
4229 | ||
a00918d0 | 4230 | udev->slot_id = slot_id; |
c8476fb8 | 4231 | |
02b6fdc2 LB |
4232 | xhci_debugfs_create_slot(xhci, slot_id); |
4233 | ||
c8476fb8 SN |
4234 | /* |
4235 | * If resetting upon resume, we can't put the controller into runtime | |
4236 | * suspend if there is a device attached. | |
4237 | */ | |
4238 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | |
e7ecf069 | 4239 | pm_runtime_get_noresume(hcd->self.controller); |
c8476fb8 | 4240 | |
3ffbba95 SS |
4241 | /* Is this a LS or FS device under a HS hub? */ |
4242 | /* Hub or peripheral? */ |
3ffbba95 | 4243 | return 1; |
2cf95c18 SS |
4244 | |
4245 | disable_slot: | |
7faac195 MN |
4246 | xhci_disable_slot(xhci, udev->slot_id); |
4247 | xhci_free_virt_device(xhci, udev->slot_id); | |
11ec7588 LB |
4248 | |
4249 | return 0; | |
3ffbba95 SS |
4250 | } |
4251 | ||
a769154c HG |
4252 | /** |
4253 | * xhci_setup_device - issues an Address Device command to assign a unique | |
4254 | * USB bus address. | |
4255 | * @hcd: USB host controller data structure. | |
4256 | * @udev: USB dev structure representing the connected device. | |
4257 | * @setup: Enum specifying setup mode: address only or with context. | |
4258 | * @timeout_ms: Max wait time (ms) for the command operation to complete. | |
4259 | * | |
4260 | * Return: 0 if successful; otherwise, negative error code. | |
3ffbba95 | 4261 | */ |
48fc7dbd | 4262 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
a769154c | 4263 | enum xhci_setup_dev setup, unsigned int timeout_ms) |
3ffbba95 | 4264 | { |
6f8ffc0b | 4265 | const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; |
3ffbba95 | 4266 | unsigned long flags; |
3ffbba95 SS |
4267 | struct xhci_virt_device *virt_dev; |
4268 | int ret = 0; | |
4269 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
d115b048 JY |
4270 | struct xhci_slot_ctx *slot_ctx; |
4271 | struct xhci_input_control_ctx *ctrl_ctx; | |
8e595a5d | 4272 | u64 temp_64; |
a00918d0 CB |
4273 | struct xhci_command *command = NULL; |
4274 | ||
4275 | mutex_lock(&xhci->mutex); | |
3ffbba95 | 4276 | |
90797aee LB |
4277 | if (xhci->xhc_state) { /* dying, removing or halted */ |
4278 | ret = -ESHUTDOWN; | |
448116bf | 4279 | goto out; |
90797aee | 4280 | } |
448116bf | 4281 | |
3ffbba95 | 4282 | if (!udev->slot_id) { |
84a99f6f XR |
4283 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4284 | "Bad Slot ID %d", udev->slot_id); | |
a00918d0 CB |
4285 | ret = -EINVAL; |
4286 | goto out; | |
3ffbba95 SS |
4287 | } |
4288 | ||
3ffbba95 SS |
4289 | virt_dev = xhci->devs[udev->slot_id]; |
4290 | ||
7ed603ec ME |
4291 | if (WARN_ON(!virt_dev)) { |
4292 | /* | |
4293 | * In a plug/unplug torture test with an NEC controller, a NULL |
4294 | * pointer dereference was observed once because virt_dev was 0. |
4295 | * Print useful debug rather than crash if it is observed again! | |
4296 | */ | |
4297 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", | |
4298 | udev->slot_id); | |
a00918d0 CB |
4299 | ret = -EINVAL; |
4300 | goto out; | |
7ed603ec | 4301 | } |
19a7d0d6 FB |
4302 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4303 | trace_xhci_setup_device_slot(slot_ctx); | |
7ed603ec | 4304 | |
f161ead7 | 4305 | if (setup == SETUP_CONTEXT_ONLY) { |
f161ead7 MN |
4306 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
4307 | SLOT_STATE_DEFAULT) { | |
4308 | xhci_dbg(xhci, "Slot already in default state\n"); | |
a00918d0 | 4309 | goto out; |
f161ead7 MN |
4310 | } |
4311 | } | |
4312 | ||
103afda0 | 4313 | command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
a00918d0 CB |
4314 | if (!command) { |
4315 | ret = -ENOMEM; | |
4316 | goto out; | |
4317 | } | |
ddba5cd0 MN |
4318 | |
4319 | command->in_ctx = virt_dev->in_ctx; | |
a769154c | 4320 | command->timeout_ms = timeout_ms; |
ddba5cd0 | 4321 | |
f0615c45 | 4322 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
4daf9df5 | 4323 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
92f8e767 SS |
4324 | if (!ctrl_ctx) { |
4325 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
4326 | __func__); | |
a00918d0 CB |
4327 | ret = -EINVAL; |
4328 | goto out; | |
92f8e767 | 4329 | } |
f0615c45 AX |
4330 | /* |
4331 | * If this is the first Set Address since device plug-in or | |
4332 | * virt_device reallocation after a resume with an xHCI power loss, |
4333 | * then set up the slot context. | |
4334 | */ | |
4335 | if (!slot_ctx->dev_info) | |
3ffbba95 | 4336 | xhci_setup_addressable_virt_dev(xhci, udev); |
f0615c45 | 4337 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
2d1ee590 SS |
4338 | else |
4339 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | |
d31c285b SS |
4340 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
4341 | ctrl_ctx->drop_flags = 0; | |
4342 | ||
1d27fabe | 4343 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
0c052aab | 4344 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
3ffbba95 | 4345 | |
90d6d573 | 4346 | trace_xhci_address_ctrl_ctx(ctrl_ctx); |
f88ba78d | 4347 | spin_lock_irqsave(&xhci->lock, flags); |
a711edee | 4348 | trace_xhci_setup_device(virt_dev); |
ddba5cd0 | 4349 | ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, |
48fc7dbd | 4350 | udev->slot_id, setup); |
3ffbba95 SS |
4351 | if (ret) { |
4352 | spin_unlock_irqrestore(&xhci->lock, flags); | |
84a99f6f XR |
4353 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4354 | "FIXME: allocate a command ring segment"); | |
a00918d0 | 4355 | goto out; |
3ffbba95 | 4356 | } |
23e3be11 | 4357 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
4358 | spin_unlock_irqrestore(&xhci->lock, flags); |
4359 | ||
4360 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ | |
c311e391 MN |
4361 | wait_for_completion(command->completion); |
4362 | ||
3ffbba95 SS |
4363 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
4364 | * the SetAddress() 'recovery interval' required by USB" and for aborting |
4365 | * the command on a timeout. |
4366 | */ | |
9ea1833e | 4367 | switch (command->status) { |
0b7c105a | 4368 | case COMP_COMMAND_ABORTED: |
604d02a2 | 4369 | case COMP_COMMAND_RING_STOPPED: |
c311e391 MN |
4370 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
4371 | ret = -ETIME; | |
4372 | break; | |
0b7c105a FB |
4373 | case COMP_CONTEXT_STATE_ERROR: |
4374 | case COMP_SLOT_NOT_ENABLED_ERROR: | |
6f8ffc0b DW |
4375 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", |
4376 | act, udev->slot_id); | |
3ffbba95 SS |
4377 | ret = -EINVAL; |
4378 | break; | |
0b7c105a | 4379 | case COMP_USB_TRANSACTION_ERROR: |
6f8ffc0b | 4380 | dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); |
651aaf36 LB |
4381 | |
4382 | mutex_unlock(&xhci->mutex); | |
4383 | ret = xhci_disable_slot(xhci, udev->slot_id); | |
7faac195 | 4384 | xhci_free_virt_device(xhci, udev->slot_id); |
af8e119f MN |
4385 | if (!ret) { |
4386 | if (xhci_alloc_dev(hcd, udev) == 1) | |
4387 | xhci_setup_addressable_virt_dev(xhci, udev); | |
4388 | } | |
651aaf36 LB |
4389 | kfree(command->completion); |
4390 | kfree(command); | |
4391 | return -EPROTO; | |
0b7c105a | 4392 | case COMP_INCOMPATIBLE_DEVICE_ERROR: |
6f8ffc0b DW |
4393 | dev_warn(&udev->dev, |
4394 | "ERROR: Incompatible device for setup %s command\n", act); | |
f6ba6fe2 AH |
4395 | ret = -ENODEV; |
4396 | break; | |
3ffbba95 | 4397 | case COMP_SUCCESS: |
84a99f6f | 4398 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
6f8ffc0b | 4399 | "Successful setup %s command", act); |
3ffbba95 SS |
4400 | break; |
4401 | default: | |
6f8ffc0b DW |
4402 | xhci_err(xhci, |
4403 | "ERROR: unexpected setup %s command completion code 0x%x.\n", | |
9ea1833e | 4404 | act, command->status); |
1d27fabe | 4405 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); |
3ffbba95 SS |
4406 | ret = -EINVAL; |
4407 | break; | |
4408 | } | |
a00918d0 CB |
4409 | if (ret) |
4410 | goto out; | |
f7b2e403 | 4411 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
84a99f6f XR |
4412 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
4413 | "Op regs DCBAA ptr = %#016llx", temp_64); | |
4414 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
4415 | "Slot ID %d dcbaa entry @%p = %#016llx", | |
4416 | udev->slot_id, | |
4417 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | |
4418 | (unsigned long long) | |
4419 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); | |
4420 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
4421 | "Output Context DMA address = %#08llx", | |
d115b048 | 4422 | (unsigned long long)virt_dev->out_ctx->dma); |
1d27fabe | 4423 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
0c052aab | 4424 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
3ffbba95 SS |
4425 | /* |
4426 | * USB core uses address 1 for the roothubs, so we add one to the | |
4427 | * address given back to us by the HC. | |
4428 | */ | |
1d27fabe | 4429 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, |
0c052aab | 4430 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
f94e0186 | 4431 | /* Zero the input context control for later use */ |
d115b048 JY |
4432 | ctrl_ctx->add_flags = 0; |
4433 | ctrl_ctx->drop_flags = 0; | |
4998f1ef JL |
4434 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
4435 | udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); | |
3ffbba95 | 4436 | |
84a99f6f | 4437 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
a2cdc343 DW |
4438 | "Internal device address = %d", |
4439 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); | |
a00918d0 CB |
4440 | out: |
4441 | mutex_unlock(&xhci->mutex); | |
87e44f2a LB |
4442 | if (command) { |
4443 | kfree(command->completion); | |
4444 | kfree(command); | |
4445 | } | |
a00918d0 | 4446 | return ret; |
3ffbba95 SS |
4447 | } |
4448 | ||
a769154c HG |
4449 | static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, |
4450 | unsigned int timeout_ms) | |
48fc7dbd | 4451 | { |
a769154c | 4452 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms); |
48fc7dbd DW |
4453 | } |
4454 | ||
3969384c | 4455 | static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
48fc7dbd | 4456 | { |
a769154c HG |
4457 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY, |
4458 | XHCI_CMD_DEFAULT_TIMEOUT); | |
48fc7dbd DW |
4459 | } |
4460 | ||
3f5eb141 LT |
4461 | /* |
4462 | * Translate the port index into the real index in the HW port status |
4463 | * registers: calculate the offset between the port's PORTSC register |
4464 | * and the port status base, then divide by the size of the per-port |
4465 | * register block to get the real index. The raw port number is 1-based. |
4466 | */ | |
4467 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) | |
4468 | { | |
38986ffa | 4469 | struct xhci_hub *rhub; |
3f5eb141 | 4470 | |
38986ffa MN |
4471 | rhub = xhci_get_rhub(hcd); |
4472 | return rhub->ports[port1 - 1]->hw_portnum + 1; | |
3f5eb141 LT |
4473 | } |
4474 | ||
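/*
 * Editorial example (hypothetical port layout, not from the driver): on a
 * host whose PORTSC array holds two USB3 ports followed by two USB2 ports,
 * the first USB2 roothub port (port1 = 1) might have hw_portnum 2, so this
 * function would return raw port number 3.
 */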
a558ccdc MN |
4475 | /* |
4476 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the | |
4477 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. | |
4478 | */ | |
d5c82feb | 4479 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
a558ccdc MN |
4480 | struct usb_device *udev, u16 max_exit_latency) |
4481 | { | |
4482 | struct xhci_virt_device *virt_dev; | |
4483 | struct xhci_command *command; | |
4484 | struct xhci_input_control_ctx *ctrl_ctx; | |
4485 | struct xhci_slot_ctx *slot_ctx; | |
4486 | unsigned long flags; | |
4487 | int ret; | |
4488 | ||
5c2a380a MN |
4489 | command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL); |
4490 | if (!command) | |
4491 | return -ENOMEM; | |
4492 | ||
a558ccdc | 4493 | spin_lock_irqsave(&xhci->lock, flags); |
96044694 MN |
4494 | |
4495 | virt_dev = xhci->devs[udev->slot_id]; | |
4496 | ||
4497 | /* | |
4498 | * virt_dev might not exist yet if the xHC resumed from hibernate (S4) |
4499 | * and was re-initialized. Exit latency will be set later, after |
4500 | * hub_port_finish_reset() is done and xhci->devs[] is re-allocated. |
4501 | */ | |
4502 | ||
4503 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { | |
a558ccdc | 4504 | spin_unlock_irqrestore(&xhci->lock, flags); |
f6caea48 | 4505 | xhci_free_command(xhci, command); |
a558ccdc MN |
4506 | return 0; |
4507 | } | |
4508 | ||
4509 | /* Attempt to issue an Evaluate Context command to change the MEL. */ | |
4daf9df5 | 4510 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
92f8e767 SS |
4511 | if (!ctrl_ctx) { |
4512 | spin_unlock_irqrestore(&xhci->lock, flags); | |
5c2a380a | 4513 | xhci_free_command(xhci, command); |
92f8e767 SS |
4514 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
4515 | __func__); | |
4516 | return -ENOMEM; | |
4517 | } | |
4518 | ||
a558ccdc MN |
4519 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
4520 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4521 | ||
a558ccdc MN |
4522 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
4523 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); | |
4524 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); | |
4525 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); | |
4801d4ea | 4526 | slot_ctx->dev_state = 0; |
a558ccdc | 4527 | |
3a7fa5be XR |
4528 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
4529 | "Set up evaluate context for LPM MEL change."); | |
a558ccdc MN |
4530 | |
4531 | /* Issue and wait for the evaluate context command. */ | |
4532 | ret = xhci_configure_endpoint(xhci, udev, command, | |
4533 | true, true); | |
a558ccdc MN |
4534 | |
4535 | if (!ret) { | |
4536 | spin_lock_irqsave(&xhci->lock, flags); | |
4537 | virt_dev->current_mel = max_exit_latency; | |
4538 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4539 | } | |
5c2a380a MN |
4540 | |
4541 | xhci_free_command(xhci, command); | |
4542 | ||
a558ccdc MN |
4543 | return ret; |
4544 | } | |
4545 | ||
ceb6c9c8 | 4546 | #ifdef CONFIG_PM |
9574323c AX |
4547 | |
4548 | /* BESL to HIRD Encoding array for USB2 LPM */ | |
4549 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, | |
4550 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; | |
4551 | ||
4552 | /* Calculate HIRD/BESL for USB2 PORTPMSC */ |
f99298bf AX |
4553 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
4554 | struct usb_device *udev) | |
9574323c | 4555 | { |
f99298bf AX |
4556 | int u2del, besl, besl_host; |
4557 | int besl_device = 0; | |
4558 | u32 field; | |
4559 | ||
4560 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); | |
4561 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); | |
9574323c | 4562 | |
f99298bf AX |
4563 | if (field & USB_BESL_SUPPORT) { |
4564 | for (besl_host = 0; besl_host < 16; besl_host++) { | |
4565 | if (xhci_besl_encoding[besl_host] >= u2del) | |
9574323c AX |
4566 | break; |
4567 | } | |
f99298bf AX |
4568 | /* Use baseline BESL value as default */ |
4569 | if (field & USB_BESL_BASELINE_VALID) | |
4570 | besl_device = USB_GET_BESL_BASELINE(field); | |
4571 | else if (field & USB_BESL_DEEP_VALID) | |
4572 | besl_device = USB_GET_BESL_DEEP(field); | |
9574323c AX |
4573 | } else { |
4574 | if (u2del <= 50) | |
f99298bf | 4575 | besl_host = 0; |
9574323c | 4576 | else |
f99298bf | 4577 | besl_host = (u2del - 51) / 75 + 1; |
9574323c AX |
4578 | } |
4579 | ||
f99298bf AX |
4580 | besl = besl_host + besl_device; |
4581 | if (besl > 15) | |
4582 | besl = 15; | |
4583 | ||
4584 | return besl; | |
9574323c AX |
4585 | } |
4586 | ||
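/*
 * Editorial worked example (values assumed, not from the driver source):
 * with a host U2 exit latency u2del of 300us and a device whose BOS
 * descriptor advertises USB_BESL_SUPPORT with a valid baseline BESL of 2,
 * the loop above stops at besl_host = 3 (xhci_besl_encoding[3] = 300 >=
 * 300), so the function returns besl_host + besl_device = 5; sums above
 * 15 are clamped to fit the 4-bit HIRD/BESL field.
 */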
a558ccdc MN |
4587 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ |
4588 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) | |
4589 | { | |
4590 | u32 field; | |
4591 | int l1; | |
4592 | int besld = 0; | |
4593 | int hirdm = 0; | |
4594 | ||
4595 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); | |
4596 | ||
4597 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ | |
17f34867 | 4598 | l1 = udev->l1_params.timeout / 256; |
a558ccdc MN |
4599 | |
4600 | /* device has preferred BESLD */ | |
4601 | if (field & USB_BESL_DEEP_VALID) { | |
4602 | besld = USB_GET_BESL_DEEP(field); | |
4603 | hirdm = 1; | |
4604 | } | |
4605 | ||
4606 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); | |
4607 | } | |
4608 | ||
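/*
 * Editorial worked example (values assumed): a device with an L1 timeout
 * of 512us and a valid deep BESL of 4 yields l1 = 512 / 256 = 2, besld = 4
 * and hirdm = 1, so the returned PORTHLPMC value is
 * PORT_BESLD(4) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */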
3969384c | 4609 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
65580b43 AX |
4610 | struct usb_device *udev, int enable) |
4611 | { | |
4612 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
38986ffa | 4613 | struct xhci_port **ports; |
a558ccdc MN |
4614 | __le32 __iomem *pm_addr, *hlpm_addr; |
4615 | u32 pm_val, hlpm_val, field; | |
65580b43 AX |
4616 | unsigned int port_num; |
4617 | unsigned long flags; | |
a558ccdc MN |
4618 | int hird, exit_latency; |
4619 | int ret; | |
65580b43 | 4620 | |
f0c472a6 KHF |
4621 | if (xhci->quirks & XHCI_HW_LPM_DISABLE) |
4622 | return -EPERM; | |
4623 | ||
b50107bb | 4624 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || |
65580b43 AX |
4625 | !udev->lpm_capable) |
4626 | return -EPERM; | |
4627 | ||
4628 | if (!udev->parent || udev->parent->parent || | |
4629 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | |
4630 | return -EPERM; | |
4631 | ||
4632 | if (udev->usb2_hw_lpm_capable != 1) | |
4633 | return -EPERM; | |
4634 | ||
4635 | spin_lock_irqsave(&xhci->lock, flags); | |
4636 | ||
38986ffa | 4637 | ports = xhci->usb2_rhub.ports; |
65580b43 | 4638 | port_num = udev->portnum - 1; |
38986ffa | 4639 | pm_addr = ports[port_num]->addr + PORTPMSC; |
b0ba9720 | 4640 | pm_val = readl(pm_addr); |
38986ffa | 4641 | hlpm_addr = ports[port_num]->addr + PORTHLPMC; |
65580b43 AX |
4642 | |
4643 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", | |
789a1714 | 4644 | str_enable_disable(enable), port_num + 1); |
65580b43 | 4645 | |
f0c472a6 | 4646 | if (enable) { |
a558ccdc MN |
4647 | /* Host supports BESL timeout instead of HIRD */ |
4648 | if (udev->usb2_hw_lpm_besl_capable) { | |
4649 | /* if the device doesn't have a preferred BESL value, use a |
4650 | * default one that works with mixed HIRD and BESL |
4651 | * systems. See XHCI_DEFAULT_BESL definition in xhci.h | |
4652 | */ | |
7aa1bb2f | 4653 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
a558ccdc MN |
4654 | if ((field & USB_BESL_SUPPORT) && |
4655 | (field & USB_BESL_BASELINE_VALID)) | |
4656 | hird = USB_GET_BESL_BASELINE(field); | |
4657 | else | |
17f34867 | 4658 | hird = udev->l1_params.besl; |
a558ccdc MN |
4659 | |
4660 | exit_latency = xhci_besl_encoding[hird]; | |
4661 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4662 | ||
a558ccdc MN |
4663 | ret = xhci_change_max_exit_latency(xhci, udev, |
4664 | exit_latency); | |
a558ccdc MN |
4665 | if (ret < 0) |
4666 | return ret; | |
4667 | spin_lock_irqsave(&xhci->lock, flags); | |
4668 | ||
4669 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); | |
204b7793 | 4670 | writel(hlpm_val, hlpm_addr); |
a558ccdc | 4671 | /* flush write */ |
b0ba9720 | 4672 | readl(hlpm_addr); |
a558ccdc MN |
4673 | } else { |
4674 | hird = xhci_calculate_hird_besl(xhci, udev); | |
4675 | } | |
4676 | ||
4677 | pm_val &= ~PORT_HIRD_MASK; | |
58e21f73 | 4678 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
204b7793 | 4679 | writel(pm_val, pm_addr); |
b0ba9720 | 4680 | pm_val = readl(pm_addr); |
a558ccdc | 4681 | pm_val |= PORT_HLE; |
204b7793 | 4682 | writel(pm_val, pm_addr); |
a558ccdc | 4683 | /* flush write */ |
b0ba9720 | 4684 | readl(pm_addr); |
65580b43 | 4685 | } else { |
58e21f73 | 4686 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
204b7793 | 4687 | writel(pm_val, pm_addr); |
a558ccdc | 4688 | /* flush write */ |
b0ba9720 | 4689 | readl(pm_addr); |
a558ccdc MN |
4690 | if (udev->usb2_hw_lpm_besl_capable) { |
4691 | spin_unlock_irqrestore(&xhci->lock, flags); | |
a558ccdc | 4692 | xhci_change_max_exit_latency(xhci, udev, 0); |
b3d71abd KHF |
4693 | readl_poll_timeout(ports[port_num]->addr, pm_val, |
4694 | (pm_val & PORT_PLS_MASK) == XDEV_U0, | |
4695 | 100, 10000); | |
a558ccdc MN |
4696 | return 0; |
4697 | } | |
65580b43 AX |
4698 | } |
4699 | ||
4700 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4701 | return 0; | |
4702 | } | |
4703 | ||
3969384c | 4704 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
b01bcbf7 SS |
4705 | { |
4706 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
6d3bc5e9 MN |
4707 | struct xhci_port *port; |
4708 | u32 capability; | |
b01bcbf7 | 4709 | |
948ce83f MN |
4710 | /* Check if USB3 device at root port is tunneled over USB4 */ |
4711 | if (hcd->speed >= HCD_USB3 && !udev->parent->parent) { | |
4712 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; | |
4713 | ||
f46a6e16 MN |
4714 | udev->tunnel_mode = xhci_port_is_tunneled(xhci, port); |
4715 | if (udev->tunnel_mode == USB_LINK_UNKNOWN) | |
4716 | dev_dbg(&udev->dev, "link tunnel state unknown\n"); | |
4717 | else if (udev->tunnel_mode == USB_LINK_TUNNELED) | |
948ce83f | 4718 | dev_dbg(&udev->dev, "tunneled over USB4 link\n"); |
f46a6e16 | 4719 | else if (udev->tunnel_mode == USB_LINK_NATIVE) |
948ce83f MN |
4720 | dev_dbg(&udev->dev, "native USB 3.x link\n"); |
4721 | return 0; | |
4722 | } | |
4723 | ||
6d3bc5e9 | 4724 | if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support) |
de68bab4 SS |
4725 | return 0; |
4726 | ||
4727 | /* so far we only support LPM for non-hub devices connected to a root hub */ |
4728 | if (!udev->parent || udev->parent->parent || | |
4729 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | |
4730 | return 0; | |
4731 | ||
6d3bc5e9 MN |
4732 | port = xhci->usb2_rhub.ports[udev->portnum - 1]; |
4733 | capability = port->port_cap->protocol_caps; | |
4734 | ||
4735 | if (capability & XHCI_HLC) { | |
de68bab4 SS |
4736 | udev->usb2_hw_lpm_capable = 1; |
4737 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; | |
4738 | udev->l1_params.besl = XHCI_DEFAULT_BESL; | |
6d3bc5e9 | 4739 | if (capability & XHCI_BLC) |
de68bab4 | 4740 | udev->usb2_hw_lpm_besl_capable = 1; |
b01bcbf7 SS |
4741 | } |
4742 | ||
4743 | return 0; | |
4744 | } | |
4745 | ||
3b3db026 SS |
4746 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
4747 | ||
e3567d2c SS |
4748 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ |
4749 | static unsigned long long xhci_service_interval_to_ns( | |
4750 | struct usb_endpoint_descriptor *desc) | |
4751 | { | |
16b45fdf | 4752 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; |
e3567d2c SS |
4753 | } |
4754 | ||
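/*
 * Editorial worked example: an endpoint with bInterval = 4 has a service
 * interval of 2^(4-1) * 125us = 1ms, so this returns 1000000 ns.
 */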
3b3db026 SS |
4755 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
4756 | enum usb3_link_state state) | |
4757 | { | |
4758 | unsigned long long sel; | |
4759 | unsigned long long pel; | |
4760 | unsigned int max_sel_pel; | |
4761 | char *state_name; | |
4762 | ||
4763 | switch (state) { | |
4764 | case USB3_LPM_U1: | |
4765 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ | |
4766 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); | |
4767 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); | |
4768 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; | |
4769 | state_name = "U1"; | |
4770 | break; | |
4771 | case USB3_LPM_U2: | |
4772 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); | |
4773 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); | |
4774 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; | |
4775 | state_name = "U2"; | |
4776 | break; | |
4777 | default: | |
4778 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", | |
4779 | __func__); | |
e25e62ae | 4780 | return USB3_LPM_DISABLED; |
3b3db026 SS |
4781 | } |
4782 | ||
4783 | if (sel <= max_sel_pel && pel <= max_sel_pel) | |
4784 | return USB3_LPM_DEVICE_INITIATED; | |
4785 | ||
4786 | if (sel > max_sel_pel) | |
4787 | dev_dbg(&udev->dev, "Device-initiated %s disabled " | |
4788 | "due to long SEL %llu ms\n", | |
4789 | state_name, sel); | |
4790 | else | |
4791 | dev_dbg(&udev->dev, "Device-initiated %s disabled " | |
03e64e96 | 4792 | "due to long PEL %llu ms\n", |
3b3db026 SS |
4793 | state_name, pel); |
4794 | return USB3_LPM_DISABLED; | |
4795 | } | |
4796 | ||
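/*
 * Editorial worked example (values assumed): for U1 with
 * udev->u1_params.sel = 300000 ns, sel rounds up to 300us, which is above
 * USB3_LPM_MAX_U1_SEL_PEL, so device-initiated U1 is reported as
 * disabled; with sel and pel both at 100000 ns (100us each) the function
 * returns USB3_LPM_DEVICE_INITIATED instead.
 */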
9502c46c | 4797 | /* The U1 timeout should be the maximum of the following values: |
e3567d2c SS |
4798 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
4799 | * - For bulk endpoints, U1 SEL * 5 | |
4800 | * - For interrupt endpoints: | |
4801 | * - Notification EPs, U1 SEL * 3 | |
4802 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) | |
4803 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) | |
4804 | */ | |
9502c46c PA |
4805 | static unsigned long long xhci_calculate_intel_u1_timeout( |
4806 | struct usb_device *udev, | |
e3567d2c SS |
4807 | struct usb_endpoint_descriptor *desc) |
4808 | { | |
4809 | unsigned long long timeout_ns; | |
4810 | int ep_type; | |
4811 | int intr_type; | |
4812 | ||
4813 | ep_type = usb_endpoint_type(desc); | |
4814 | switch (ep_type) { | |
4815 | case USB_ENDPOINT_XFER_CONTROL: | |
4816 | timeout_ns = udev->u1_params.sel * 3; | |
4817 | break; | |
4818 | case USB_ENDPOINT_XFER_BULK: | |
4819 | timeout_ns = udev->u1_params.sel * 5; | |
4820 | break; | |
4821 | case USB_ENDPOINT_XFER_INT: | |
4822 | intr_type = usb_endpoint_interrupt_type(desc); | |
4823 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { | |
4824 | timeout_ns = udev->u1_params.sel * 3; | |
4825 | break; | |
4826 | } | |
4827 | /* Otherwise the calculation is the same as isoc eps */ | |
df561f66 | 4828 | fallthrough; |
e3567d2c SS |
4829 | case USB_ENDPOINT_XFER_ISOC: |
4830 | timeout_ns = xhci_service_interval_to_ns(desc); | |
c88db160 | 4831 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
e3567d2c SS |
4832 | if (timeout_ns < udev->u1_params.sel * 2) |
4833 | timeout_ns = udev->u1_params.sel * 2; | |
4834 | break; | |
4835 | default: | |
4836 | return 0; | |
4837 | } | |
4838 | ||
9502c46c PA |
4839 | return timeout_ns; |
4840 | } | |
4841 | ||
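/*
 * Editorial worked example (values assumed): a bulk endpoint on a device
 * with u1_params.sel = 200000 ns gets timeout_ns = 5 * 200000 =
 * 1000000 ns; an isochronous endpoint with a 125us service interval would
 * instead use max(105% of 125000 ns, 2 * 200000 ns) = 400000 ns.
 */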
4842 | /* Returns the hub-encoded U1 timeout value. */ | |
4843 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, | |
4844 | struct usb_device *udev, | |
4845 | struct usb_endpoint_descriptor *desc) | |
4846 | { | |
4847 | unsigned long long timeout_ns; | |
4848 | ||
0472bf06 MN |
4849 | /* Prevent U1 if service interval is shorter than U1 exit latency */ |
4850 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { | |
2847c46c | 4851 | if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { |
0472bf06 MN |
4852 | dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); |
4853 | return USB3_LPM_DISABLED; | |
4854 | } | |
4855 | } | |
4856 | ||
d5e234ff | 4857 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
2847c46c MN |
4858 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
4859 | else | |
4860 | timeout_ns = udev->u1_params.sel; | |
4861 | ||
9502c46c PA |
4862 | /* The U1 timeout is encoded in 1us intervals. |
4863 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. | |
4864 | */ | |
e3567d2c | 4865 | if (timeout_ns == USB3_LPM_DISABLED) |
9502c46c PA |
4866 | timeout_ns = 1; |
4867 | else | |
4868 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); | |
e3567d2c SS |
4869 | |
4870 | /* If the necessary timeout value is bigger than what we can set in the | |
4871 | * USB 3.0 hub, we have to disable hub-initiated U1. | |
4872 | */ | |
4873 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) | |
4874 | return timeout_ns; | |
b020761e MN |
4875 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", |
4876 | timeout_ns); | |
e3567d2c SS |
4877 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); |
4878 | } | |
4879 | ||
9502c46c | 4880 | /* The U2 timeout should be the maximum of: |
e3567d2c SS |
4881 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
4882 | * - largest bInterval of any active periodic endpoint (to avoid going | |
4883 | * into lower power link states between intervals). | |
4884 | * - the U2 Exit Latency of the device | |
4885 | */ | |
9502c46c PA |
4886 | static unsigned long long xhci_calculate_intel_u2_timeout( |
4887 | struct usb_device *udev, | |
e3567d2c SS |
4888 | struct usb_endpoint_descriptor *desc) |
4889 | { | |
4890 | unsigned long long timeout_ns; | |
4891 | unsigned long long u2_del_ns; | |
4892 | ||
4893 | timeout_ns = 10 * 1000 * 1000; | |
4894 | ||
4895 | if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && | |
4896 | (xhci_service_interval_to_ns(desc) > timeout_ns)) | |
4897 | timeout_ns = xhci_service_interval_to_ns(desc); | |
4898 | ||
966e7a85 | 4899 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; |
e3567d2c SS |
4900 | if (u2_del_ns > timeout_ns) |
4901 | timeout_ns = u2_del_ns; | |
4902 | ||
9502c46c PA |
4903 | return timeout_ns; |
4904 | } | |
4905 | ||
4906 | /* Returns the hub-encoded U2 timeout value. */ | |
4907 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, | |
4908 | struct usb_device *udev, | |
4909 | struct usb_endpoint_descriptor *desc) | |
4910 | { | |
4911 | unsigned long long timeout_ns; | |
4912 | ||
0472bf06 MN |
4913 | /* Prevent U2 if service interval is shorter than U2 exit latency */ |
4914 | if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { | |
2847c46c | 4915 | if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { |
0472bf06 MN |
4916 | dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); |
4917 | return USB3_LPM_DISABLED; | |
4918 | } | |
4919 | } | |
4920 | ||
d5e234ff | 4921 | if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) |
2847c46c MN |
4922 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
4923 | else | |
4924 | timeout_ns = udev->u2_params.sel; | |
4925 | ||
e3567d2c | 4926 | /* The U2 timeout is encoded in 256us intervals */ |
c88db160 | 4927 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
e3567d2c SS |
4928 | /* If the necessary timeout value is bigger than what we can set in the |
4929 | * USB 3.0 hub, we have to disable hub-initiated U2. | |
4930 | */ | |
4931 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) | |
4932 | return timeout_ns; | |
b020761e MN |
4933 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", |
4934 | timeout_ns * 256); | |
e3567d2c SS |
4935 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); |
4936 | } | |
4937 | ||
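/*
 * Editorial worked example: the 10ms baseline U2 timeout from the Intel
 * calculation above encodes as DIV_ROUND_UP(10000000, 256000) = 40, i.e.
 * the hub waits 40 * 256us = 10.24ms of inactivity before initiating U2;
 * values above USB3_LPM_U2_MAX_TIMEOUT fall back to
 * xhci_get_timeout_no_hub_lpm().
 */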
3b3db026 SS |
4938 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
4939 | struct usb_device *udev, | |
4940 | struct usb_endpoint_descriptor *desc, | |
4941 | enum usb3_link_state state, | |
4942 | u16 *timeout) | |
4943 | { | |
9502c46c PA |
4944 | if (state == USB3_LPM_U1) |
4945 | return xhci_calculate_u1_timeout(xhci, udev, desc); | |
4946 | else if (state == USB3_LPM_U2) | |
4947 | return xhci_calculate_u2_timeout(xhci, udev, desc); | |
e3567d2c | 4948 | |
3b3db026 SS |
4949 | return USB3_LPM_DISABLED; |
4950 | } | |
4951 | ||
4952 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, | |
4953 | struct usb_device *udev, | |
4954 | struct usb_endpoint_descriptor *desc, | |
4955 | enum usb3_link_state state, | |
4956 | u16 *timeout) | |
4957 | { | |
4958 | u16 alt_timeout; | |
4959 | ||
4960 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, | |
4961 | desc, state, timeout); | |
4962 | ||
d500c63f | 4963 | /* If we found we can't enable hub-initiated LPM, and |
3b3db026 | 4964 | * the U1 or U2 exit latency was too high to allow |
d500c63f JS |
4965 | * device-initiated LPM as well, then we will disable LPM |
4966 | * for this device, so stop searching any further. | |
3b3db026 | 4967 | */ |
d500c63f | 4968 | if (alt_timeout == USB3_LPM_DISABLED) { |
3b3db026 SS |
4969 | *timeout = alt_timeout; |
4970 | return -E2BIG; | |
4971 | } | |
4972 | if (alt_timeout > *timeout) | |
4973 | *timeout = alt_timeout; | |
4974 | return 0; | |
4975 | } | |
4976 | ||
4977 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, | |
4978 | struct usb_device *udev, | |
4979 | struct usb_host_interface *alt, | |
4980 | enum usb3_link_state state, | |
4981 | u16 *timeout) | |
4982 | { | |
4983 | int j; | |
4984 | ||
4985 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { | |
4986 | if (xhci_update_timeout_for_endpoint(xhci, udev, | |
4987 | &alt->endpoint[j].desc, state, timeout)) | |
4988 | return -E2BIG; | |
3b3db026 SS |
4989 | } |
4990 | return 0; | |
4991 | } | |
4992 | ||
d5e234ff WW |
4993 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
4994 | struct usb_device *udev, | |
e3567d2c SS |
4995 | enum usb3_link_state state) |
4996 | { | |
d5e234ff WW |
4997 | struct usb_device *parent = udev->parent; |
4998 | int tier = 1; /* roothub is tier1 */ | |
e3567d2c | 4999 | |
d5e234ff WW |
5000 | while (parent) { |
5001 | parent = parent->parent; | |
5002 | tier++; | |
5003 | } | |
e3567d2c | 5004 | |
d5e234ff WW |
5005 | if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) |
5006 | goto fail; | |
5007 | if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) | |
5008 | goto fail; | |
e3567d2c | 5009 | |
d5e234ff WW |
5010 | return 0; |
5011 | fail: | |
5012 | dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n", | |
5013 | tier); | |
e3567d2c SS |
5014 | return -E2BIG; |
5015 | } | |
5016 | ||
3b3db026 SS |
5017 | /* Returns the U1 or U2 timeout that should be enabled. |
5018 | * If the tier check or timeout setting functions return with a non-zero exit | |
5019 | * code, that means the timeout value has been finalized and we shouldn't look | |
5020 | * at any more endpoints. | |
5021 | */ | |
5022 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, | |
5023 | struct usb_device *udev, enum usb3_link_state state) | |
5024 | { | |
5025 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
5026 | struct usb_host_config *config; | |
5027 | char *state_name; | |
5028 | int i; | |
5029 | u16 timeout = USB3_LPM_DISABLED; | |
5030 | ||
5031 | if (state == USB3_LPM_U1) | |
5032 | state_name = "U1"; | |
5033 | else if (state == USB3_LPM_U2) | |
5034 | state_name = "U2"; | |
5035 | else { | |
5036 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n", | |
5037 | state); | |
5038 | return timeout; | |
5039 | } | |
5040 | ||
3b3db026 SS |
5041 | /* Gather some information about the currently installed configuration |
5042 | * and alternate interface settings. | |
5043 | */ | |
5044 | if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, | |
5045 | state, &timeout)) | |
5046 | return timeout; | |
5047 | ||
5048 | config = udev->actconfig; | |
5049 | if (!config) | |
5050 | return timeout; | |
5051 | ||
64ba419b | 5052 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
3b3db026 SS |
5053 | struct usb_driver *driver; |
5054 | struct usb_interface *intf = config->interface[i]; | |
5055 | ||
5056 | if (!intf) | |
5057 | continue; | |
5058 | ||
5059 | /* Check if any currently bound drivers want hub-initiated LPM | |
5060 | * disabled. | |
5061 | */ | |
5062 | if (intf->dev.driver) { | |
5063 | driver = to_usb_driver(intf->dev.driver); | |
5064 | if (driver && driver->disable_hub_initiated_lpm) { | |
cd9d9491 MN |
5065 | dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", |
5066 | state_name, driver->name); | |
5067 | timeout = xhci_get_timeout_no_hub_lpm(udev, | |
5068 | state); | |
5069 | if (timeout == USB3_LPM_DISABLED) | |
5070 | return timeout; | |
3b3db026 SS |
5071 | } |
5072 | } | |
5073 | ||
5074 | /* Not sure how this could happen... */ | |
5075 | if (!intf->cur_altsetting) | |
5076 | continue; | |
5077 | ||
5078 | if (xhci_update_timeout_for_interface(xhci, udev, | |
5079 | intf->cur_altsetting, | |
5080 | state, &timeout)) | |
5081 | return timeout; | |
5082 | } | |
5083 | return timeout; | |
5084 | } | |
5085 | ||
3b3db026 SS |
5086 | static int calculate_max_exit_latency(struct usb_device *udev, |
5087 | enum usb3_link_state state_changed, | |
5088 | u16 hub_encoded_timeout) | |
5089 | { | |
5090 | unsigned long long u1_mel_us = 0; | |
5091 | unsigned long long u2_mel_us = 0; | |
5092 | unsigned long long mel_us = 0; | |
5093 | bool disabling_u1; | |
5094 | bool disabling_u2; | |
5095 | bool enabling_u1; | |
5096 | bool enabling_u2; | |
5097 | ||
5098 | disabling_u1 = (state_changed == USB3_LPM_U1 && | |
5099 | hub_encoded_timeout == USB3_LPM_DISABLED); | |
5100 | disabling_u2 = (state_changed == USB3_LPM_U2 && | |
5101 | hub_encoded_timeout == USB3_LPM_DISABLED); | |
5102 | ||
5103 | enabling_u1 = (state_changed == USB3_LPM_U1 && | |
5104 | hub_encoded_timeout != USB3_LPM_DISABLED); | |
5105 | enabling_u2 = (state_changed == USB3_LPM_U2 && | |
5106 | hub_encoded_timeout != USB3_LPM_DISABLED); | |
5107 | ||
5108 | /* If U1 was already enabled and we're not disabling it, | |
5109 | * or we're going to enable U1, account for the U1 max exit latency. | |
5110 | */ | |
5111 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || | |
5112 | enabling_u1) | |
5113 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); | |
5114 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || | |
5115 | enabling_u2) | |
5116 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); | |
5117 | ||
f28fb27e CD |
5118 | mel_us = max(u1_mel_us, u2_mel_us); |
5119 | ||
3b3db026 SS |
5120 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
5121 | if (mel_us > MAX_EXIT) { | |
5122 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " | |
5123 | "is too big.\n", mel_us); | |
5124 | return -E2BIG; | |
5125 | } | |
5126 | return mel_us; | |
5127 | } | |
5128 | ||
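/*
 * Editorial worked example (values assumed): enabling U2 while U1 is
 * already enabled, with u1_params.mel = 900 ns and u2_params.mel =
 * 2300 ns, gives u1_mel_us = 1 and u2_mel_us = 3, so the slot context's
 * Max Exit Latency will be programmed to 3us; only values overflowing the
 * 16-bit MAX_EXIT field return -E2BIG.
 */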
5129 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ | |
3969384c | 5130 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
3b3db026 SS |
5131 | struct usb_device *udev, enum usb3_link_state state) |
5132 | { | |
5133 | struct xhci_hcd *xhci; | |
0522b9a1 | 5134 | struct xhci_port *port; |
3b3db026 SS |
5135 | u16 hub_encoded_timeout; |
5136 | int mel; | |
5137 | int ret; | |
5138 | ||
5139 | xhci = hcd_to_xhci(hcd); | |
5140 | /* The LPM timeout values are pretty host-controller specific, so don't | |
5141 | * enable hub-initiated timeouts unless the vendor has provided | |
5142 | * information about their timeout algorithm. | |
5143 | */ | |
5144 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || | |
5145 | !xhci->devs[udev->slot_id]) | |
5146 | return USB3_LPM_DISABLED; | |
5147 | ||
424140d3 MN |
5148 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
5149 | return USB3_LPM_DISABLED; | |
5150 | ||
0522b9a1 MN |
5151 | /* If connected to root port then check port can handle lpm */ |
5152 | if (udev->parent && !udev->parent->parent) { | |
5153 | port = xhci->usb3_rhub.ports[udev->portnum - 1]; | |
5154 | if (port->lpm_incapable) | |
5155 | return USB3_LPM_DISABLED; | |
5156 | } | |
5157 | ||
3b3db026 SS |
5158 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
5159 | mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); | |
5160 | if (mel < 0) { | |
5161 | /* Max Exit Latency is too big, disable LPM. */ | |
5162 | hub_encoded_timeout = USB3_LPM_DISABLED; | |
5163 | mel = 0; | |
5164 | } | |
5165 | ||
5166 | ret = xhci_change_max_exit_latency(xhci, udev, mel); | |
5167 | if (ret) | |
5168 | return ret; | |
5169 | return hub_encoded_timeout; | |
5170 | } | |
5171 | ||
3969384c | 5172 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
3b3db026 SS |
5173 | struct usb_device *udev, enum usb3_link_state state) |
5174 | { | |
5175 | struct xhci_hcd *xhci; | |
5176 | u16 mel; | |
3b3db026 SS |
5177 | |
5178 | xhci = hcd_to_xhci(hcd); | |
5179 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || | |
5180 | !xhci->devs[udev->slot_id]) | |
5181 | return 0; | |
5182 | ||
5183 | mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); | |
f1cda54c | 5184 | return xhci_change_max_exit_latency(xhci, udev, mel); |
3b3db026 | 5185 | } |
b01bcbf7 | 5186 | #else /* CONFIG_PM */ |
9574323c | 5187 | |
3969384c | 5188 | static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
ceb6c9c8 RW |
5189 | struct usb_device *udev, int enable) |
5190 | { | |
5191 | return 0; | |
5192 | } | |
5193 | ||
3969384c | 5194 | static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
ceb6c9c8 RW |
5195 | { |
5196 | return 0; | |
5197 | } | |
5198 | ||
3969384c | 5199 | static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
b01bcbf7 | 5200 | struct usb_device *udev, enum usb3_link_state state) |
65580b43 | 5201 | { |
b01bcbf7 | 5202 | return USB3_LPM_DISABLED; |
65580b43 AX |
5203 | } |
5204 | ||
3969384c | 5205 | static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
b01bcbf7 | 5206 | struct usb_device *udev, enum usb3_link_state state) |
9574323c AX |
5207 | { |
5208 | return 0; | |
5209 | } | |
b01bcbf7 | 5210 | #endif /* CONFIG_PM */ |
9574323c | 5211 | |
b01bcbf7 | 5212 | /*-------------------------------------------------------------------------*/ |
9574323c | 5213 | |
ac1c1b7f SS |
5214 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
5215 | * internal data structures for the device. | |
5216 | */ | |
23a3b8d5 | 5217 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
ac1c1b7f SS |
5218 | struct usb_tt *tt, gfp_t mem_flags) |
5219 | { | |
5220 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
5221 | struct xhci_virt_device *vdev; | |
5222 | struct xhci_command *config_cmd; | |
5223 | struct xhci_input_control_ctx *ctrl_ctx; | |
5224 | struct xhci_slot_ctx *slot_ctx; | |
5225 | unsigned long flags; | |
5226 | unsigned think_time; | |
5227 | int ret; | |
5228 | ||
5229 | /* Ignore root hubs */ | |
5230 | if (!hdev->parent) | |
5231 | return 0; | |
5232 | ||
5233 | vdev = xhci->devs[hdev->slot_id]; | |
5234 | if (!vdev) { | |
5235 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | |
5236 | return -EINVAL; | |
5237 | } | |
74e0b564 | 5238 | |
14d49b7a | 5239 | config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); |
74e0b564 | 5240 | if (!config_cmd) |
ac1c1b7f | 5241 | return -ENOMEM; |
74e0b564 | 5242 | |
4daf9df5 | 5243 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); |
92f8e767 SS |
5244 | if (!ctrl_ctx) { |
5245 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
5246 | __func__); | |
5247 | xhci_free_command(xhci, config_cmd); | |
5248 | return -ENOMEM; | |
5249 | } | |
ac1c1b7f SS |
5250 | |
5251 | spin_lock_irqsave(&xhci->lock, flags); | |
839c817c SS |
5252 | if (hdev->speed == USB_SPEED_HIGH && |
5253 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { | |
5254 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); | |
5255 | xhci_free_command(xhci, config_cmd); | |
5256 | spin_unlock_irqrestore(&xhci->lock, flags); | |
5257 | return -ENOMEM; | |
5258 | } | |
5259 | ||
ac1c1b7f | 5260 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
28ccd296 | 5261 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
ac1c1b7f | 5262 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
28ccd296 | 5263 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
096b110a CY |
5264 | /* |
5265 | * Refer to section 6.2.2: MTT should be 0 for a full speed hub, |
5266 | * but it may already be set to 1 when setting up an xHCI virtual |
5267 | * device, so clear it anyway. |
5268 | */ | |
ac1c1b7f | 5269 | if (tt->multi) |
28ccd296 | 5270 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
096b110a CY |
5271 | else if (hdev->speed == USB_SPEED_FULL) |
5272 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); | |
5273 | ||
ac1c1b7f SS |
5274 | if (xhci->hci_version > 0x95) { |
5275 | xhci_dbg(xhci, "xHCI version %x needs hub " | |
5276 | "TT think time and number of ports\n", | |
5277 | (unsigned int) xhci->hci_version); | |
28ccd296 | 5278 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
ac1c1b7f SS |
5279 | /* Set TT think time - convert from ns to FS bit times. |
5280 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | |
5281 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | |
700b4173 AX |
5282 | * |
5283 | * xHCI 1.0: this field shall be 0 if the device is not a | |
5284 | * High-spped hub. | |
ac1c1b7f SS |
5285 | */ |
5286 | think_time = tt->think_time; | |
5287 | if (think_time != 0) | |
5288 | think_time = (think_time / 666) - 1; | |
700b4173 AX |
5289 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
5290 | slot_ctx->tt_info |= | |
5291 | cpu_to_le32(TT_THINK_TIME(think_time)); | |
ac1c1b7f SS |
5292 | } else { |
5293 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | |
5294 | "TT think time or number of ports\n", | |
5295 | (unsigned int) xhci->hci_version); | |
5296 | } | |
5297 | slot_ctx->dev_state = 0; | |
5298 | spin_unlock_irqrestore(&xhci->lock, flags); | |
5299 | ||
5300 | xhci_dbg(xhci, "Set up %s for hub device.\n", | |
5301 | (xhci->hci_version > 0x95) ? | |
5302 | "configure endpoint" : "evaluate context"); | |
ac1c1b7f SS |
5303 | |
5304 | /* Issue and wait for the configure endpoint or | |
5305 | * evaluate context command. | |
5306 | */ | |
5307 | if (xhci->hci_version > 0x95) | |
5308 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
5309 | false, false); | |
5310 | else | |
5311 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
5312 | true, false); | |
5313 | ||
ac1c1b7f SS |
5314 | xhci_free_command(xhci, config_cmd); |
5315 | return ret; | |
5316 | } | |
23a3b8d5 | 5317 | EXPORT_SYMBOL_GPL(xhci_update_hub_device); |
ac1c1b7f | 5318 | |
3969384c | 5319 | static int xhci_get_frame(struct usb_hcd *hcd) |
66d4eadd SS |
5320 | { |
5321 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
5322 | /* EHCI mods by the periodic size. Why? */ | |
b0ba9720 | 5323 | return readl(&xhci->run_regs->microframe_index) >> 3; |
66d4eadd SS |
5324 | } |
5325 | ||
57f23cd0 HK |
5326 | static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) |
5327 | { | |
5328 | xhci->usb2_rhub.hcd = hcd; | |
5329 | hcd->speed = HCD_USB2; | |
5330 | hcd->self.root_hub->speed = USB_SPEED_HIGH; | |
5331 | /* | |
5332 | * A USB 2.0 roothub under xHCI has an integrated TT |
5333 | * (rate matching hub), as opposed to having an OHCI/UHCI |
5334 | * companion controller. | |
5335 | */ | |
5336 | hcd->has_tt = 1; | |
5337 | } | |
5338 | ||
5339 | static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) | |
5340 | { | |
5341 | unsigned int minor_rev; | |
5342 | ||
5343 | /* | |
5344 | * The early xHCI 1.1 spec did not mention that USB 3.1 capable hosts |
5345 | * should return 0x31 for sbrn, or that the minor revision | |
5346 | * is a two-digit BCD containing minor and sub-minor numbers. | |
5347 | * This was later clarified in xHCI 1.2. | |
5348 | * | |
5349 | * Some USB 3.1 capable hosts therefore have sbrn 0x30, and | |
5350 | * minor revision set to 0x1 instead of 0x10. | |
5351 | */ | |
5352 | if (xhci->usb3_rhub.min_rev == 0x1) | |
5353 | minor_rev = 1; | |
5354 | else | |
5355 | minor_rev = xhci->usb3_rhub.min_rev / 0x10; | |
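/*
 * Examples: a min_rev of 0x10 decodes to minor revision 1 (USB 3.1)
 * and 0x20 to 2 (USB 3.2); quirky hosts reporting 0x1 directly were
 * handled above.
 */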
5356 | ||
5357 | switch (minor_rev) { | |
5358 | case 2: | |
5359 | hcd->speed = HCD_USB32; | |
5360 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; | |
5361 | hcd->self.root_hub->rx_lanes = 2; | |
5362 | hcd->self.root_hub->tx_lanes = 2; | |
5363 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2; | |
5364 | break; | |
5365 | case 1: | |
5366 | hcd->speed = HCD_USB31; | |
5367 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; | |
5368 | hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1; | |
5369 | break; | |
5370 | } | |
5371 | xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", | |
5372 | minor_rev, minor_rev ? "Enhanced " : ""); | |
5373 | ||
5374 | xhci->usb3_rhub.hcd = hcd; | |
5375 | } | |
5376 | ||
552e0c4f SAS |
5377 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) |
5378 | { | |
5379 | struct xhci_hcd *xhci; | |
4c39d4b9 AB |
5380 | /* |
5381 | * TODO: Check with DWC3 clients for sysdev according to | |
5382 | * quirks | |
5383 | */ | |
5384 | struct device *dev = hcd->self.sysdev; | |
552e0c4f | 5385 | int retval; |
552e0c4f | 5386 | |
1386ff75 SS |
5387 | /* Accept arbitrarily long scatter-gather lists */ |
5388 | hcd->self.sg_tablesize = ~0; | |
fc76051c | 5389 | |
e2ed5114 MN |
5390 | /* support to build packet from discontinuous buffers */ |
5391 | hcd->self.no_sg_constraint = 1; | |
5392 | ||
19181bc5 HG |
5393 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
5394 | hcd->self.no_stop_on_short = 1; | |
552e0c4f | 5395 | |
b50107bb MN |
5396 | xhci = hcd_to_xhci(hcd); |
5397 | ||
873f3236 | 5398 | if (!usb_hcd_is_primary_hcd(hcd)) { |
57f23cd0 | 5399 | xhci_hcd_init_usb3_data(xhci, hcd); |
552e0c4f SAS |
5400 | return 0; |
5401 | } | |
5402 | ||
a00918d0 | 5403 | mutex_init(&xhci->mutex); |
57f23cd0 | 5404 | xhci->main_hcd = hcd; |
552e0c4f SAS |
5405 | xhci->cap_regs = hcd->regs; |
5406 | xhci->op_regs = hcd->regs + | |
b0ba9720 | 5407 | HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); |
552e0c4f | 5408 | xhci->run_regs = hcd->regs + |
b0ba9720 | 5409 | (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); |
552e0c4f | 5410 | /* Cache read-only capability registers */ |
b0ba9720 XR |
5411 | xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); |
5412 | xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); | |
5413 | xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); | |
c63d5757 | 5414 | xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase)); |
b0ba9720 | 5415 | xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); |
04abb6de LB |
5416 | if (xhci->hci_version > 0x100) |
5417 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); | |
552e0c4f | 5418 | |
b17a57f8 MN |
5419 | /* xhci-plat or xhci-pci might have set max_interrupters already */ |
5420 | if ((!xhci->max_interrupters) || | |
5421 | xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1)) | |
5422 | xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1); | |
5423 | ||
757de492 | 5424 | xhci->quirks |= quirks; |
4e6a1ee7 | 5425 | |
9b907c91 MN |
5426 | if (get_quirks) |
5427 | get_quirks(dev, xhci); | |
552e0c4f | 5428 | |
07f3cb7c GC |
5429 | /* Controllers that follow the xHCI 1.0 spec may give a spurious |
5430 | * success event after a short transfer. This quirk makes the driver | |
5431 | * ignore such spurious events. | |
5432 | */ | |
5433 | if (xhci->hci_version > 0x96) | |
5434 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | |
5435 | ||
083ba4c4 NN |
5436 | if (xhci->hci_version == 0x95 && link_quirk) { |
5437 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits"); | |
5438 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; | |
5439 | } | |
5440 | ||
552e0c4f SAS |
5441 | /* Make sure the HC is halted. */ |
5442 | retval = xhci_halt(xhci); | |
5443 | if (retval) | |
cd33a321 | 5444 | return retval; |
552e0c4f | 5445 | |
12de0a35 MZ |
5446 | xhci_zero_64b_regs(xhci); |
5447 | ||
552e0c4f SAS |
5448 | xhci_dbg(xhci, "Resetting HCD\n"); |
5449 | /* Reset the internal HC memory state and registers. */ | |
14073ce9 | 5450 | retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); |
552e0c4f | 5451 | if (retval) |
cd33a321 | 5452 | return retval; |
552e0c4f SAS |
5453 | xhci_dbg(xhci, "Reset complete\n"); |
5454 | ||
0a380be8 YS |
5455 | /* |
5456 | * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) | |
5457 | * of HCCPARAMS1 is set to 1. However, these xHCs do not actually | |
5458 | * support 64-bit address memory pointers. So, this driver clears the | |
5459 | * AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev, | |
5460 | * DMA_BIT_MASK(32)) is used later in this xhci_gen_setup(). | |
5461 | */ | |
5462 | if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) | |
5463 | xhci->hcc_params &= ~BIT(0); | |
5464 | ||
c10cf118 XR |
5465 | /* Set dma_mask and coherent_dma_mask to 64-bits, |
5466 | * if xHC supports 64-bit addressing */ | |
5467 | if (HCC_64BIT_ADDR(xhci->hcc_params) && | |
5468 | !dma_set_mask(dev, DMA_BIT_MASK(64))) { | |
552e0c4f | 5469 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); |
c10cf118 | 5470 | dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); |
fda182d8 DD |
5471 | } else { |
5472 | /* | |
5473 | * This is to avoid error in cases where a 32-bit USB | |
5474 | * controller is used on a 64-bit capable system. | |
5475 | */ | |
5476 | retval = dma_set_mask(dev, DMA_BIT_MASK(32)); | |
5477 | if (retval) | |
5478 | return retval; | |
5479 | xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); | |
5480 | dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | |
552e0c4f SAS |
5481 | } |
5482 | ||
5483 | xhci_dbg(xhci, "Calling HCD init\n"); | |
5484 | /* Initialize HCD and host controller data structures. */ | |
5485 | retval = xhci_init(hcd); | |
5486 | if (retval) | |
cd33a321 | 5487 | return retval; |
552e0c4f | 5488 | xhci_dbg(xhci, "Called HCD init\n"); |
99705092 | 5489 | |
873f3236 HK |
5490 | if (xhci_hcd_is_usb3(hcd)) |
5491 | xhci_hcd_init_usb3_data(xhci, hcd); | |
5492 | else | |
5493 | xhci_hcd_init_usb2_data(xhci, hcd); | |
5494 | ||
36b68579 | 5495 | xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", |
99705092 HG |
5496 | xhci->hcc_params, xhci->hci_version, xhci->quirks); |
5497 | ||
552e0c4f | 5498 | return 0; |
552e0c4f | 5499 | } |
436e8c7d | 5500 | EXPORT_SYMBOL_GPL(xhci_gen_setup); |
552e0c4f | 5501 | |
ef513be0 JL |
5502 | static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, |
5503 | struct usb_host_endpoint *ep) | |
5504 | { | |
5505 | struct xhci_hcd *xhci; | |
5506 | struct usb_device *udev; | |
5507 | unsigned int slot_id; | |
5508 | unsigned int ep_index; | |
5509 | unsigned long flags; | |
5510 | ||
5511 | xhci = hcd_to_xhci(hcd); | |
18b74067 MN |
5512 | |
5513 | spin_lock_irqsave(&xhci->lock, flags); | |
ef513be0 JL |
5514 | udev = (struct usb_device *)ep->hcpriv; |
5515 | slot_id = udev->slot_id; | |
5516 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
5517 | ||
ef513be0 JL |
5518 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; |
5519 | xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | |
5520 | spin_unlock_irqrestore(&xhci->lock, flags); | |
5521 | } | |
5522 | ||
1885d9a3 AB |
5523 | static const struct hc_driver xhci_hc_driver = { |
5524 | .description = "xhci-hcd", | |
5525 | .product_desc = "xHCI Host Controller", | |
32479d4b | 5526 | .hcd_priv_size = sizeof(struct xhci_hcd), |
1885d9a3 AB |
5527 | |
5528 | /* | |
5529 | * generic hardware linkage | |
5530 | */ | |
5531 | .irq = xhci_irq, | |
36dc0165 SK |
5532 | .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | |
5533 | HCD_BH, | |
1885d9a3 AB |
5534 | |
5535 | /* | |
5536 | * basic lifecycle operations | |
5537 | */ | |
5538 | .reset = NULL, /* set in xhci_init_driver() */ | |
5539 | .start = xhci_run, | |
5540 | .stop = xhci_stop, | |
5541 | .shutdown = xhci_shutdown, | |
5542 | ||
5543 | /* | |
5544 | * managing i/o requests and associated device resources | |
5545 | */ | |
33e39350 | 5546 | .map_urb_for_dma = xhci_map_urb_for_dma, |
2017a1e5 | 5547 | .unmap_urb_for_dma = xhci_unmap_urb_for_dma, |
1885d9a3 AB |
5548 | .urb_enqueue = xhci_urb_enqueue, |
5549 | .urb_dequeue = xhci_urb_dequeue, | |
5550 | .alloc_dev = xhci_alloc_dev, | |
5551 | .free_dev = xhci_free_dev, | |
5552 | .alloc_streams = xhci_alloc_streams, | |
5553 | .free_streams = xhci_free_streams, | |
5554 | .add_endpoint = xhci_add_endpoint, | |
5555 | .drop_endpoint = xhci_drop_endpoint, | |
18b74067 | 5556 | .endpoint_disable = xhci_endpoint_disable, |
1885d9a3 AB |
5557 | .endpoint_reset = xhci_endpoint_reset, |
5558 | .check_bandwidth = xhci_check_bandwidth, | |
5559 | .reset_bandwidth = xhci_reset_bandwidth, | |
5560 | .address_device = xhci_address_device, | |
5561 | .enable_device = xhci_enable_device, | |
5562 | .update_hub_device = xhci_update_hub_device, | |
5563 | .reset_device = xhci_discover_or_reset_device, | |
5564 | ||
5565 | /* | |
5566 | * scheduling support | |
5567 | */ | |
5568 | .get_frame_number = xhci_get_frame, | |
5569 | ||
5570 | /* | |
5571 | * root hub support | |
5572 | */ | |
5573 | .hub_control = xhci_hub_control, | |
5574 | .hub_status_data = xhci_hub_status_data, | |
5575 | .bus_suspend = xhci_bus_suspend, | |
5576 | .bus_resume = xhci_bus_resume, | |
8f9cc83c | 5577 | .get_resuming_ports = xhci_get_resuming_ports, |
1885d9a3 AB |
5578 | |
5579 | /* | |
5580 | * call back when device connected and addressed | |
5581 | */ | |
5582 | .update_device = xhci_update_device, | |
5583 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, | |
5584 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, | |
5585 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, | |
5586 | .find_raw_port_number = xhci_find_raw_port_number, | |
ef513be0 | 5587 | .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, |
1885d9a3 AB |
5588 | }; |
5589 | ||
cd33a321 RQ |
5590 | void xhci_init_driver(struct hc_driver *drv, |
5591 | const struct xhci_driver_overrides *over) | |
1885d9a3 | 5592 | { |
cd33a321 RQ |
5593 | BUG_ON(!over); |
5594 | ||
5595 | /* Copy the generic table to drv then apply the overrides */ | |
1885d9a3 | 5596 | *drv = xhci_hc_driver; |
cd33a321 RQ |
5597 | |
5598 | if (over) { | |
5599 | drv->hcd_priv_size += over->extra_priv_size; | |
5600 | if (over->reset) | |
5601 | drv->reset = over->reset; | |
5602 | if (over->start) | |
5603 | drv->start = over->start; | |
14295a15 CY |
5604 | if (over->add_endpoint) |
5605 | drv->add_endpoint = over->add_endpoint; | |
5606 | if (over->drop_endpoint) | |
5607 | drv->drop_endpoint = over->drop_endpoint; | |
1d69f9d9 IJ |
5608 | if (over->check_bandwidth) |
5609 | drv->check_bandwidth = over->check_bandwidth; | |
5610 | if (over->reset_bandwidth) | |
5611 | drv->reset_bandwidth = over->reset_bandwidth; | |
23a3b8d5 MN |
5612 | if (over->update_hub_device) |
5613 | drv->update_hub_device = over->update_hub_device; | |
592338dd JL |
5614 | if (over->hub_control) |
5615 | drv->hub_control = over->hub_control; | |
cd33a321 | 5616 | } |
1885d9a3 AB |
5617 | } |
5618 | EXPORT_SYMBOL_GPL(xhci_init_driver); | |
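/*
 * Typical use by a glue driver (a sketch loosely modelled on xhci-plat;
 * the xyz_* names are illustrative, not a real in-tree API):
 *
 *	static const struct xhci_driver_overrides xyz_overrides __initconst = {
 *		.extra_priv_size = sizeof(struct xyz_priv),
 *		.reset = xyz_setup,
 *		.start = xyz_start,
 *	};
 *
 *	xhci_init_driver(&xyz_hc_driver, &xyz_overrides);
 *
 * where xyz_setup() is expected to call xhci_gen_setup() as part of the
 * hc_driver ->reset step. The resulting xyz_hc_driver is the generic
 * table above with the overrides applied, ready to be handed to
 * usb_create_hcd().
 */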
5619 | ||
66d4eadd SS |
5620 | MODULE_DESCRIPTION(DRIVER_DESC); |
5621 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
5622 | MODULE_LICENSE("GPL"); | |
5623 | ||
5624 | static int __init xhci_hcd_init(void) | |
5625 | { | |
98441973 SS |
5626 | /* |
5627 | * Check the compiler generated sizes of structures that must be laid | |
5628 | * out in specific ways for hardware access. | |
5629 | */ | |
5630 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | |
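/*
 * i.e. 256 doorbell registers of 4 bytes each: doorbell 0 rings the
 * command ring, doorbells 1-255 ring the device slots.
 */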
5631 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); | |
5632 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); | |
5633 | /* xhci_device_control has eight fields, and also | |
5634 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | |
5635 | */ | |
98441973 SS |
5636 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
5637 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | |
5638 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | |
04abb6de | 5639 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); |
98441973 SS |
5640 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
5641 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ | |
5642 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); | |
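/* i.e. (8 + 8*128) 32-bit registers = 4128 bytes of runtime registers */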
1eaf35e4 ON |
5643 | |
5644 | if (usb_disabled()) | |
5645 | return -ENODEV; | |
5646 | ||
02b6fdc2 | 5647 | xhci_debugfs_create_root(); |
6aec5000 | 5648 | xhci_dbc_init(); |
02b6fdc2 | 5649 | |
66d4eadd SS |
5650 | return 0; |
5651 | } | |
b04c846c AD |
5652 | |
5653 | /* | |
5654 | * If an init function is provided, an exit function must also be provided | |
5655 | * to allow module unload. | |
5656 | */ | |
02b6fdc2 LB |
5657 | static void __exit xhci_hcd_fini(void) |
5658 | { | |
5659 | xhci_debugfs_remove_root(); | |
6aec5000 | 5660 | xhci_dbc_exit(); |
02b6fdc2 | 5661 | } |
b04c846c | 5662 | |
66d4eadd | 5663 | module_init(xhci_hcd_init); |
b04c846c | 5664 | module_exit(xhci_hcd_fini); |