/* Cypress West Bridge API source file (cyasdma.c)
## ===========================
## Copyright (C) 2010 Cypress Semiconductor
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor
## Boston, MA 02110-1301, USA.
## ===========================
*/

#include "../../include/linux/westbridge/cyashal.h"
#include "../../include/linux/westbridge/cyasdma.h"
#include "../../include/linux/westbridge/cyaslowlevel.h"
#include "../../include/linux/westbridge/cyaserr.h"
#include "../../include/linux/westbridge/cyasregs.h"
/*
 * Add the DMA queue entry to the free list to be re-used later
 */
static void
cy_as_dma_add_request_to_free_queue(cy_as_device *dev_p,
	cy_as_dma_queue_entry *req_p)
{
	uint32_t imask;
	imask = cy_as_hal_disable_interrupts();

	req_p->next_p = dev_p->dma_freelist_p;
	dev_p->dma_freelist_p = req_p;

	cy_as_hal_enable_interrupts(imask);
}

/*
 * Get a DMA queue entry from the free list.
 */
static cy_as_dma_queue_entry *
cy_as_dma_get_dma_queue_entry(cy_as_device *dev_p)
{
	cy_as_dma_queue_entry *req_p;
	uint32_t imask;

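	/*
	 * (note: the free list is pre-populated with 32 entries by
	 * cy_as_dma_start(); the assert below fires only if more DMA
	 * requests are outstanding than were pre-allocated.)
	 */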
	cy_as_hal_assert(dev_p->dma_freelist_p != 0);

	imask = cy_as_hal_disable_interrupts();
	req_p = dev_p->dma_freelist_p;
	dev_p->dma_freelist_p = req_p->next_p;
	cy_as_hal_enable_interrupts(imask);

	return req_p;
}

/*
 * Set the maximum size that the West Bridge hardware
 * can handle in a single DMA operation. This size
 * may change for the P <-> U endpoints as a function
 * of the endpoint type and whether we are running
 * at full speed or high speed.
 */
cy_as_return_status_t
cy_as_dma_set_max_dma_size(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, uint32_t size)
{
	/* In MTP mode, EP2 is allowed to have all max sizes. */
	if ((!dev_p->is_mtp_firmware) || (ep != 0x02)) {
		if (size < 64 || size > 1024)
			return CY_AS_ERROR_INVALID_SIZE;
	}
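	/*
	 * (note: the 64 to 1024 byte bounds line up with USB packet size
	 * limits, 64 bytes being the full-speed maximum and 1024 bytes the
	 * largest high-speed packet.)
	 */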

	CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size;
	return CY_AS_ERROR_SUCCESS;
}

/*
 * The callback for requests sent to West Bridge
 * to relay endpoint data. Endpoint data for EP0
 * and EP1 are sent using mailbox requests. This
 * is the callback that is called when a response
 * to a mailbox request to send data is received.
 */
static void
cy_as_dma_request_callback(
	cy_as_device *dev_p,
	uint8_t context,
	cy_as_ll_request_response *req_p,
	cy_as_ll_request_response *resp_p,
	cy_as_return_status_t ret)
{
	uint16_t v;
	uint16_t datacnt;
	cy_as_end_point_number_t ep;

	(void)context;

	cy_as_log_debug_message(5, "cy_as_dma_request_callback called");

	/*
	 * extract the return code from the firmware
	 */
	if (ret == CY_AS_ERROR_SUCCESS) {
		if (cy_as_ll_request_response__get_code(resp_p) !=
			CY_RESP_SUCCESS_FAILURE)
			ret = CY_AS_ERROR_INVALID_RESPONSE;
		else
			ret = cy_as_ll_request_response__get_word(resp_p, 0);
	}
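	/*
	 * (note: the firmware acknowledges an EP data request with a
	 * CY_RESP_SUCCESS_FAILURE response whose first word carries the
	 * actual status code, which is what is extracted above.)
	 */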

	/*
	 * extract the endpoint number and the transferred byte count
	 * from the request.
	 */
	v = cy_as_ll_request_response__get_word(req_p, 0);
	ep = (cy_as_end_point_number_t)((v >> 13) & 0x01);
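	/*
	 * (this mirrors the header word packed in
	 * cy_as_dma_send_next_dma_request(): the byte count sits in the
	 * low bits and the endpoint number (0 or 1) in bit 13.)
	 */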

	if (ret == CY_AS_ERROR_SUCCESS) {
		/*
		 * if the firmware returns success,
		 * all of the data requested was
		 * transferred. there are no partial
		 * transfers.
		 */
		datacnt = v & 0x3FF;
	} else {
		/*
		 * if the firmware returned an error, no data was transferred.
		 */
		datacnt = 0;
	}

	/*
	 * queue the request and response data structures for use with the
	 * next EP0 or EP1 request.
	 */
	if (ep == 0) {
		dev_p->usb_ep0_dma_req = req_p;
		dev_p->usb_ep0_dma_resp = resp_p;
	} else {
		dev_p->usb_ep1_dma_req = req_p;
		dev_p->usb_ep1_dma_resp = resp_p;
	}

	/*
	 * call the DMA complete function so we can
	 * signal that this portion of the transfer
	 * has completed. if the low level request
	 * was canceled, we do not need to signal
	 * the completed function as the only way a
	 * cancel can happen is via the DMA cancel
	 * function.
	 */
	if (ret != CY_AS_ERROR_CANCELED)
		cy_as_dma_completed_callback(dev_p->tag, ep, datacnt, ret);
}

/*
 * Set the DRQ mask register for the given endpoint number. If state is
 * CyTrue, the DRQ interrupt for the given endpoint is enabled, otherwise
 * it is disabled.
 */
static void
cy_as_dma_set_drq(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool state)
{
	uint16_t mask;
	uint16_t v;
	uint32_t intval;

	/*
	 * there are no DRQ register bits for EP0 and EP1
	 */
	if (ep == 0 || ep == 1)
		return;

	/*
	 * disable interrupts while we do this to be sure the state of the
	 * DRQ mask register is always well defined.
	 */
	intval = cy_as_hal_disable_interrupts();

	/*
	 * set the DRQ bit to the given state for the given endpoint
	 */
	mask = (1 << ep);
	v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK);

	if (state)
		v |= mask;
	else
		v &= ~mask;

	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v);
	cy_as_hal_enable_interrupts(intval);
}

/*
 * Send the next DMA request for the endpoint given
 */
static void
cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p)
{
	uint32_t datacnt;
	void *buf_p;
	cy_as_dma_queue_entry *dma_p;

	cy_as_log_debug_message(6, "cy_as_dma_send_next_dma_request called");

	/* If the queue is empty, nothing to do */
	dma_p = ep_p->queue_p;
	if (dma_p == 0) {
		/*
		 * there are no pending DMA requests
		 * for this endpoint. disable the DRQ
		 * mask bits to ensure no interrupts
		 * will be triggered by this endpoint
		 * until someone is interested in the data.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false);
		return;
	}

	cy_as_dma_end_point_set_running(ep_p);

	/*
	 * get the number of bytes that still
	 * need to be transferred in this request.
	 */
	datacnt = dma_p->size - dma_p->offset;
	cy_as_hal_assert(datacnt >= 0);

	/*
	 * the HAL layer should never limit the size
	 * of the transfer to something less than
	 * maxhwdata; otherwise, the data will be sent
	 * in packets that are not correct in size.
	 */
	cy_as_hal_assert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE
		|| ep_p->maxhaldata >= ep_p->maxhwdata);

	/*
	 * update the number of bytes to transfer now
	 * based on the limits of the HAL layer.
	 */
	if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE) {
		if (datacnt > ep_p->maxhwdata)
			datacnt = ep_p->maxhwdata;
	} else {
		if (datacnt > ep_p->maxhaldata)
			datacnt = ep_p->maxhaldata;
	}
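	/*
	 * (note: CY_AS_DMA_MAX_SIZE_HW_SIZE acts as a sentinel meaning the
	 * HAL imposed no limit of its own, so the transfer is clamped to
	 * the hardware packet size rather than to maxhaldata.)
	 */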

	/*
	 * find a pointer to the data that needs to be transferred
	 */
	buf_p = (((char *)dma_p->buf_p) + dma_p->offset);

	/*
	 * mark a request in transit
	 */
	cy_as_dma_end_point_set_in_transit(ep_p);

	if (ep_p->ep == 0 || ep_p->ep == 1) {
		/*
		 * if this is a WRITE request on EP0 or EP1,
		 * we write the data via an EP_DATA request
		 * to west bridge via the mailbox registers.
		 * if this is a READ request, we do nothing
		 * and the data will arrive via an EP_DATA
		 * request from west bridge. in the request
		 * handler for the USB context we will pass
		 * the data back into the DMA module.
		 */
		if (dma_p->readreq == cy_false) {
			uint16_t v;
			uint16_t len;
			cy_as_ll_request_response *resp_p;
			cy_as_ll_request_response *req_p;
			cy_as_return_status_t ret;

			len = (uint16_t)(datacnt / 2);
			if (datacnt % 2)
				len++;

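			/* (one extra word for the header word packed below) */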
			len++;

			if (ep_p->ep == 0) {
				req_p = dev_p->usb_ep0_dma_req;
				resp_p = dev_p->usb_ep0_dma_resp;
				dev_p->usb_ep0_dma_req = 0;
				dev_p->usb_ep0_dma_resp = 0;
			} else {
				req_p = dev_p->usb_ep1_dma_req;
				resp_p = dev_p->usb_ep1_dma_resp;
				dev_p->usb_ep1_dma_req = 0;
				dev_p->usb_ep1_dma_resp = 0;
			}

			cy_as_hal_assert(req_p != 0);
			cy_as_hal_assert(resp_p != 0);
			cy_as_hal_assert(len <= 64);

			cy_as_ll_init_request(req_p, CY_RQT_USB_EP_DATA,
				CY_RQT_USB_RQT_CONTEXT, len);

			v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14));
			if (dma_p->offset == 0)
				v |= (1 << 12); /* Set the first packet bit */
			if (dma_p->offset + datacnt == dma_p->size)
				v |= (1 << 11); /* Set the last packet bit */
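			/*
			 * (note: as packed above, this header word carries
			 * the byte count in its low bits, the last/first
			 * packet flags in bits 11 and 12, and the endpoint
			 * number in bit 13; bit 14 is always set for these
			 * EP0/EP1 data transfers.)
			 */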

			cy_as_ll_request_response__set_word(req_p, 0, v);
			cy_as_ll_request_response__pack(req_p,
				1, datacnt, buf_p);

			cy_as_ll_init_response(resp_p, 1);

			ret = cy_as_ll_send_request(dev_p, req_p, resp_p,
				cy_false, cy_as_dma_request_callback);
			if (ret == CY_AS_ERROR_SUCCESS)
				cy_as_log_debug_message(5,
				"+++ send EP 0/1 data via mailbox registers");
			else
				cy_as_log_debug_message(5,
				"+++ error sending EP 0/1 data via mailbox "
				"registers - CY_AS_ERROR_TIMEOUT");

			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_dma_completed_callback(dev_p->tag,
					ep_p->ep, 0, ret);
		}
	} else {
		/*
		 * this is a DMA request on an endpoint that is accessible
		 * via the P port. ask the HAL DMA layer to perform the
		 * transfer. the amount of data sent is limited by the
		 * HAL max size as well as what we need to send. if
		 * ep_p->maxhaldata is set to a value larger than the
		 * endpoint buffer size, then we will pass more than a
		 * single buffer's worth of data to the HAL layer and expect
		 * the HAL layer to divide the data into packets. the last
		 * parameter here (ep_p->maxhwdata) gives the packet size for
		 * the data so the HAL layer knows what the packet size should
		 * be.
		 */
		if (cy_as_dma_end_point_is_direction_in(ep_p))
			cy_as_hal_dma_setup_write(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);
		else
			cy_as_hal_dma_setup_read(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);
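		/*
		 * (for an "in" endpoint the processor supplies the data, so
		 * a HAL write is set up; for an "out" endpoint the data is
		 * read back from West Bridge instead.)
		 */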

		/*
		 * the DRQ interrupt for this endpoint should be enabled
		 * so that the data transfer progresses at interrupt time.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true);
	}
}

/*
 * This function is called when the HAL layer has
 * completed the last requested DMA operation.
 * This function sends/receives the next batch of
 * data associated with the current DMA request,
 * or, if that request is complete, moves to the next DMA request.
 */
void
cy_as_dma_completed_callback(cy_as_hal_device_tag tag,
	cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status)
{
	uint32_t mask;
	cy_as_dma_queue_entry *req_p;
	cy_as_dma_end_point *ep_p;
	cy_as_device *dev_p = cy_as_device_find_from_tag(tag);

	/* Make sure the HAL layer gave us good parameters */
	cy_as_hal_assert(dev_p != 0);
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
	cy_as_hal_assert(ep < 16);
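	/*
	 * (the assert guards the dev_p->endp[] lookup done through
	 * CY_AS_NUM_EP() below; endpoint numbers above 15 are invalid.)
	 */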

	/* Get the endpoint ptr */
	ep_p = CY_AS_NUM_EP(dev_p, ep);
	cy_as_hal_assert(ep_p->queue_p != 0);

	/* Get a pointer to the current entry in the queue */
	mask = cy_as_hal_disable_interrupts();
	req_p = ep_p->queue_p;

	/* Update the offset to reflect the data actually received or sent */
	req_p->offset += cnt;

	/*
	 * if we are still sending/receiving the current packet,
	 * send/receive the next chunk. basically, we keep going
	 * if we have not sent/received enough data, and we are
	 * not doing a packet operation, and the last packet
	 * sent or received was a full sized packet. in other
	 * words, when we are NOT doing a packet operation, a
	 * less than full size packet (a short packet) will
	 * terminate the operation.
	 *
	 * note: if this is an EP1 request and the request has
	 * timed out, it means the buffer is not free.
	 * we have to resend the data.
	 *
	 * note: for the MTP data transfers, the DMA transfer
	 * for the next packet can only be started asynchronously,
	 * after a firmware event notifies that the device is ready.
	 */
	if (((req_p->offset != req_p->size) && (req_p->packet == cy_false) &&
		((cnt == ep_p->maxhaldata) || ((cnt == ep_p->maxhwdata) &&
		((ep != CY_AS_MTP_READ_ENDPOINT) ||
		(cnt == dev_p->usb_max_tx_size)))))
		|| ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) {
		cy_as_hal_enable_interrupts(mask);

		/*
		 * and send the request again to send the next block of
		 * data. special handling for MTP transfers on EPs 2
		 * and 6. the send_next_request will be processed based
		 * on the event sent by the firmware.
		 */
		if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
			(ep == CY_AS_MTP_READ_ENDPOINT) &&
			(!cy_as_dma_end_point_is_direction_in(ep_p))))
			cy_as_dma_end_point_set_stopped(ep_p);
		else
			cy_as_dma_send_next_dma_request(dev_p, ep_p);
	} else {
		/*
		 * we get here if ...
		 *	we have sent or received all of the data
		 * or
		 *	we are doing a packet operation
		 * or
		 *	we received a short packet
		 */

		/*
		 * remove this entry from the DMA queue for this endpoint.
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p);
		ep_p->queue_p = req_p->next_p;
		if (ep_p->last_p == req_p) {
			/*
			 * we have removed the last packet from the DMA queue,
			 * so disable the interrupt associated with this endpoint.
			 */
			ep_p->last_p = 0;
			cy_as_hal_enable_interrupts(mask);
			cy_as_dma_set_drq(dev_p, ep, cy_false);
		} else
			cy_as_hal_enable_interrupts(mask);

		if (req_p->cb) {
			/*
			 * if the request has a callback associated with it,
			 * call the callback to tell the interested party that
			 * this DMA request has completed.
			 *
			 * note, we set the in_callback bit to ensure that we
			 * cannot recursively call an API function that is
			 * synchronous only from a callback.
			 */
			cy_as_device_set_in_callback(dev_p);
			(*req_p->cb)(dev_p, ep, req_p->buf_p,
				req_p->offset, status);
			cy_as_device_clear_in_callback(dev_p);
		}

		/*
		 * we are done with this request, put it on the freelist to be
		 * reused at a later time.
		 */
		cy_as_dma_add_request_to_free_queue(dev_p, req_p);

		if (ep_p->queue_p == 0) {
			/*
			 * if the endpoint is out of DMA entries, set the
			 * endpoint as stopped.
			 */
			cy_as_dma_end_point_set_stopped(ep_p);

			/*
			 * the DMA queue is empty, wake any task waiting on
			 * the queue to drain.
			 */
			if (cy_as_dma_end_point_is_sleeping(ep_p)) {
				cy_as_dma_end_point_set_wake_state(ep_p);
				cy_as_hal_wake(&ep_p->channel);
			}
		} else {
			/*
			 * if the queued operation is an MTP transfer,
			 * wait until a firmware event before sending
			 * down the next DMA request.
			 */
			if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
				((ep == CY_AS_MTP_READ_ENDPOINT) &&
				(!cy_as_dma_end_point_is_direction_in(ep_p))) ||
				((ep == dev_p->storage_read_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p)))
				|| ((ep == dev_p->storage_write_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p))))
				cy_as_dma_end_point_set_stopped(ep_p);
			else
				cy_as_dma_send_next_dma_request(dev_p, ep_p);
		}
	}
}

/*
 * This function is used to kick start DMA on a given
 * channel. If DMA is already running on the given
 * endpoint, nothing happens. If DMA is not running,
 * the first entry is pulled from the DMA queue and
 * sent/received to/from the West Bridge device.
 */
cy_as_return_status_t
cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep)
{
	cy_as_dma_end_point *ep_p;
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	ep_p = CY_AS_NUM_EP(dev_p, ep);

	/* We are already running */
	if (cy_as_dma_end_point_is_running(ep_p))
		return CY_AS_ERROR_SUCCESS;

	cy_as_dma_send_next_dma_request(dev_p, ep_p);
	return CY_AS_ERROR_SUCCESS;
}

/*
 * This function stops the given endpoint. Stopping an endpoint cancels
 * any pending DMA operations and frees all resources associated with the
 * given endpoint.
 */
static cy_as_return_status_t
cy_as_dma_stop_end_point(cy_as_device *dev_p, cy_as_end_point_number_t ep)
{
	cy_as_return_status_t ret;
	cy_as_dma_end_point *ep_p = CY_AS_NUM_EP(dev_p, ep);

	/*
	 * cancel any pending DMA requests associated with this endpoint. this
	 * cancels any DMA requests at the HAL layer as well as dequeues any
	 * request that is currently pending.
	 */
	ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/*
	 * destroy the sleep channel
	 */
	if (!cy_as_hal_destroy_sleep_channel(&ep_p->channel)
		&& ret == CY_AS_ERROR_SUCCESS)
		ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;

	/*
	 * free the memory associated with this endpoint
	 */
	cy_as_hal_free(ep_p);

	/*
	 * set the data structure ptr to something sane since the
	 * previous pointer is now free.
	 */
	dev_p->endp[ep] = 0;

	return ret;
}

/*
 * This method stops the DMA module. This is an internal function that does
 * all of the work of destroying the DMA module without the protections that
 * we provide to the API (i.e. stopping a module that is not running).
 */
static cy_as_return_status_t
cy_as_dma_stop_internal(cy_as_device *dev_p)
{
	cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
	cy_as_return_status_t lret;
	cy_as_end_point_number_t i;

	/*
	 * stop all of the endpoints. this cancels all DMA requests, and
	 * frees all resources associated with each endpoint.
	 */
	for (i = 0; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])); i++) {
		lret = cy_as_dma_stop_end_point(dev_p, i);
		if (lret != CY_AS_ERROR_SUCCESS && ret == CY_AS_ERROR_SUCCESS)
			ret = lret;
	}

	/*
	 * now, free the list of DMA request structures that we use to manage
	 * DMA requests.
	 */
	while (dev_p->dma_freelist_p) {
		cy_as_dma_queue_entry *req_p;
		uint32_t imask = cy_as_hal_disable_interrupts();

		req_p = dev_p->dma_freelist_p;
		dev_p->dma_freelist_p = req_p->next_p;

		cy_as_hal_enable_interrupts(imask);

		cy_as_hal_free(req_p);
	}

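	/*
	 * (the request/response structures below are the EP0/EP1 mailbox
	 * transfer objects that were pre-allocated in cy_as_dma_start().)
	 */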
	cy_as_ll_destroy_request(dev_p, dev_p->usb_ep0_dma_req);
	cy_as_ll_destroy_request(dev_p, dev_p->usb_ep1_dma_req);
	cy_as_ll_destroy_response(dev_p, dev_p->usb_ep0_dma_resp);
	cy_as_ll_destroy_response(dev_p, dev_p->usb_ep1_dma_resp);

	return ret;
}

/*
 * CyAsDmaStop()
 *
 * This function shuts down the DMA module. All resources
 * associated with the DMA module will be freed. This
 * routine is the API stop function. It ensures that we
 * are stopping a stack that is actually running and then
 * calls the internal function to do the work.
 */
cy_as_return_status_t
cy_as_dma_stop(cy_as_device *dev_p)
{
	cy_as_return_status_t ret;

	ret = cy_as_dma_stop_internal(dev_p);
	cy_as_device_set_dma_stopped(dev_p);

	return ret;
}

/*
 * CyAsDmaStart()
 *
 * This function initializes the DMA module to ensure it is up and running.
 */
cy_as_return_status_t
cy_as_dma_start(cy_as_device *dev_p)
{
	cy_as_end_point_number_t i;
	uint16_t cnt;

	if (cy_as_device_is_dma_running(dev_p))
		return CY_AS_ERROR_ALREADY_RUNNING;

	/*
	 * pre-allocate DMA queue structures to be used in the interrupt context
	 */
	for (cnt = 0; cnt < 32; cnt++) {
		cy_as_dma_queue_entry *entry_p = (cy_as_dma_queue_entry *)
			cy_as_hal_alloc(sizeof(cy_as_dma_queue_entry));
		if (entry_p == 0) {
			cy_as_dma_stop_internal(dev_p);
			return CY_AS_ERROR_OUT_OF_MEMORY;
		}
		cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
	}
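	/*
	 * (thirty-two entries are pre-allocated above so the free list can
	 * satisfy queue-entry requests without allocating memory in
	 * interrupt context.)
	 */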

	/*
	 * pre-allocate the DMA requests for sending EP0
	 * and EP1 data to west bridge
	 */
	dev_p->usb_ep0_dma_req = cy_as_ll_create_request(dev_p,
		CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);
	dev_p->usb_ep1_dma_req = cy_as_ll_create_request(dev_p,
		CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);

	if (dev_p->usb_ep0_dma_req == 0 || dev_p->usb_ep1_dma_req == 0) {
		cy_as_dma_stop_internal(dev_p);
		return CY_AS_ERROR_OUT_OF_MEMORY;
	}
	dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req;

	dev_p->usb_ep0_dma_resp = cy_as_ll_create_response(dev_p, 1);
	dev_p->usb_ep1_dma_resp = cy_as_ll_create_response(dev_p, 1);
	if (dev_p->usb_ep0_dma_resp == 0 || dev_p->usb_ep1_dma_resp == 0) {
		cy_as_dma_stop_internal(dev_p);
		return CY_AS_ERROR_OUT_OF_MEMORY;
	}
	dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp;

	/*
	 * set the dev_p->endp to all zeros to ensure cleanup is possible if
	 * an error occurs during initialization.
	 */
	cy_as_hal_mem_set(dev_p->endp, 0, sizeof(dev_p->endp));

	/*
	 * now, iterate through each of the endpoints and initialize each
	 * one.
	 */
	for (i = 0; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]); i++) {
		dev_p->endp[i] = (cy_as_dma_end_point *)
			cy_as_hal_alloc(sizeof(cy_as_dma_end_point));
		if (dev_p->endp[i] == 0) {
			cy_as_dma_stop_internal(dev_p);
			return CY_AS_ERROR_OUT_OF_MEMORY;
		}
		cy_as_hal_mem_set(dev_p->endp[i], 0,
			sizeof(cy_as_dma_end_point));

		dev_p->endp[i]->ep = i;
		dev_p->endp[i]->queue_p = 0;
		dev_p->endp[i]->last_p = 0;

		cy_as_dma_set_drq(dev_p, i, cy_false);

		if (!cy_as_hal_create_sleep_channel(&dev_p->endp[i]->channel))
			return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
	}

	/*
	 * tell the HAL layer who to call when the
	 * HAL layer completes a DMA request
	 */
	cy_as_hal_dma_register_callback(dev_p->tag,
		cy_as_dma_completed_callback);

	/*
	 * mark DMA as up and running on this device
	 */
	cy_as_device_set_dma_running(dev_p);

	return CY_AS_ERROR_SUCCESS;
}

/*
 * Wait for all entries in the DMA queue associated
 * with the given endpoint to be drained. This function
 * will not return until all the DMA data has been
 * transferred.
 */
cy_as_return_status_t
cy_as_dma_drain_queue(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool kickstart)
{
	cy_as_dma_end_point *ep_p;
	int loopcount = 1000;
	uint32_t mask;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	/*
	 * if the endpoint is empty of traffic, we return
	 * with success immediately
	 */
	mask = cy_as_hal_disable_interrupts();
	if (ep_p->queue_p == 0) {
		cy_as_hal_enable_interrupts(mask);
		return CY_AS_ERROR_SUCCESS;
	} else {
		/*
		 * add 10 seconds to the time out value for each 64 KB segment
		 * of data to be transferred.
		 */
		if (ep_p->queue_p->size > 0x10000)
			loopcount += ((ep_p->queue_p->size / 0x10000) * 1000);
	}
	cy_as_hal_enable_interrupts(mask);
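	/*
	 * (each pass through the wait loop below sleeps for at most 10 ms,
	 * so the base loopcount of 1000 amounts to a ten second timeout,
	 * extended above by ten seconds per 64 KB of queued data.)
	 */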

	/* If we are already sleeping on this endpoint, it is an error */
	if (cy_as_dma_end_point_is_sleeping(ep_p))
		return CY_AS_ERROR_NESTED_SLEEP;

	/*
	 * we disable the endpoint while the queue drains to
	 * prevent any additional requests from being queued while we are waiting
	 */
	cy_as_dma_enable_end_point(dev_p, ep,
		cy_false, cy_as_direction_dont_change);

	if (kickstart) {
		/*
		 * now, kick start the DMA if necessary
		 */
		cy_as_dma_kick_start(dev_p, ep);
	}

	/*
	 * check one last time before we begin sleeping to see if the
	 * queue is drained.
	 */
	if (ep_p->queue_p == 0) {
		cy_as_dma_enable_end_point(dev_p, ep, cy_true,
			cy_as_direction_dont_change);
		return CY_AS_ERROR_SUCCESS;
	}

	while (loopcount-- > 0) {
		/*
		 * sleep for 10 ms maximum (per loop) while
		 * waiting for the transfer to complete.
		 */
		cy_as_dma_end_point_set_sleep_state(ep_p);
		cy_as_hal_sleep_on(&ep_p->channel, 10);

		/* If we timed out, the sleep bit will still be set */
		cy_as_dma_end_point_set_wake_state(ep_p);

		/* Check the queue to see if it is drained */
		if (ep_p->queue_p == 0) {
			/*
			 * clear the endpoint running and in transit flags
			 * for the endpoint, now that its DMA queue is empty.
			 */
			cy_as_dma_end_point_clear_in_transit(ep_p);
			cy_as_dma_end_point_set_stopped(ep_p);

			cy_as_dma_enable_end_point(dev_p, ep,
				cy_true, cy_as_direction_dont_change);
			return CY_AS_ERROR_SUCCESS;
		}
	}

	/*
	 * the DMA operation that has timed out can be cancelled, so that later
	 * operations on this queue can proceed.
	 */
	cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_TIMEOUT);
	cy_as_dma_enable_end_point(dev_p, ep,
		cy_true, cy_as_direction_dont_change);
	return CY_AS_ERROR_TIMEOUT;
}

/*
 * This function queues a DMA request in the DMA queue
 * for a given endpoint. The direction of the
 * entry will be inferred from the endpoint direction.
 */
cy_as_return_status_t
cy_as_dma_queue_request(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, void *mem_p,
	uint32_t size, cy_bool pkt, cy_bool readreq, cy_as_dma_callback cb)
{
	uint32_t mask;
	cy_as_dma_queue_entry *entry_p;
	cy_as_dma_end_point *ep_p;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (!cy_as_dma_end_point_is_enabled(ep_p))
		return CY_AS_ERROR_ENDPOINT_DISABLED;

	entry_p = cy_as_dma_get_dma_queue_entry(dev_p);

	entry_p->buf_p = mem_p;
	entry_p->cb = cb;
	entry_p->size = size;
	entry_p->offset = 0;
	entry_p->packet = pkt;
	entry_p->readreq = readreq;

	mask = cy_as_hal_disable_interrupts();
	entry_p->next_p = 0;
	if (ep_p->last_p)
		ep_p->last_p->next_p = entry_p;
	ep_p->last_p = entry_p;
	if (ep_p->queue_p == 0)
		ep_p->queue_p = entry_p;
	cy_as_hal_enable_interrupts(mask);
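	/*
	 * (note: queuing a request does not by itself start the transfer;
	 * the caller is expected to follow up with cy_as_dma_kick_start(),
	 * either directly or through cy_as_dma_drain_queue().)
	 */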

	return CY_AS_ERROR_SUCCESS;
}

/*
 * This function enables or disables an endpoint for DMA
 * queueing. If an endpoint is disabled, any queued requests
 * continue to be processed, but no new requests can be queued.
 */
cy_as_return_status_t
cy_as_dma_enable_end_point(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool enable, cy_as_dma_direction dir)
{
	cy_as_dma_end_point *ep_p;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (dir == cy_as_direction_out)
		cy_as_dma_end_point_set_direction_out(ep_p);
	else if (dir == cy_as_direction_in)
		cy_as_dma_end_point_set_direction_in(ep_p);

	/*
	 * get the maximum size of data buffer the HAL
	 * layer can accept. this is used when the DMA
	 * module is sending DMA requests to the HAL.
	 * the DMA module will never send down a request
	 * that is greater than this value.
	 *
	 * for EP0 and EP1, we can send no more than 64
	 * bytes of data at one time as this is the maximum
	 * size of a packet that can be sent via these
	 * endpoints.
	 */
	if (ep == 0 || ep == 1)
		ep_p->maxhaldata = 64;
	else
		ep_p->maxhaldata = cy_as_hal_dma_max_request_size(
			dev_p->tag, ep);

	if (enable)
		cy_as_dma_end_point_enable(ep_p);
	else
		cy_as_dma_end_point_disable(ep_p);

	return CY_AS_ERROR_SUCCESS;
}

/*
 * This function cancels any DMA operations pending with the HAL layer as well
 * as any DMA operation queued on the endpoint.
 */
cy_as_return_status_t
cy_as_dma_cancel(
	cy_as_device *dev_p,
	cy_as_end_point_number_t ep,
	cy_as_return_status_t err)
{
	uint32_t mask;
	cy_as_dma_end_point *ep_p;
	cy_as_dma_queue_entry *entry_p;
	cy_bool epstate;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (ep_p) {
		/* Remember the state of the endpoint */
		epstate = cy_as_dma_end_point_is_enabled(ep_p);

		/*
		 * disable the endpoint so no more DMA packets can be
		 * queued.
		 */
		cy_as_dma_enable_end_point(dev_p, ep,
			cy_false, cy_as_direction_dont_change);

		/*
		 * don't allow any interrupts from this endpoint
		 * while we get the most current request off of
		 * the queue.
		 */
		cy_as_dma_set_drq(dev_p, ep, cy_false);

		/*
		 * cancel any pending request queued in the HAL layer
		 */
		if (cy_as_dma_end_point_in_transit(ep_p))
			cy_as_hal_dma_cancel_request(dev_p->tag, ep_p->ep);

		/*
		 * shutdown the DMA for this endpoint so no
		 * more data is transferred
		 */
		cy_as_dma_end_point_set_stopped(ep_p);

		/*
		 * mark the endpoint as not in transit, because we are
		 * going to consume any queued requests
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p);

		/*
		 * now, remove each entry in the queue and call the
		 * associated callback stating that the request was
		 * canceled.
		 */
		ep_p->last_p = 0;
		while (ep_p->queue_p != 0) {
			/* Disable interrupts to manipulate the queue */
			mask = cy_as_hal_disable_interrupts();

			/* Remove an entry from the queue */
			entry_p = ep_p->queue_p;
			ep_p->queue_p = entry_p->next_p;

			/* Ok, the queue has been updated, we can
			 * turn interrupts back on */
			cy_as_hal_enable_interrupts(mask);

			/* Call the callback indicating we have
			 * canceled the DMA */
			if (entry_p->cb)
				entry_p->cb(dev_p, ep,
					entry_p->buf_p, entry_p->size, err);

			cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
		}

		if (ep == 0 || ep == 1) {
			/*
			 * if this endpoint is zero or one, we need to
			 * clear the queue of any pending CY_RQT_USB_EP_DATA
			 * requests as these are pending requests to send
			 * data to the west bridge device.
			 */
			cy_as_ll_remove_ep_data_requests(dev_p, ep);
		}

		if (epstate) {
			/*
			 * the endpoint started out enabled, so we
			 * re-enable the endpoint here.
			 */
			cy_as_dma_enable_end_point(dev_p, ep,
				cy_true, cy_as_direction_dont_change);
		}
	}

	return CY_AS_ERROR_SUCCESS;
}

cy_as_return_status_t
cy_as_dma_received_data(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, uint32_t dsize, void *data)
{
	cy_as_dma_queue_entry *dma_p;
	uint8_t *src_p, *dest_p;
	cy_as_dma_end_point *ep_p;
	uint32_t xfersize;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep != 0 && ep != 1)
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);
	dma_p = ep_p->queue_p;
	if (dma_p == 0)
		return CY_AS_ERROR_SUCCESS;

	/*
	 * if the data received exceeds the size of the DMA buffer,
	 * clip the data to the size of the buffer. this can lead
	 * to losing some data, but is no different from doing
	 * non-packet reads on the other endpoints.
	 */
	if (dsize > dma_p->size - dma_p->offset)
		dsize = dma_p->size - dma_p->offset;

	/*
	 * copy the data from the request packet to the DMA buffer
	 * for the endpoint
	 */
	src_p = (uint8_t *)data;
	dest_p = ((uint8_t *)(dma_p->buf_p)) + dma_p->offset;
	xfersize = dsize;
	while (xfersize-- > 0)
		*dest_p++ = *src_p++;
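	/*
	 * (a simple byte copy suffices here; these EP0/EP1 mailbox data
	 * transfers are small, at most 64 bytes at a time, per the limit
	 * noted in cy_as_dma_enable_end_point().)
	 */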

	/* Signal the DMA module that we have
	 * received data for this EP request */
	cy_as_dma_completed_callback(dev_p->tag,
		ep, dsize, CY_AS_ERROR_SUCCESS);

	return CY_AS_ERROR_SUCCESS;
}