// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>


 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static int logging_enabled = 1;

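/*
 * Scan asihpi_pci_tbl for an entry matching this adapter's PCI IDs
 * (PCI_ANY_ID fields act as wildcards) and return the adapter family's
 * HPI message handler stored in driver_data, or NULL if nothing matches.
 */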
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

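/*
 * Dispatch a message to the handler registered for its adapter index,
 * or fail the response with HPI_ERROR_PROCESSING_MESSAGE if the index
 * is out of range or no handler has been registered yet.
 */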
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

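/*
 * Handle subsystem-level messages locally: version queries, driver
 * load/unload (which resets or tears down the cached state), and
 * HPI_SUBSYS_CREATE_ADAPTER, which probes the adapter via HPIMSGX__init().
 */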
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

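/*
 * Handle adapter-level messages. Open and close are answered from the
 * cached responses; HPI_ADAPTER_DELETE first releases any state owned by
 * this caller and closes the adapter before passing the delete down to the
 * hardware handler. Everything else goes straight to the hardware entry
 * point.
 */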
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

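/*
 * Stream messages are rejected with HPI_ERROR_INVALID_OBJ_INDEX if the
 * stream index exceeds the count reported by the adapter; open and close
 * are intercepted so per-owner open state can be tracked.
 */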
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
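/*
 * Main entry point: validate the message type and adapter index, then
 * route by object type to the subsystem/adapter/mixer/stream handlers.
 * After the first DSP communication error, logging of further messages is
 * disabled and the debug level is set to HPI_DEBUG_LEVEL_ERROR.
 */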
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}

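/*
 * Adapter and mixer opens never reach the hardware again: they replay the
 * responses cached by adapter_prepare(); the matching closes simply report
 * success.
 */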
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

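/*
 * Open an input stream on behalf of h_owner. If the stream is free and its
 * cached open response is not an error, mark it open, drop the lock while
 * resetting the stream on the hardware, then either record the owner and
 * return the cached open response, or roll back the open flag on failure.
 */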
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

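/*
 * Close an input stream: only the recorded owner may close it. Ownership
 * is cleared, the lock is dropped while the stream is reset on the
 * hardware, and on failure ownership is restored and the error returned.
 */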
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

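/* Output stream open: same logic as instream_open() above. */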
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

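/* Output stream close: same owner check and reset as instream_close(). */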
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

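/*
 * Open the adapter, query its stream counts, then pre-open every stream
 * and the mixer once, caching each response so later open requests can be
 * answered from the cache without touching the hardware again.
 */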
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

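/*
 * Mark the cached open responses for one adapter (or all adapters) as
 * errors, so opens fail until adapter_prepare() repopulates the cache.
 */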
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

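/*
 * Create an adapter from an HPI_SUBSYS_CREATE_ADAPTER message: look up the
 * handler for the adapter's PCI IDs, let it create the adapter, record the
 * handler under the returned adapter index, then pre-open the adapter's
 * objects via adapter_prepare().
 */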
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

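/*
 * Release everything h_owner still holds on one adapter (or all adapters):
 * each stream it left open is reset, its host buffer freed and its group
 * cleared, then the open-state entry is returned to unowned.
 */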
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}