Merge tag 'pm-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
[linux-2.6-block.git] / drivers / misc / mei / client.c
CommitLineData
9fff0425 1// SPDX-License-Identifier: GPL-2.0
ab841160 2/*
95953618 3 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
ab841160 4 * Intel Management Engine Interface (Intel MEI) Linux driver
ab841160
OW
5 */
6
174cd4b1 7#include <linux/sched/signal.h>
9ca9050b
TW
8#include <linux/wait.h>
9#include <linux/delay.h>
1f180359 10#include <linux/slab.h>
04bb139a 11#include <linux/pm_runtime.h>
369aea84 12#include <linux/dma-mapping.h>
ab841160 13
4f3afe1d 14#include <linux/mei.h>
47a73801
TW
15
16#include "mei_dev.h"
0edb23fc 17#include "hbm.h"
90e0b5f1
TW
18#include "client.h"
19
79563db9
TW
20/**
21 * mei_me_cl_init - initialize me client
22 *
23 * @me_cl: me client
24 */
25void mei_me_cl_init(struct mei_me_client *me_cl)
26{
27 INIT_LIST_HEAD(&me_cl->list);
28 kref_init(&me_cl->refcnt);
29}
30
31/**
32 * mei_me_cl_get - increases me client refcount
33 *
34 * @me_cl: me client
35 *
36 * Locking: called under "dev->device_lock" lock
37 *
38 * Return: me client or NULL
39 */
40struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
41{
b7d88514
TW
42 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
43 return me_cl;
79563db9 44
b7d88514 45 return NULL;
79563db9
TW
46}
47
48/**
b7d88514 49 * mei_me_cl_release - free me client
79563db9 50 *
79563db9 51 * @ref: me_client refcount
daa0c28d
RD
52 *
53 * Locking: called under "dev->device_lock" lock
79563db9
TW
54 */
55static void mei_me_cl_release(struct kref *ref)
56{
57 struct mei_me_client *me_cl =
58 container_of(ref, struct mei_me_client, refcnt);
b7d88514 59
79563db9
TW
60 kfree(me_cl);
61}
b7d88514 62
79563db9
TW
63/**
64 * mei_me_cl_put - decrease me client refcount and free client if necessary
65 *
79563db9 66 * @me_cl: me client
daa0c28d
RD
67 *
68 * Locking: called under "dev->device_lock" lock
79563db9
TW
69 */
70void mei_me_cl_put(struct mei_me_client *me_cl)
71{
72 if (me_cl)
73 kref_put(&me_cl->refcnt, mei_me_cl_release);
74}
75
90e0b5f1 76/**
d49ed64a 77 * __mei_me_cl_del - delete me client from the list and decrease
b7d88514
TW
78 * reference counter
79 *
80 * @dev: mei device
81 * @me_cl: me client
82 *
83 * Locking: dev->me_clients_rwsem
84 */
85static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
86{
87 if (!me_cl)
88 return;
89
d49ed64a 90 list_del_init(&me_cl->list);
b7d88514
TW
91 mei_me_cl_put(me_cl);
92}
93
d49ed64a
AU
94/**
95 * mei_me_cl_del - delete me client from the list and decrease
96 * reference counter
97 *
98 * @dev: mei device
99 * @me_cl: me client
100 */
101void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
102{
103 down_write(&dev->me_clients_rwsem);
104 __mei_me_cl_del(dev, me_cl);
105 up_write(&dev->me_clients_rwsem);
106}
107
b7d88514
TW
108/**
109 * mei_me_cl_add - add me client to the list
110 *
111 * @dev: mei device
112 * @me_cl: me client
113 */
114void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
115{
116 down_write(&dev->me_clients_rwsem);
117 list_add(&me_cl->list, &dev->me_clients);
118 up_write(&dev->me_clients_rwsem);
119}
120
121/**
122 * __mei_me_cl_by_uuid - locate me client by uuid
79563db9 123 * increases ref count
90e0b5f1
TW
124 *
125 * @dev: mei device
a8605ea2 126 * @uuid: me client uuid
a27a76d3 127 *
a8605ea2 128 * Return: me client or NULL if not found
b7d88514
TW
129 *
130 * Locking: dev->me_clients_rwsem
90e0b5f1 131 */
b7d88514 132static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
d320832f 133 const uuid_le *uuid)
90e0b5f1 134{
5ca2d388 135 struct mei_me_client *me_cl;
b7d88514 136 const uuid_le *pn;
90e0b5f1 137
b7d88514
TW
138 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
139
140 list_for_each_entry(me_cl, &dev->me_clients, list) {
141 pn = &me_cl->props.protocol_name;
142 if (uuid_le_cmp(*uuid, *pn) == 0)
79563db9 143 return mei_me_cl_get(me_cl);
b7d88514 144 }
90e0b5f1 145
d320832f 146 return NULL;
90e0b5f1
TW
147}
148
b7d88514
TW
149/**
150 * mei_me_cl_by_uuid - locate me client by uuid
151 * increases ref count
152 *
153 * @dev: mei device
154 * @uuid: me client uuid
155 *
156 * Return: me client or NULL if not found
157 *
158 * Locking: dev->me_clients_rwsem
159 */
160struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
161 const uuid_le *uuid)
162{
163 struct mei_me_client *me_cl;
164
165 down_read(&dev->me_clients_rwsem);
166 me_cl = __mei_me_cl_by_uuid(dev, uuid);
167 up_read(&dev->me_clients_rwsem);
168
169 return me_cl;
170}
171
90e0b5f1 172/**
a8605ea2 173 * mei_me_cl_by_id - locate me client by client id
79563db9 174 * increases ref count
90e0b5f1
TW
175 *
176 * @dev: the device structure
177 * @client_id: me client id
178 *
a8605ea2 179 * Return: me client or NULL if not found
b7d88514
TW
180 *
181 * Locking: dev->me_clients_rwsem
90e0b5f1 182 */
d320832f 183struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
90e0b5f1 184{
a27a76d3 185
b7d88514
TW
186 struct mei_me_client *__me_cl, *me_cl = NULL;
187
188 down_read(&dev->me_clients_rwsem);
189 list_for_each_entry(__me_cl, &dev->me_clients, list) {
190 if (__me_cl->client_id == client_id) {
191 me_cl = mei_me_cl_get(__me_cl);
192 break;
193 }
194 }
195 up_read(&dev->me_clients_rwsem);
196
197 return me_cl;
198}
199
200/**
201 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
202 * increases ref count
203 *
204 * @dev: the device structure
205 * @uuid: me client uuid
206 * @client_id: me client id
207 *
208 * Return: me client or null if not found
209 *
210 * Locking: dev->me_clients_rwsem
211 */
212static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
213 const uuid_le *uuid, u8 client_id)
214{
5ca2d388 215 struct mei_me_client *me_cl;
b7d88514
TW
216 const uuid_le *pn;
217
218 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
90e0b5f1 219
b7d88514
TW
220 list_for_each_entry(me_cl, &dev->me_clients, list) {
221 pn = &me_cl->props.protocol_name;
222 if (uuid_le_cmp(*uuid, *pn) == 0 &&
223 me_cl->client_id == client_id)
79563db9 224 return mei_me_cl_get(me_cl);
b7d88514 225 }
79563db9 226
d320832f 227 return NULL;
90e0b5f1 228}
ab841160 229
b7d88514 230
a8605ea2
AU
231/**
232 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
79563db9 233 * increases ref count
a8605ea2
AU
234 *
235 * @dev: the device structure
236 * @uuid: me client uuid
237 * @client_id: me client id
238 *
b7d88514 239 * Return: me client or null if not found
a8605ea2 240 */
d880f329
TW
241struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
242 const uuid_le *uuid, u8 client_id)
243{
244 struct mei_me_client *me_cl;
245
b7d88514
TW
246 down_read(&dev->me_clients_rwsem);
247 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
248 up_read(&dev->me_clients_rwsem);
79563db9 249
b7d88514 250 return me_cl;
d880f329
TW
251}
/**
 * mei_me_cl_rm_by_uuid - remove the me client matching uuid from the list
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock; takes
 *	dev->me_clients_rwsem for write internally
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	/* the lookup takes its own reference ... */
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	/* ... __mei_me_cl_del drops the list's reference ... */
	__mei_me_cl_del(dev, me_cl);
	/* ... and this put drops the lookup reference taken above */
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
273
79563db9
TW
274/**
275 * mei_me_cl_rm_all - remove all me clients
276 *
277 * @dev: the device structure
278 *
279 * Locking: called under "dev->device_lock" lock
280 */
281void mei_me_cl_rm_all(struct mei_device *dev)
282{
283 struct mei_me_client *me_cl, *next;
284
b7d88514 285 down_write(&dev->me_clients_rwsem);
79563db9 286 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
b7d88514
TW
287 __mei_me_cl_del(dev, me_cl);
288 up_write(&dev->me_clients_rwsem);
79563db9
TW
289}
290
928fa666
TW
291/**
292 * mei_io_cb_free - free mei_cb_private related memory
293 *
294 * @cb: mei callback struct
295 */
296void mei_io_cb_free(struct mei_cl_cb *cb)
297{
298 if (cb == NULL)
299 return;
300
301 list_del(&cb->list);
4adf613e 302 kvfree(cb->buf.data);
4ed1cc99 303 kfree(cb->ext_hdr);
928fa666
TW
304 kfree(cb);
305}
306
af336cab 307/**
09f8c33a 308 * mei_tx_cb_enqueue - queue tx callback
af336cab 309 *
af336cab
AU
310 * @cb: mei callback struct
311 * @head: an instance of list to queue on
daa0c28d
RD
312 *
313 * Locking: called under "dev->device_lock" lock
af336cab
AU
314 */
315static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
316 struct list_head *head)
317{
318 list_add_tail(&cb->list, head);
319 cb->cl->tx_cb_queued++;
320}
321
322/**
323 * mei_tx_cb_dequeue - dequeue tx callback
324 *
af336cab 325 * @cb: mei callback struct to dequeue and free
daa0c28d
RD
326 *
327 * Locking: called under "dev->device_lock" lock
af336cab
AU
328 */
329static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
330{
331 if (!WARN_ON(cb->cl->tx_cb_queued == 0))
332 cb->cl->tx_cb_queued--;
333
334 mei_io_cb_free(cb);
335}
336
f35fe5f4
AU
337/**
338 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
339 *
f35fe5f4
AU
340 * @cl: mei client
341 * @fp: pointer to file structure
daa0c28d
RD
342 *
343 * Locking: called under "dev->device_lock" lock
f35fe5f4
AU
344 */
345static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
346 const struct file *fp)
347{
348 struct mei_cl_vtag *cl_vtag;
349
350 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
351 if (cl_vtag->fp == fp) {
352 cl_vtag->pending_read = true;
353 return;
354 }
355 }
356}
357
928fa666
TW
358/**
359 * mei_io_cb_init - allocate and initialize io callback
360 *
361 * @cl: mei client
362 * @type: operation type
363 * @fp: pointer to file structure
364 *
365 * Return: mei_cl_cb pointer or NULL;
366 */
3030dc05
TW
367static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
368 enum mei_cb_file_ops type,
369 const struct file *fp)
928fa666
TW
370{
371 struct mei_cl_cb *cb;
372
4b40b225 373 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
928fa666
TW
374 if (!cb)
375 return NULL;
376
377 INIT_LIST_HEAD(&cb->list);
62e8e6ad 378 cb->fp = fp;
928fa666
TW
379 cb->cl = cl;
380 cb->buf_idx = 0;
381 cb->fop_type = type;
0cd7c01a 382 cb->vtag = 0;
4ed1cc99 383 cb->ext_hdr = NULL;
0cd7c01a 384
928fa666
TW
385 return cb;
386}
387
cc99ecfd 388/**
af336cab 389 * mei_io_list_flush_cl - removes cbs belonging to the cl.
cc99ecfd 390 *
962ff7bc 391 * @head: an instance of our list structure
af336cab 392 * @cl: host client
9ca9050b 393 */
af336cab
AU
394static void mei_io_list_flush_cl(struct list_head *head,
395 const struct mei_cl *cl)
9ca9050b 396{
928fa666 397 struct mei_cl_cb *cb, *next;
9ca9050b 398
962ff7bc 399 list_for_each_entry_safe(cb, next, head, list) {
cee4c4d6 400 if (cl == cb->cl) {
928fa666 401 list_del_init(&cb->list);
cee4c4d6
AU
402 if (cb->fop_type == MEI_FOP_READ)
403 mei_io_cb_free(cb);
404 }
9ca9050b
TW
405 }
406}
407
cc99ecfd 408/**
af336cab 409 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
cc99ecfd 410 *
962ff7bc 411 * @head: An instance of our list structure
cc99ecfd 412 * @cl: host client
15ffa991 413 * @fp: file pointer (matching cb file object), may be NULL
cc99ecfd 414 */
af336cab 415static void mei_io_tx_list_free_cl(struct list_head *head,
15ffa991
AU
416 const struct mei_cl *cl,
417 const struct file *fp)
cc99ecfd 418{
af336cab 419 struct mei_cl_cb *cb, *next;
cc99ecfd 420
af336cab 421 list_for_each_entry_safe(cb, next, head, list) {
15ffa991 422 if (cl == cb->cl && (!fp || fp == cb->fp))
af336cab
AU
423 mei_tx_cb_dequeue(cb);
424 }
f046192d
TW
425}
426
427/**
428 * mei_io_list_free_fp - free cb from a list that matches file pointer
429 *
430 * @head: io list
431 * @fp: file pointer (matching cb file object), may be NULL
432 */
394a77d0 433static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
f046192d
TW
434{
435 struct mei_cl_cb *cb, *next;
436
437 list_for_each_entry_safe(cb, next, head, list)
438 if (!fp || fp == cb->fp)
439 mei_io_cb_free(cb);
cc99ecfd
TW
440}
441
f35fe5f4
AU
442/**
443 * mei_cl_free_pending - free pending cb
444 *
445 * @cl: host client
446 */
447static void mei_cl_free_pending(struct mei_cl *cl)
448{
449 struct mei_cl_cb *cb;
450
451 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
452 mei_io_cb_free(cb);
453}
454
bca67d68
TW
455/**
456 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
457 *
458 * @cl: host client
459 * @length: size of the buffer
967b274e 460 * @fop_type: operation type
bca67d68
TW
461 * @fp: associated file pointer (might be NULL)
462 *
463 * Return: cb on success and NULL on failure
464 */
465struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
3030dc05 466 enum mei_cb_file_ops fop_type,
f23e2cc4 467 const struct file *fp)
bca67d68
TW
468{
469 struct mei_cl_cb *cb;
470
3030dc05 471 cb = mei_io_cb_init(cl, fop_type, fp);
bca67d68
TW
472 if (!cb)
473 return NULL;
474
aab3b1a3
AU
475 if (length == 0)
476 return cb;
477
4adf613e 478 cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
aab3b1a3 479 if (!cb->buf.data) {
bca67d68
TW
480 mei_io_cb_free(cb);
481 return NULL;
482 }
aab3b1a3 483 cb->buf.size = length;
bca67d68
TW
484
485 return cb;
486}
487
3030dc05
TW
488/**
489 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
490 * and enqueuing of the control commands cb
491 *
492 * @cl: host client
493 * @length: size of the buffer
967b274e 494 * @fop_type: operation type
3030dc05
TW
495 * @fp: associated file pointer (might be NULL)
496 *
497 * Return: cb on success and NULL on failure
498 * Locking: called under "dev->device_lock" lock
499 */
500struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
501 enum mei_cb_file_ops fop_type,
502 const struct file *fp)
503{
504 struct mei_cl_cb *cb;
505
506 /* for RX always allocate at least client's mtu */
507 if (length)
508 length = max_t(size_t, length, mei_cl_mtu(cl));
509
510 cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
511 if (!cb)
512 return NULL;
513
962ff7bc 514 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
3030dc05
TW
515 return cb;
516}
517
a9bed610
TW
518/**
519 * mei_cl_read_cb - find this cl's callback in the read list
520 * for a specific file
521 *
522 * @cl: host client
523 * @fp: file pointer (matching cb file object), may be NULL
524 *
525 * Return: cb on success, NULL if cb is not found
526 */
d1376f3d 527struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
a9bed610
TW
528{
529 struct mei_cl_cb *cb;
d1376f3d 530 struct mei_cl_cb *ret_cb = NULL;
a9bed610 531
d1376f3d 532 spin_lock(&cl->rd_completed_lock);
a9bed610 533 list_for_each_entry(cb, &cl->rd_completed, list)
d1376f3d
AU
534 if (!fp || fp == cb->fp) {
535 ret_cb = cb;
536 break;
537 }
538 spin_unlock(&cl->rd_completed_lock);
539 return ret_cb;
a9bed610
TW
540}
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL; a NULL @fp
 *	means a final flush that also drops pending and control cbs
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	/* rd_completed has its own lock; the lists above are protected by
	 * device_lock held by the caller
	 */
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}
574
9ca9050b 575/**
83ce0741 576 * mei_cl_init - initializes cl.
9ca9050b
TW
577 *
578 * @cl: host client to be initialized
579 * @dev: mei device
580 */
394a77d0 581static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
9ca9050b 582{
4b40b225 583 memset(cl, 0, sizeof(*cl));
9ca9050b
TW
584 init_waitqueue_head(&cl->wait);
585 init_waitqueue_head(&cl->rx_wait);
586 init_waitqueue_head(&cl->tx_wait);
b38a362f 587 init_waitqueue_head(&cl->ev_wait);
f35fe5f4 588 INIT_LIST_HEAD(&cl->vtag_map);
d1376f3d 589 spin_lock_init(&cl->rd_completed_lock);
a9bed610
TW
590 INIT_LIST_HEAD(&cl->rd_completed);
591 INIT_LIST_HEAD(&cl->rd_pending);
9ca9050b 592 INIT_LIST_HEAD(&cl->link);
9ca9050b 593 cl->writing_state = MEI_IDLE;
bd47b526 594 cl->state = MEI_FILE_UNINITIALIZED;
9ca9050b
TW
595 cl->dev = dev;
596}
597
598/**
599 * mei_cl_allocate - allocates cl structure and sets it up.
600 *
601 * @dev: mei device
a8605ea2 602 * Return: The allocated file or NULL on failure
9ca9050b
TW
603 */
604struct mei_cl *mei_cl_allocate(struct mei_device *dev)
605{
606 struct mei_cl *cl;
607
4b40b225 608 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
9ca9050b
TW
609 if (!cl)
610 return NULL;
611
612 mei_cl_init(cl, dev);
613
614 return cl;
615}
/**
 * mei_cl_link - allocate host id in the host map and link the client
 *	into the device's file list
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* pick the lowest free host client id */
	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	/* reserve the id only after all failure checks passed */
	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
/**
 * mei_cl_unlink - remove host client from the list and release its
 *	host id; safe to call on error/teardown paths
 *
 * @cl: host client, may be NULL
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	/* already unlinked (or never linked) - nothing to undo */
	if (cl->state == MEI_FILE_UNINITIALIZED)
		return 0;

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	/* all queues should have been flushed before unlinking */
	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}
704
025fb792 705void mei_host_client_init(struct mei_device *dev)
9ca9050b 706{
43b8a7ed 707 mei_set_devstate(dev, MEI_DEV_ENABLED);
6adb8efb 708 dev->reset_count = 0;
04bb139a 709
025fb792 710 schedule_work(&dev->bus_rescan_work);
6009595a 711
2bf94cab
TW
712 pm_runtime_mark_last_busy(dev->dev);
713 dev_dbg(dev->dev, "rpm: autosuspend\n");
d5f8e166 714 pm_request_autosuspend(dev->dev);
9ca9050b
TW
715}
716
6aae48ff 717/**
a8605ea2 718 * mei_hbuf_acquire - try to acquire host buffer
6aae48ff
TW
719 *
720 * @dev: the device structure
a8605ea2 721 * Return: true if host buffer was acquired
6aae48ff
TW
722 */
723bool mei_hbuf_acquire(struct mei_device *dev)
724{
04bb139a 725 if (mei_pg_state(dev) == MEI_PG_ON ||
3dc196ea 726 mei_pg_in_transition(dev)) {
2bf94cab 727 dev_dbg(dev->dev, "device is in pg\n");
04bb139a
TW
728 return false;
729 }
730
6aae48ff 731 if (!dev->hbuf_is_ready) {
2bf94cab 732 dev_dbg(dev->dev, "hbuf is not ready\n");
6aae48ff
TW
733 return false;
734 }
735
736 dev->hbuf_is_ready = false;
737
738 return true;
739}
9ca9050b 740
a4307fe4
AU
741/**
742 * mei_cl_wake_all - wake up readers, writers and event waiters so
743 * they can be interrupted
744 *
745 * @cl: host client
746 */
747static void mei_cl_wake_all(struct mei_cl *cl)
748{
749 struct mei_device *dev = cl->dev;
750
751 /* synchronized under device mutex */
752 if (waitqueue_active(&cl->rx_wait)) {
753 cl_dbg(dev, cl, "Waking up reading client!\n");
754 wake_up_interruptible(&cl->rx_wait);
755 }
756 /* synchronized under device mutex */
757 if (waitqueue_active(&cl->tx_wait)) {
758 cl_dbg(dev, cl, "Waking up writing client!\n");
759 wake_up_interruptible(&cl->tx_wait);
760 }
761 /* synchronized under device mutex */
762 if (waitqueue_active(&cl->ev_wait)) {
763 cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
764 wake_up_interruptible(&cl->ev_wait);
765 }
7ff4bdd4
AU
766 /* synchronized under device mutex */
767 if (waitqueue_active(&cl->wait)) {
768 cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
69f1804a 769 wake_up(&cl->wait);
7ff4bdd4 770 }
a4307fe4
AU
771}
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 * associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* nothing to tear down for clients that never connected */
	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	/* let any sleeping waiters observe the disconnected state */
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* last connection gone: reset the me client's tx credits */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	/* drop the reference taken in mei_cl_set_connecting */
	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}
/**
 * mei_cl_set_connecting - bind host client to an me client and move it
 *	to the connecting state
 *
 * @cl: host client
 * @me_cl: me client to connect to
 *
 * Return: 0 on success, -ENOENT if the me client is going away,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	/* take a reference for the lifetime of the connection;
	 * dropped in mei_cl_set_disconnected
	 */
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}
/**
 * mei_cl_send_disconnect - send disconnect request to the firmware
 *
 * @cl: host client
 * @cb: callback block (moved to ctrl_rd_list on success)
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is out - wait for the reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);

	return 0;
}
858
859/**
860 * mei_cl_irq_disconnect - processes close related operation from
861 * interrupt thread context - send disconnect request
862 *
863 * @cl: client
864 * @cb: callback block.
865 * @cmpl_list: complete list.
866 *
867 * Return: 0, OK; otherwise, error.
868 */
869int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
962ff7bc 870 struct list_head *cmpl_list)
3c666182
TW
871{
872 struct mei_device *dev = cl->dev;
873 u32 msg_slots;
874 int slots;
875 int ret;
876
98e70866 877 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
3c666182 878 slots = mei_hbuf_empty_slots(dev);
de877437
TW
879 if (slots < 0)
880 return -EOVERFLOW;
3c666182 881
de877437 882 if ((u32)slots < msg_slots)
3c666182
TW
883 return -EMSGSIZE;
884
885 ret = mei_cl_send_disconnect(cl, cb);
886 if (ret)
962ff7bc 887 list_move_tail(&cb->list, cmpl_list);
3c666182
TW
888
889 return ret;
890}
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 * internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send immediately if the host buffer is free; otherwise the
	 * queued cb is picked up later from the ctrl write list
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	/* drop device lock while sleeping for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	/* cb may be NULL here; mei_io_cb_free handles that */
	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	/* fixed address clients have no HBM connection to tear down */
	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	/* hold the device awake for the duration of the HBM exchange */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
999
1000
1001/**
90e0b5f1
TW
1002 * mei_cl_is_other_connecting - checks if other
1003 * client with the same me client id is connecting
9ca9050b 1004 *
9ca9050b
TW
1005 * @cl: private data of the file object
1006 *
a8605ea2 1007 * Return: true if other client is connected, false - otherwise.
9ca9050b 1008 */
0c53357c 1009static bool mei_cl_is_other_connecting(struct mei_cl *cl)
9ca9050b 1010{
90e0b5f1 1011 struct mei_device *dev;
0c53357c 1012 struct mei_cl_cb *cb;
90e0b5f1
TW
1013
1014 dev = cl->dev;
1015
962ff7bc 1016 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
0c53357c 1017 if (cb->fop_type == MEI_FOP_CONNECT &&
d49ed64a 1018 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
90e0b5f1 1019 return true;
9ca9050b 1020 }
90e0b5f1
TW
1021
1022 return false;
9ca9050b
TW
1023}
/**
 * mei_cl_send_connect - send connect request to the firmware
 *
 * @cl: host client
 * @cb: callback block (moved to ctrl_rd_list on success)
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is out - wait for the reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = dev->timeouts.connect;
	mei_schedule_stall_timer(dev);
	return 0;
}
1052
1053/**
1054 * mei_cl_irq_connect - send connect request in irq_thread context
1055 *
1056 * @cl: host client
1057 * @cb: callback block
1058 * @cmpl_list: complete list
1059 *
1060 * Return: 0, OK; otherwise, error.
1061 */
1062int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
962ff7bc 1063 struct list_head *cmpl_list)
0c53357c
TW
1064{
1065 struct mei_device *dev = cl->dev;
1066 u32 msg_slots;
1067 int slots;
1068 int rets;
1069
0c53357c
TW
1070 if (mei_cl_is_other_connecting(cl))
1071 return 0;
1072
98e70866 1073 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
de877437
TW
1074 slots = mei_hbuf_empty_slots(dev);
1075 if (slots < 0)
1076 return -EOVERFLOW;
1077
1078 if ((u32)slots < msg_slots)
0c53357c
TW
1079 return -EMSGSIZE;
1080
1081 rets = mei_cl_send_connect(cl, cb);
1082 if (rets)
962ff7bc 1083 list_move_tail(&cb->list, cmpl_list);
0c53357c
TW
1084
1085 return rets;
1086}
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed address clients connect without an HBM handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	/* drop device lock while sleeping for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	/* on any failure path roll the client back to disconnected */
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
1181
03b8d341
TW
1182/**
1183 * mei_cl_alloc_linked - allocate and link host client
1184 *
1185 * @dev: the device structure
03b8d341
TW
1186 *
1187 * Return: cl on success ERR_PTR on failure
1188 */
7851e008 1189struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
03b8d341
TW
1190{
1191 struct mei_cl *cl;
1192 int ret;
1193
1194 cl = mei_cl_allocate(dev);
1195 if (!cl) {
1196 ret = -ENOMEM;
1197 goto err;
1198 }
1199
7851e008 1200 ret = mei_cl_link(cl);
03b8d341
TW
1201 if (ret)
1202 goto err;
1203
1204 return cl;
1205err:
1206 kfree(cl);
1207 return ERR_PTR(ret);
1208}
1209
9ca9050b 1210/**
4034b81b 1211 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
9ca9050b 1212 *
06ee536b 1213 * @cl: host client
9ca9050b 1214 *
4034b81b 1215 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
9ca9050b 1216 */
4034b81b 1217static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
9ca9050b 1218{
d49ed64a 1219 if (WARN_ON(!cl || !cl->me_cl))
90e0b5f1
TW
1220 return -EINVAL;
1221
4034b81b 1222 if (cl->tx_flow_ctrl_creds > 0)
9ca9050b
TW
1223 return 1;
1224
a808c80c 1225 if (mei_cl_is_fixed_address(cl))
1df629ef 1226 return 1;
1df629ef 1227
d49ed64a 1228 if (mei_cl_is_single_recv_buf(cl)) {
4034b81b 1229 if (cl->me_cl->tx_flow_ctrl_creds > 0)
d49ed64a 1230 return 1;
9ca9050b 1231 }
d49ed64a 1232 return 0;
9ca9050b
TW
1233}
1234
1235/**
4034b81b
TW
1236 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1237 * for a client
9ca9050b 1238 *
4034b81b 1239 * @cl: host client
393b148f 1240 *
a8605ea2 1241 * Return:
9ca9050b 1242 * 0 on success
9ca9050b
TW
1243 * -EINVAL when ctrl credits are <= 0
1244 */
4034b81b 1245static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
9ca9050b 1246{
d49ed64a 1247 if (WARN_ON(!cl || !cl->me_cl))
90e0b5f1
TW
1248 return -EINVAL;
1249
1df629ef
AU
1250 if (mei_cl_is_fixed_address(cl))
1251 return 0;
1252
d49ed64a 1253 if (mei_cl_is_single_recv_buf(cl)) {
4034b81b 1254 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
d49ed64a 1255 return -EINVAL;
4034b81b 1256 cl->me_cl->tx_flow_ctrl_creds--;
12d00665 1257 } else {
4034b81b 1258 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
d49ed64a 1259 return -EINVAL;
4034b81b 1260 cl->tx_flow_ctrl_creds--;
9ca9050b 1261 }
d49ed64a 1262 return 0;
9ca9050b
TW
1263}
1264
f35fe5f4
AU
1265/**
1266 * mei_cl_vtag_alloc - allocate and fill the vtag structure
1267 *
1268 * @fp: pointer to file structure
1269 * @vtag: vm tag
1270 *
1271 * Return:
1272 * * Pointer to allocated struct - on success
1273 * * ERR_PTR(-ENOMEM) on memory allocation failure
1274 */
1275struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
1276{
1277 struct mei_cl_vtag *cl_vtag;
1278
1279 cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
1280 if (!cl_vtag)
1281 return ERR_PTR(-ENOMEM);
1282
1283 INIT_LIST_HEAD(&cl_vtag->list);
1284 cl_vtag->vtag = vtag;
1285 cl_vtag->fp = fp;
1286
1287 return cl_vtag;
1288}
1289
1290/**
1291 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
1292 *
1293 * @cl: host client
85261c1f 1294 * @vtag: virtual tag
f35fe5f4
AU
1295 *
1296 * Return:
1297 * * A file pointer - on success
1298 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
1299 */
1300const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1301{
1302 struct mei_cl_vtag *vtag_l;
1303
1304 list_for_each_entry(vtag_l, &cl->vtag_map, list)
85261c1f
AU
1305 /* The client on bus has one fixed fp */
1306 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1307 vtag_l->vtag == vtag)
f35fe5f4
AU
1308 return vtag_l->fp;
1309
1310 return ERR_PTR(-ENOENT);
1311}
1312
1313/**
1314 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
1315 *
1316 * @cl: host client
1317 * @vtag: vm tag
1318 */
1319static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1320{
1321 struct mei_cl_vtag *vtag_l;
1322
1323 list_for_each_entry(vtag_l, &cl->vtag_map, list) {
f17ef47b
AU
1324 /* The client on bus has one fixed vtag map */
1325 if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1326 vtag_l->vtag == vtag) {
f35fe5f4
AU
1327 vtag_l->pending_read = false;
1328 break;
1329 }
1330 }
1331}
1332
1333/**
1334 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
1335 * in the vtag list
1336 *
1337 * @cl: host client
1338 */
1339static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
1340{
1341 struct mei_cl_vtag *cl_vtag;
1342
1343 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
1344 if (cl_vtag->pending_read) {
1345 if (mei_cl_enqueue_ctrl_wr_cb(cl,
1346 mei_cl_mtu(cl),
1347 MEI_FOP_READ,
1348 cl_vtag->fp))
1349 cl->rx_flow_ctrl_creds++;
1350 break;
1351 }
1352 }
1353}
1354
1355/**
1356 * mei_cl_vt_support_check - check if client support vtags
1357 *
1358 * @cl: host client
1359 *
1360 * Return:
1361 * * 0 - supported, or not connected at all
1362 * * -EOPNOTSUPP - vtags are not supported by client
1363 */
1364int mei_cl_vt_support_check(const struct mei_cl *cl)
1365{
1366 struct mei_device *dev = cl->dev;
1367
1368 if (!dev->hbm_f_vt_supported)
1369 return -EOPNOTSUPP;
1370
1371 if (!cl->me_cl)
1372 return 0;
1373
1374 return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1375}
1376
d1376f3d
AU
/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *	and vtag check
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		/* vtag-aware client: route the cb to the fp owning the vtag */
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		/* hand a read credit to the next pending vtag reader */
		mei_cl_read_vtag_add_fc(cl);
	}

	/* the lock serializes against consumers of the rd_completed list */
	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}
1405
/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	/* freeing under the lock serializes against mei_cl_add_rd_completed() */
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}
1419
51678ccb
TW
1420/**
1421 * mei_cl_notify_fop2req - convert fop to proper request
1422 *
1423 * @fop: client notification start response command
1424 *
1425 * Return: MEI_HBM_NOTIFICATION_START/STOP
1426 */
1427u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1428{
1429 if (fop == MEI_FOP_NOTIFY_START)
1430 return MEI_HBM_NOTIFICATION_START;
1431 else
1432 return MEI_HBM_NOTIFICATION_STOP;
1433}
1434
1435/**
1436 * mei_cl_notify_req2fop - convert notification request top file operation type
1437 *
1438 * @req: hbm notification request type
1439 *
1440 * Return: MEI_FOP_NOTIFY_START/STOP
1441 */
1442enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1443{
1444 if (req == MEI_HBM_NOTIFICATION_START)
1445 return MEI_FOP_NOTIFY_START;
1446 else
1447 return MEI_FOP_NOTIFY_STOP;
1448}
1449
/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	/* NOTE(review): sizing uses the connect request struct; presumably
	 * the notify request has the same wire size - confirm
	 */
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	/* not enough room in the host buffer: retry on a later cycle */
	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	/* await the firmware response on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
1487
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send immediately if the host buffer is free; otherwise the
	 * queued cb is sent later from the irq thread
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	/* drop the device lock while waiting for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	/* timed out without a firmware error: report -EFAULT */
	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
1563
237092bf
TW
1564/**
1565 * mei_cl_notify - raise notification
1566 *
1567 * @cl: host client
1568 *
1569 * Locking: called under "dev->device_lock" lock
1570 */
1571void mei_cl_notify(struct mei_cl *cl)
1572{
1573 struct mei_device *dev;
1574
1575 if (!cl || !cl->dev)
1576 return;
1577
1578 dev = cl->dev;
1579
1580 if (!cl->notify_en)
1581 return;
1582
1583 cl_dbg(dev, cl, "notify event");
1584 cl->notify_ev = true;
850f8940
TW
1585 if (!mei_cl_bus_notify_event(cl))
1586 wake_up_interruptible(&cl->ev_wait);
237092bf
TW
1587
1588 if (cl->ev_async)
1589 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
bb2ef9c3 1590
237092bf
TW
1591}
1592
b38a362f
TW
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* an event is already pending: consume it without sleeping */
	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	/* drop the device lock while sleeping; mei_cl_notify() wakes ev_wait */
	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	/* report the event and rearm for the next one */
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
1642
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* fixed-address clients receive without explicit flow control */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* send the flow control request now if the host buffer is free;
	 * otherwise the queued cb is processed from the irq thread
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	/* on any failure the cb was never handed off: free it here */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
1713
40292383 1714static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
0cd7c01a 1715{
40292383
TW
1716 struct mei_ext_hdr_vtag *vtag_hdr = ext;
1717
1718 vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
1719 vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
1720 vtag_hdr->vtag = vtag;
1721 vtag_hdr->reserved = 0;
1722 return vtag_hdr->hdr.length;
0cd7c01a
TW
1723}
1724
4ed1cc99
TW
1725static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
1726{
1727 return ext && ext->type == MEI_EXT_HDR_GSC;
1728}
1729
1730static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
1731{
1732 memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
1733 return ext->length;
1734}
1735
/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_hbm, is_gsc, is_vtag;
	struct mei_ext_hdr *next_ext;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	/* me client id 0 is treated as the HBM protocol client here */
	is_hbm = cb->cl->me_cl->client_id == 0;
	is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
	is_ext = is_vtag || is_gsc;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

	if (is_gsc)
		hdr_len += mei_ext_hdr_len(cb->ext_hdr);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	/* fill the extension area: meta header followed by the ext headers */
	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	meta->size = 0;
	next_ext = (struct mei_ext_hdr *)meta->hdrs;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
		next_ext = mei_ext_next(next_ext);
	}

	if (is_gsc) {
		meta->count++;
		meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
		next_ext = mei_ext_next(next_ext);
	}

out:
	/* length counts only what follows the base header */
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}
1805
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len = 0;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data = NULL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	/* a tx flow control credit is consumed only for the first chunk */
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload starts at the current fragment offset */
	if (buf->data) {
		buf_len = buf->size - cb->buf_idx;
		data = buf->data + cb->buf_idx;
	}
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/**
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		/* everything fits into the host buffer: single write */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* payload goes via the DMA ring; only its length in-band */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		/* host buffer fully empty: send a partial chunk now */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		/* wait for the host buffer to drain; retried later */
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	/* a complete message awaits the firmware ack before completion */
	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
1930
4234a6de
TW
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @timeout: send timeout in milliseconds.
 *	effective only for blocking writes: the cb->blocking is set.
 *	set timeout to the MAX_SCHEDULE_TIMEOUT to maximum allowed wait.
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;


	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/* no credits: queue the cb and let the irq thread send it later */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		/* NOTE: negative errno carried in size_t buf_len;
		 * converted back at the "rets = buf_len" below
		 */
		buf_len = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		/* everything fits into the host buffer: single write */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* payload goes via the DMA ring; only its length in-band */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		/* first partial chunk; the rest is sent from the irq thread */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	/* ownership of cb passes to the device lists from here on */
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible_timeout(cl->tx_wait,
							cl->writing_state == MEI_WRITE_COMPLETE ||
							(!mei_cl_is_connected(cl)),
							msecs_to_jiffies(timeout));
		mutex_lock(&dev->device_lock);
		/* clean all queue on timeout as something fatal happened */
		if (rets == 0) {
			rets = -ETIME;
			mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
			mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
		}
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets > 0)
			rets = 0;
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	/* cb is NULL here if it was handed off to a device list above */
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}
2104
db086fa9
TW
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			/* no waiter: allow the device to autosuspend */
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		/* fixed-address clients do not consume rx credits */
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		/* deliver to the bus client first; wake readers otherwise */
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}
2154
4234a6de 2155
074b4c01
TW
/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	/* only marks clients disconnected; no HBM messages are sent */
	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
369aea84
AU
2169
2170static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
2171{
2172 struct mei_cl *cl;
2173
2174 list_for_each_entry(cl, &dev->file_list, link)
2175 if (cl->dma.buffer_id == buffer_id)
2176 return cl;
2177 return NULL;
2178}
2179
2180/**
2181 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
2182 *
2183 * @cl: client
2184 * @cb: callback block.
2185 * @cmpl_list: complete list.
2186 *
2187 * Return: 0 on such and error otherwise.
2188 */
2189int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
2190 struct list_head *cmpl_list)
2191{
2192 struct mei_device *dev = cl->dev;
2193 u32 msg_slots;
2194 int slots;
2195 int ret;
2196
2197 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
2198 slots = mei_hbuf_empty_slots(dev);
2199 if (slots < 0)
2200 return -EOVERFLOW;
2201
2202 if ((u32)slots < msg_slots)
2203 return -EMSGSIZE;
2204
2205 ret = mei_hbm_cl_dma_map_req(dev, cl);
2206 if (ret) {
2207 cl->status = ret;
2208 list_move_tail(&cb->list, cmpl_list);
2209 return ret;
2210 }
2211
2212 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2213 return 0;
2214}
2215
/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	/* make sure the whole unmap request fits into the host buffer */
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		/* sending failed: record status and complete the request */
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	/* request sent: park the cb until the firmware response arrives */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
2251
/* Allocate a coherent DMA buffer for the client and record its id/size.
 * Uses the device-managed (devres) allocator, so the memory is also
 * released automatically when the underlying device goes away.
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}
2264
/* Free the client's coherent DMA buffer and reset the dma descriptor.
 * The buffer id is cleared first so mei_cl_dma_map_find() can no longer
 * match this client.
 */
static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}
2274
2275/**
09f8c33a 2276 * mei_cl_dma_alloc_and_map - send client dma map request
369aea84
AU
2277 *
2278 * @cl: host client
2279 * @fp: pointer to file structure
2280 * @buffer_id: id of the mapped buffer
2281 * @size: size of the buffer
2282 *
2283 * Locking: called under "dev->device_lock" lock
2284 *
2285 * Return:
2286 * * -ENODEV
2287 * * -EINVAL
2288 * * -EOPNOTSUPP
2289 * * -EPROTO
2290 * * -ENOMEM;
2291 */
2292int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
2293 u8 buffer_id, size_t size)
2294{
2295 struct mei_device *dev;
2296 struct mei_cl_cb *cb;
2297 int rets;
2298
2299 if (WARN_ON(!cl || !cl->dev))
2300 return -ENODEV;
2301
2302 dev = cl->dev;
2303
2304 if (!dev->hbm_f_cd_supported) {
2305 cl_dbg(dev, cl, "client dma is not supported\n");
2306 return -EOPNOTSUPP;
2307 }
2308
2309 if (buffer_id == 0)
2310 return -EINVAL;
2311
ce068bc7
TW
2312 if (mei_cl_is_connected(cl))
2313 return -EPROTO;
369aea84
AU
2314
2315 if (cl->dma_mapped)
2316 return -EPROTO;
2317
2318 if (mei_cl_dma_map_find(dev, buffer_id)) {
2319 cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
2320 cl->dma.buffer_id);
2321 return -EPROTO;
2322 }
2323
2324 rets = pm_runtime_get(dev->dev);
2325 if (rets < 0 && rets != -EINPROGRESS) {
2326 pm_runtime_put_noidle(dev->dev);
2327 cl_err(dev, cl, "rpm: get failed %d\n", rets);
2328 return rets;
2329 }
2330
2331 rets = mei_cl_dma_alloc(cl, buffer_id, size);
2332 if (rets) {
2333 pm_runtime_put_noidle(dev->dev);
2334 return rets;
2335 }
2336
2337 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
2338 if (!cb) {
2339 rets = -ENOMEM;
2340 goto out;
2341 }
2342
2343 if (mei_hbuf_acquire(dev)) {
2344 if (mei_hbm_cl_dma_map_req(dev, cl)) {
2345 rets = -ENODEV;
2346 goto out;
2347 }
2348 list_move_tail(&cb->list, &dev->ctrl_rd_list);
2349 }
2350
43aa323e
AU
2351 cl->status = 0;
2352
369aea84
AU
2353 mutex_unlock(&dev->device_lock);
2354 wait_event_timeout(cl->wait,
ce068bc7 2355 cl->dma_mapped || cl->status,
95953618 2356 dev->timeouts.cl_connect);
369aea84
AU
2357 mutex_lock(&dev->device_lock);
2358
2359 if (!cl->dma_mapped && !cl->status)
2360 cl->status = -EFAULT;
2361
2362 rets = cl->status;
2363
2364out:
2365 if (rets)
2366 mei_cl_dma_free(cl);
2367
2368 cl_dbg(dev, cl, "rpm: autosuspend\n");
2369 pm_runtime_mark_last_busy(dev->dev);
2370 pm_runtime_put_autosuspend(dev->dev);
2371
2372 mei_io_cb_free(cb);
2373 return rets;
2374}
2375
/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send immediately if the host buffer is free; otherwise the
	 * queued cb is sent later from the irq thread */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	/* drop the lock while waiting for the firmware response */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	/* still mapped with no error reported: treat as timeout/fault */
	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	/* free the local buffer only after the firmware released it */
	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}