1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * IBM Power Systems Virtual Management Channel Support.
4 *
5 * Copyright (c) 2004, 2018 IBM Corp.
6 * Dave Engebretsen engebret@us.ibm.com
7 * Steven Royer seroyer@linux.vnet.ibm.com
8 * Adam Reznechek adreznec@linux.vnet.ibm.com
9 * Bryant G. Ly <bryantly@linux.vnet.ibm.com>
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/kthread.h>
15#include <linux/major.h>
16#include <linux/string.h>
17#include <linux/fcntl.h>
18#include <linux/slab.h>
19#include <linux/poll.h>
20#include <linux/init.h>
21#include <linux/fs.h>
22#include <linux/interrupt.h>
23#include <linux/spinlock.h>
24#include <linux/percpu.h>
25#include <linux/delay.h>
26#include <linux/uaccess.h>
27#include <linux/io.h>
28#include <linux/miscdevice.h>
29#include <linux/sched/signal.h>
30
31#include <asm/byteorder.h>
32#include <asm/irq.h>
33#include <asm/vio.h>
34
35#include "ibmvmc.h"
36
37#define IBMVMC_DRIVER_VERSION "1.0"
38
39/*
40 * Static global variables
41 */
42static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
43
44static const char ibmvmc_driver_name[] = "ibmvmc";
45
46static struct ibmvmc_struct ibmvmc;
47static struct ibmvmc_hmc hmcs[MAX_HMCS];
48static struct crq_server_adapter ibmvmc_adapter;
49
50static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
51static int ibmvmc_max_hmcs = DEFAULT_HMCS;
52static int ibmvmc_max_mtu = DEFAULT_MTU;
53
54static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
55 u64 dliobn, u64 dlioba)
56{
57 long rc = 0;
58
59 /* Ensure all writes to source memory are visible before hcall */
60 dma_wmb();
61 pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
62 length, sliobn, slioba, dliobn, dlioba);
63 rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
64 dliobn, dlioba);
65 pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
66
67 return rc;
68}
69
70static inline void h_free_crq(uint32_t unit_address)
71{
72 long rc = 0;
73
74 do {
75 if (H_IS_LONG_BUSY(rc))
76 msleep(get_longbusy_msecs(rc));
77
78 rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
79 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
80}
81
82/**
83 * h_request_vmc: - request a hypervisor virtual management channel device
84 * @vmc_index: drc index of the vmc device created
85 *
86 * Requests the hypervisor create a new virtual management channel device,
87 * allowing this partition to send hypervisor virtualization control
88 * commands.
89 *
90 * Return:
91 * 0 - Success
92 * Non-zero - Failure
93 */
94static inline long h_request_vmc(u32 *vmc_index)
95{
96 long rc = 0;
97 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
98
99 do {
100 if (H_IS_LONG_BUSY(rc))
101 msleep(get_longbusy_msecs(rc));
102
103 /* Call to request the VMC device from phyp */
104 rc = plpar_hcall(H_REQUEST_VMC, retbuf);
105 pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
106 *vmc_index = retbuf[0];
107 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
108
109 return rc;
110}
111
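/*
 * Both h_free_crq() and h_request_vmc() follow the usual PAPR hcall retry
 * idiom: H_BUSY means "retry immediately", while the H_IS_LONG_BUSY()
 * family encodes a suggested delay that get_longbusy_msecs() converts
 * into an msleep() before the next attempt.
 */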
112/* routines for managing a command/response queue */
113/**
114 * ibmvmc_handle_event: - Interrupt handler for crq events
115 * @irq: number of irq to handle, not used
116 * @dev_instance: crq_server_adapter that received interrupt
117 *
118 * Disables interrupts and schedules ibmvmc_task
119 *
120 * Always returns IRQ_HANDLED
121 */
122static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
123{
124 struct crq_server_adapter *adapter =
125 (struct crq_server_adapter *)dev_instance;
126
127 vio_disable_interrupts(to_vio_dev(adapter->dev));
128 tasklet_schedule(&adapter->work_task);
129
130 return IRQ_HANDLED;
131}
132
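/*
 * Interrupt handling is split in two: the hard-irq handler above only
 * masks further VIO interrupts and defers all CRQ processing to
 * ibmvmc_task() via the adapter tasklet; the tasklet re-enables
 * interrupts once it has drained the queue.
 */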
133/**
134 * ibmvmc_release_crq_queue - Release CRQ Queue
135 *
136 * @adapter: crq_server_adapter struct
137 *
 138 * Frees the irq and tasklet, stops the reset thread, closes the CRQ
 139 * with the hypervisor, and unmaps and frees the queue page. Note the
 140 * function is void, so there is no return value to check.
141 */
142static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
143{
144 struct vio_dev *vdev = to_vio_dev(adapter->dev);
145 struct crq_queue *queue = &adapter->queue;
146
147 free_irq(vdev->irq, (void *)adapter);
148 tasklet_kill(&adapter->work_task);
149
150 if (adapter->reset_task)
151 kthread_stop(adapter->reset_task);
152
153 h_free_crq(vdev->unit_address);
154 dma_unmap_single(adapter->dev,
155 queue->msg_token,
156 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
157 free_page((unsigned long)queue->msgs);
158}
159
160/**
161 * ibmvmc_reset_crq_queue - Reset CRQ Queue
162 *
163 * @adapter: crq_server_adapter struct
164 *
165 * This function calls h_free_crq and then calls H_REG_CRQ and does all the
166 * bookkeeping to get us back to where we can communicate.
167 *
168 * Return:
169 * 0 - Success
170 * Non-Zero - Failure
171 */
172static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
173{
174 struct vio_dev *vdev = to_vio_dev(adapter->dev);
175 struct crq_queue *queue = &adapter->queue;
176 int rc = 0;
177
178 /* Close the CRQ */
179 h_free_crq(vdev->unit_address);
180
181 /* Clean out the queue */
182 memset(queue->msgs, 0x00, PAGE_SIZE);
183 queue->cur = 0;
184
185 /* And re-open it again */
186 rc = plpar_hcall_norets(H_REG_CRQ,
187 vdev->unit_address,
188 queue->msg_token, PAGE_SIZE);
189 if (rc == 2)
 190		/* H_CLOSED: adapter is good, but other end is not ready */
191 dev_warn(adapter->dev, "Partner adapter not ready\n");
192 else if (rc != 0)
193 dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
194
195 return rc;
196}
197
198/**
199 * crq_queue_next_crq: - Returns the next entry in message queue
200 * @queue: crq_queue to use
201 *
202 * Returns pointer to next entry in queue, or NULL if there are no new
 203 * entries in the CRQ.
204 */
205static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
206{
207 struct ibmvmc_crq_msg *crq;
208 unsigned long flags;
209
210 spin_lock_irqsave(&queue->lock, flags);
211 crq = &queue->msgs[queue->cur];
212 if (crq->valid & 0x80) {
213 if (++queue->cur == queue->size)
214 queue->cur = 0;
215
216 /* Ensure the read of the valid bit occurs before reading any
217 * other bits of the CRQ entry
218 */
219 dma_rmb();
220 } else {
221 crq = NULL;
222 }
223
224 spin_unlock_irqrestore(&queue->lock, flags);
225
226 return crq;
227}
228
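/*
 * The CRQ ring has a single producer (the hypervisor), which sets the
 * high bit (0x80) of the valid byte last. A consumer that observes the
 * bit set may therefore read the rest of the entry, and the dma_rmb()
 * above keeps those reads from being reordered before the valid-byte
 * check.
 */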
229/**
230 * ibmvmc_send_crq - Send CRQ
231 *
232 * @adapter: crq_server_adapter struct
233 * @word1: Word1 Data field
234 * @word2: Word2 Data field
235 *
236 * Return:
237 * 0 - Success
238 * Non-Zero - Failure
239 */
240static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
241 u64 word1, u64 word2)
242{
243 struct vio_dev *vdev = to_vio_dev(adapter->dev);
244 long rc = 0;
245
246 dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
247 vdev->unit_address, word1, word2);
248
249 /*
250 * Ensure the command buffer is flushed to memory before handing it
251 * over to the other side to prevent it from fetching any stale data.
252 */
253 dma_wmb();
254 rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
255 dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
256
257 return rc;
258}
259
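/*
 * H_SEND_CRQ takes the 16-byte CRQ entry as two 64-bit register
 * arguments. Callers build an ibmvmc_crq_msg with cpu_to_be*() fields
 * and then pass be64_to_cpu() of each half, so the value loaded into
 * the (CPU-endian) register reproduces the big-endian wire layout the
 * hypervisor expects.
 */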
260/**
261 * alloc_dma_buffer - Create DMA Buffer
262 *
263 * @vdev: vio_dev struct
264 * @size: Size field
265 * @dma_handle: DMA address field
266 *
267 * Allocates memory for the command queue and maps remote memory into an
268 * ioba.
269 *
270 * Returns a pointer to the buffer
271 */
272static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
273 dma_addr_t *dma_handle)
274{
275 /* allocate memory */
 276	void *buffer = kzalloc(size, GFP_ATOMIC);
277
278 if (!buffer) {
279 *dma_handle = 0;
280 return NULL;
281 }
282
283 /* DMA map */
284 *dma_handle = dma_map_single(&vdev->dev, buffer, size,
285 DMA_BIDIRECTIONAL);
286
287 if (dma_mapping_error(&vdev->dev, *dma_handle)) {
288 *dma_handle = 0;
 289		kfree_sensitive(buffer);
290 return NULL;
291 }
292
293 return buffer;
294}
295
296/**
297 * free_dma_buffer - Free DMA Buffer
298 *
299 * @vdev: vio_dev struct
300 * @size: Size field
301 * @vaddr: Address field
302 * @dma_handle: DMA address field
303 *
304 * Releases memory for a command queue and unmaps mapped remote memory.
305 */
306static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
307 dma_addr_t dma_handle)
308{
309 /* DMA unmap */
310 dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
311
312 /* deallocate memory */
 313	kfree_sensitive(vaddr);
314}
315
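/*
 * kfree_sensitive() zeroes the buffer before freeing it. VMC buffers may
 * have carried management-console traffic, so they are scrubbed rather
 * than plain-freed.
 */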
316/**
317 * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
318 *
319 * @hmc_index: HMC Index Field
320 *
321 * Return:
322 * Pointer to ibmvmc_buffer
323 */
324static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
325{
326 struct ibmvmc_buffer *buffer;
327 struct ibmvmc_buffer *ret_buf = NULL;
328 unsigned long i;
329
330 if (hmc_index > ibmvmc.max_hmc_index)
331 return NULL;
332
333 buffer = hmcs[hmc_index].buffer;
334
335 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
336 if (buffer[i].valid && buffer[i].free &&
337 buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
338 buffer[i].free = 0;
339 ret_buf = &buffer[i];
340 break;
341 }
342 }
343
344 return ret_buf;
345}
346
347/**
348 * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
349 *
350 * @adapter: crq_server_adapter struct
351 * @hmc_index: Hmc Index field
352 *
353 * Return:
354 * Pointer to ibmvmc_buffer
355 */
356static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
357 u8 hmc_index)
358{
359 struct ibmvmc_buffer *buffer;
360 struct ibmvmc_buffer *ret_buf = NULL;
361 unsigned long i;
362
363 if (hmc_index > ibmvmc.max_hmc_index) {
364 dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
365 hmc_index);
366 return NULL;
367 }
368
369 buffer = hmcs[hmc_index].buffer;
370
371 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
372 if (buffer[i].free &&
373 buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
374 buffer[i].free = 0;
375 ret_buf = &buffer[i];
376 break;
377 }
378 }
379
380 return ret_buf;
381}
382
383/**
384 * ibmvmc_free_hmc_buffer - Free an HMC Buffer
385 *
386 * @hmc: ibmvmc_hmc struct
387 * @buffer: ibmvmc_buffer struct
388 *
389 */
390static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
391 struct ibmvmc_buffer *buffer)
392{
393 unsigned long flags;
394
395 spin_lock_irqsave(&hmc->lock, flags);
396 buffer->free = 1;
397 spin_unlock_irqrestore(&hmc->lock, flags);
398}
399
400/**
401 * ibmvmc_count_hmc_buffers - Count HMC Buffers
402 *
403 * @hmc_index: HMC Index field
404 * @valid: Valid number of buffers field
405 * @free: Free number of buffers field
406 *
407 */
408static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
409 unsigned int *free)
410{
411 struct ibmvmc_buffer *buffer;
412 unsigned long i;
413 unsigned long flags;
414
415 if (hmc_index > ibmvmc.max_hmc_index)
416 return;
417
418 if (!valid || !free)
419 return;
420
421 *valid = 0; *free = 0;
422
423 buffer = hmcs[hmc_index].buffer;
424 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
425
426 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
427 if (buffer[i].valid) {
428 *valid = *valid + 1;
429 if (buffer[i].free)
430 *free = *free + 1;
431 }
432 }
433
434 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
435}
436
437/**
438 * ibmvmc_get_free_hmc - Get Free HMC
439 *
440 * Return:
441 * Pointer to an available HMC Connection
442 * Null otherwise
443 */
444static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
445{
446 unsigned long i;
447 unsigned long flags;
448
449 /*
450 * Find an available HMC connection.
451 */
452 for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
453 spin_lock_irqsave(&hmcs[i].lock, flags);
454 if (hmcs[i].state == ibmhmc_state_free) {
455 hmcs[i].index = i;
456 hmcs[i].state = ibmhmc_state_initial;
457 spin_unlock_irqrestore(&hmcs[i].lock, flags);
458 return &hmcs[i];
459 }
460 spin_unlock_irqrestore(&hmcs[i].lock, flags);
461 }
462
463 return NULL;
464}
465
466/**
467 * ibmvmc_return_hmc - Return an HMC Connection
468 *
469 * @hmc: ibmvmc_hmc struct
470 * @release_readers: Number of readers connected to session
471 *
472 * This function releases the HMC connections back into the pool.
473 *
474 * Return:
475 * 0 - Success
476 * Non-zero - Failure
477 */
478static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
479{
480 struct ibmvmc_buffer *buffer;
481 struct crq_server_adapter *adapter;
482 struct vio_dev *vdev;
483 unsigned long i;
484 unsigned long flags;
485
486 if (!hmc || !hmc->adapter)
487 return -EIO;
488
489 if (release_readers) {
490 if (hmc->file_session) {
491 struct ibmvmc_file_session *session = hmc->file_session;
492
493 session->valid = 0;
494 wake_up_interruptible(&ibmvmc_read_wait);
495 }
496 }
497
498 adapter = hmc->adapter;
499 vdev = to_vio_dev(adapter->dev);
500
501 spin_lock_irqsave(&hmc->lock, flags);
502 hmc->index = 0;
503 hmc->state = ibmhmc_state_free;
504 hmc->queue_head = 0;
505 hmc->queue_tail = 0;
506 buffer = hmc->buffer;
507 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
508 if (buffer[i].valid) {
509 free_dma_buffer(vdev,
510 ibmvmc.max_mtu,
511 buffer[i].real_addr_local,
512 buffer[i].dma_addr_local);
513 dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
514 }
515 memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
516
517 hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
518 }
519
520 spin_unlock_irqrestore(&hmc->lock, flags);
521
522 return 0;
523}
524
525/**
526 * ibmvmc_send_open - Interface Open
527 * @buffer: Pointer to ibmvmc_buffer struct
528 * @hmc: Pointer to ibmvmc_hmc struct
529 *
530 * This command is sent by the management partition as the result of a
531 * management partition device request. It causes the hypervisor to
532 * prepare a set of data buffers for the management application connection
 533 * indicated by the HMC index. A unique HMC index would be used if multiple management
534 * applications running concurrently were desired. Before responding to this
535 * command, the hypervisor must provide the management partition with at
 536 * least one of these new buffers via the Add Buffer message. This indicates whether
537 * the messages are inbound or outbound from the hypervisor.
538 *
539 * Return:
540 * 0 - Success
541 * Non-zero - Failure
542 */
543static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
544 struct ibmvmc_hmc *hmc)
545{
546 struct ibmvmc_crq_msg crq_msg;
547 struct crq_server_adapter *adapter;
548 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
549 int rc = 0;
550
551 if (!hmc || !hmc->adapter)
552 return -EIO;
553
554 adapter = hmc->adapter;
555
556 dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
557 (unsigned long)buffer->size, (unsigned long)adapter->liobn,
558 (unsigned long)buffer->dma_addr_local,
559 (unsigned long)adapter->riobn,
560 (unsigned long)buffer->dma_addr_remote);
561
562 rc = h_copy_rdma(buffer->size,
563 adapter->liobn,
564 buffer->dma_addr_local,
565 adapter->riobn,
566 buffer->dma_addr_remote);
567 if (rc) {
568 dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
569 rc);
570 return -EIO;
571 }
572
573 hmc->state = ibmhmc_state_opening;
574
575 crq_msg.valid = 0x80;
576 crq_msg.type = VMC_MSG_OPEN;
577 crq_msg.status = 0;
578 crq_msg.var1.rsvd = 0;
579 crq_msg.hmc_session = hmc->session;
580 crq_msg.hmc_index = hmc->index;
581 crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
582 crq_msg.rsvd = 0;
583 crq_msg.var3.rsvd = 0;
584
585 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
586 be64_to_cpu(crq_as_u64[1]));
587
588 return rc;
589}
590
591/**
592 * ibmvmc_send_close - Interface Close
593 * @hmc: Pointer to ibmvmc_hmc struct
594 *
595 * This command is sent by the management partition to terminate a
596 * management application to hypervisor connection. When this command is
597 * sent, the management partition has quiesced all I/O operations to all
598 * buffers associated with this management application connection, and
599 * has freed any storage for these buffers.
600 *
601 * Return:
602 * 0 - Success
603 * Non-zero - Failure
604 */
605static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
606{
607 struct ibmvmc_crq_msg crq_msg;
608 struct crq_server_adapter *adapter;
609 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
610 int rc = 0;
611
612 if (!hmc || !hmc->adapter)
613 return -EIO;
614
615 adapter = hmc->adapter;
616
617 dev_info(adapter->dev, "CRQ send: close\n");
618
619 crq_msg.valid = 0x80;
620 crq_msg.type = VMC_MSG_CLOSE;
621 crq_msg.status = 0;
622 crq_msg.var1.rsvd = 0;
623 crq_msg.hmc_session = hmc->session;
624 crq_msg.hmc_index = hmc->index;
625 crq_msg.var2.rsvd = 0;
626 crq_msg.rsvd = 0;
627 crq_msg.var3.rsvd = 0;
628
629 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
630 be64_to_cpu(crq_as_u64[1]));
631
632 return rc;
633}
634
635/**
636 * ibmvmc_send_capabilities - Send VMC Capabilities
637 *
638 * @adapter: crq_server_adapter struct
639 *
640 * The capabilities message is an administrative message sent after the CRQ
641 * initialization sequence of messages and is used to exchange VMC capabilities
642 * between the management partition and the hypervisor. The management
643 * partition must send this message and the hypervisor must respond with VMC
 644 * capabilities Response message before HMC interface messages can begin. Any
 645 * HMC interface messages received before the exchange of capabilities has
 646 * completed are dropped.
647 *
648 * Return:
649 * 0 - Success
650 */
651static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
652{
653 struct ibmvmc_admin_crq_msg crq_msg;
654 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
655
656 dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
657 crq_msg.valid = 0x80;
658 crq_msg.type = VMC_MSG_CAP;
659 crq_msg.status = 0;
660 crq_msg.rsvd[0] = 0;
661 crq_msg.rsvd[1] = 0;
662 crq_msg.max_hmc = ibmvmc_max_hmcs;
663 crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
664 crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
665 crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
666 crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
667
668 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
669 be64_to_cpu(crq_as_u64[1]));
670
671 ibmvmc.state = ibmvmc_state_capabilities;
672
673 return 0;
674}
675
676/**
677 * ibmvmc_send_add_buffer_resp - Add Buffer Response
678 *
679 * @adapter: crq_server_adapter struct
680 * @status: Status field
681 * @hmc_session: HMC Session field
682 * @hmc_index: HMC Index field
683 * @buffer_id: Buffer Id field
684 *
685 * This command is sent by the management partition to the hypervisor in
686 * response to the Add Buffer message. The Status field indicates the result of
687 * the command.
688 *
689 * Return:
690 * 0 - Success
691 */
692static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
693 u8 status, u8 hmc_session,
694 u8 hmc_index, u16 buffer_id)
695{
696 struct ibmvmc_crq_msg crq_msg;
697 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
698
699 dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
700 crq_msg.valid = 0x80;
701 crq_msg.type = VMC_MSG_ADD_BUF_RESP;
702 crq_msg.status = status;
703 crq_msg.var1.rsvd = 0;
704 crq_msg.hmc_session = hmc_session;
705 crq_msg.hmc_index = hmc_index;
706 crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
707 crq_msg.rsvd = 0;
708 crq_msg.var3.rsvd = 0;
709
710 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
711 be64_to_cpu(crq_as_u64[1]));
712
713 return 0;
714}
715
716/**
717 * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
718 *
719 * @adapter: crq_server_adapter struct
720 * @status: Status field
721 * @hmc_session: HMC Session field
722 * @hmc_index: HMC Index field
723 * @buffer_id: Buffer Id field
724 *
725 * This command is sent by the management partition to the hypervisor in
726 * response to the Remove Buffer message. The Buffer ID field indicates
727 * which buffer the management partition selected to remove. The Status
728 * field indicates the result of the command.
729 *
730 * Return:
731 * 0 - Success
732 */
733static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
734 u8 status, u8 hmc_session,
735 u8 hmc_index, u16 buffer_id)
736{
737 struct ibmvmc_crq_msg crq_msg;
738 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
739
740 dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
741 crq_msg.valid = 0x80;
742 crq_msg.type = VMC_MSG_REM_BUF_RESP;
743 crq_msg.status = status;
744 crq_msg.var1.rsvd = 0;
745 crq_msg.hmc_session = hmc_session;
746 crq_msg.hmc_index = hmc_index;
747 crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
748 crq_msg.rsvd = 0;
749 crq_msg.var3.rsvd = 0;
750
751 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
752 be64_to_cpu(crq_as_u64[1]));
753
754 return 0;
755}
756
757/**
758 * ibmvmc_send_msg - Signal Message
759 *
760 * @adapter: crq_server_adapter struct
761 * @buffer: ibmvmc_buffer struct
762 * @hmc: ibmvmc_hmc struct
 763 * @msg_len: message length field
764 *
765 * This command is sent between the management partition and the hypervisor
766 * in order to signal the arrival of an HMC protocol message. The command
767 * can be sent by both the management partition and the hypervisor. It is
768 * used for all traffic between the management application and the hypervisor,
769 * regardless of who initiated the communication.
770 *
771 * There is no response to this message.
772 *
773 * Return:
774 * 0 - Success
775 * Non-zero - Failure
776 */
777static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
778 struct ibmvmc_buffer *buffer,
779 struct ibmvmc_hmc *hmc, int msg_len)
780{
781 struct ibmvmc_crq_msg crq_msg;
782 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
783 int rc = 0;
784
785 dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
786 rc = h_copy_rdma(msg_len,
787 adapter->liobn,
788 buffer->dma_addr_local,
789 adapter->riobn,
790 buffer->dma_addr_remote);
791 if (rc) {
792 dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
793 rc);
794 return rc;
795 }
796
797 crq_msg.valid = 0x80;
798 crq_msg.type = VMC_MSG_SIGNAL;
799 crq_msg.status = 0;
800 crq_msg.var1.rsvd = 0;
801 crq_msg.hmc_session = hmc->session;
802 crq_msg.hmc_index = hmc->index;
803 crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
804 crq_msg.var3.msg_len = cpu_to_be32(msg_len);
805 dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
806 be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
807
808 buffer->owner = VMC_BUF_OWNER_HV;
809 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
810 be64_to_cpu(crq_as_u64[1]));
811
812 return rc;
813}
814
815/**
816 * ibmvmc_open - Open Session
817 *
818 * @inode: inode struct
819 * @file: file struct
820 *
821 * Return:
822 * 0 - Success
 823 * Non-zero - Failure
824 */
825static int ibmvmc_open(struct inode *inode, struct file *file)
826{
827 struct ibmvmc_file_session *session;
828
829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830 (unsigned long)inode, (unsigned long)file,
831 ibmvmc.state);
832
833 session = kzalloc(sizeof(*session), GFP_KERNEL);
834 if (!session)
835 return -ENOMEM;
836
837 session->file = file;
838 file->private_data = session;
839
 840	return 0;
841}
842
843/**
844 * ibmvmc_close - Close Session
845 *
846 * @inode: inode struct
847 * @file: file struct
848 *
849 * Return:
850 * 0 - Success
851 * Non-zero - Failure
852 */
853static int ibmvmc_close(struct inode *inode, struct file *file)
854{
855 struct ibmvmc_file_session *session;
856 struct ibmvmc_hmc *hmc;
857 int rc = 0;
858 unsigned long flags;
859
860 pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
861 (unsigned long)file, ibmvmc.state);
862
863 session = file->private_data;
864 if (!session)
865 return -EIO;
866
867 hmc = session->hmc;
868 if (hmc) {
869 if (!hmc->adapter)
870 return -EIO;
871
872 if (ibmvmc.state == ibmvmc_state_failed) {
873 dev_warn(hmc->adapter->dev, "close: state_failed\n");
874 return -EIO;
875 }
876
877 spin_lock_irqsave(&hmc->lock, flags);
878 if (hmc->state >= ibmhmc_state_opening) {
879 rc = ibmvmc_send_close(hmc);
880 if (rc)
881 dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
882 }
883 spin_unlock_irqrestore(&hmc->lock, flags);
884 }
885
 886	kfree_sensitive(session);
887
888 return rc;
889}
890
891/**
892 * ibmvmc_read - Read
893 *
894 * @file: file struct
895 * @buf: Character buffer
896 * @nbytes: Size in bytes
897 * @ppos: Offset
898 *
899 * Return:
900 * 0 - Success
901 * Non-zero - Failure
902 */
903static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
904 loff_t *ppos)
905{
906 struct ibmvmc_file_session *session;
907 struct ibmvmc_hmc *hmc;
908 struct crq_server_adapter *adapter;
909 struct ibmvmc_buffer *buffer;
910 ssize_t n;
911 ssize_t retval = 0;
912 unsigned long flags;
913 DEFINE_WAIT(wait);
914
915 pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
916 (unsigned long)file, (unsigned long)buf,
917 (unsigned long)nbytes);
918
919 if (nbytes == 0)
920 return 0;
921
922 if (nbytes > ibmvmc.max_mtu) {
923 pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
924 (unsigned int)nbytes);
925 return -EINVAL;
926 }
927
928 session = file->private_data;
929 if (!session) {
930 pr_warn("ibmvmc: read: no session\n");
931 return -EIO;
932 }
933
934 hmc = session->hmc;
935 if (!hmc) {
936 pr_warn("ibmvmc: read: no hmc\n");
937 return -EIO;
938 }
939
940 adapter = hmc->adapter;
941 if (!adapter) {
942 pr_warn("ibmvmc: read: no adapter\n");
943 return -EIO;
944 }
945
946 do {
947 prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
948
949 spin_lock_irqsave(&hmc->lock, flags);
950 if (hmc->queue_tail != hmc->queue_head)
951 /* Data is available */
952 break;
953
954 spin_unlock_irqrestore(&hmc->lock, flags);
955
956 if (!session->valid) {
957 retval = -EBADFD;
958 goto out;
959 }
960 if (file->f_flags & O_NONBLOCK) {
961 retval = -EAGAIN;
962 goto out;
963 }
964
965 schedule();
966
967 if (signal_pending(current)) {
968 retval = -ERESTARTSYS;
969 goto out;
970 }
971 } while (1);
972
973 buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
974 hmc->queue_tail++;
975 if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
976 hmc->queue_tail = 0;
977 spin_unlock_irqrestore(&hmc->lock, flags);
978
979 nbytes = min_t(size_t, nbytes, buffer->msg_len);
980 n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
981 dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
982 ibmvmc_free_hmc_buffer(hmc, buffer);
983 retval = nbytes;
984
985 if (n) {
986 dev_warn(adapter->dev, "read: copy to user failed.\n");
987 retval = -EFAULT;
988 }
989
990 out:
991 finish_wait(&ibmvmc_read_wait, &wait);
992 dev_dbg(adapter->dev, "read: out %ld\n", retval);
993 return retval;
994}
995
996/**
997 * ibmvmc_poll - Poll
998 *
999 * @file: file struct
1000 * @wait: Poll Table
1001 *
1002 * Return:
1003 * poll.h return values
1004 */
1005static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
1006{
1007 struct ibmvmc_file_session *session;
1008 struct ibmvmc_hmc *hmc;
1009 unsigned int mask = 0;
1010
1011 session = file->private_data;
1012 if (!session)
1013 return 0;
1014
1015 hmc = session->hmc;
1016 if (!hmc)
1017 return 0;
1018
1019 poll_wait(file, &ibmvmc_read_wait, wait);
1020
1021 if (hmc->queue_head != hmc->queue_tail)
1022 mask |= POLLIN | POLLRDNORM;
1023
1024 return mask;
1025}
1026
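/*
 * Illustrative userspace consumption loop (a sketch, not defined by this
 * driver; the buffer size here is an assumption):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *	}
 */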
1027/**
1028 * ibmvmc_write - Write
1029 *
1030 * @file: file struct
 1031 * @buffer: Character buffer
1032 * @count: Count field
1033 * @ppos: Offset
1034 *
1035 * Return:
1036 * 0 - Success
1037 * Non-zero - Failure
1038 */
1039static ssize_t ibmvmc_write(struct file *file, const char *buffer,
1040 size_t count, loff_t *ppos)
1041{
 1042	struct inode *inode;
1043 struct ibmvmc_buffer *vmc_buffer;
1044 struct ibmvmc_file_session *session;
1045 struct crq_server_adapter *adapter;
1046 struct ibmvmc_hmc *hmc;
1047 unsigned char *buf;
1048 unsigned long flags;
1049 size_t bytes;
1050 const char *p = buffer;
1051 size_t c = count;
1052 int ret = 0;
1053
1054 session = file->private_data;
1055 if (!session)
1056 return -EIO;
1057
1058 hmc = session->hmc;
1059 if (!hmc)
1060 return -EIO;
1061
1062 spin_lock_irqsave(&hmc->lock, flags);
1063 if (hmc->state == ibmhmc_state_free) {
1064 /* HMC connection is not valid (possibly was reset under us). */
1065 ret = -EIO;
1066 goto out;
1067 }
1068
1069 adapter = hmc->adapter;
1070 if (!adapter) {
1071 ret = -EIO;
1072 goto out;
1073 }
1074
1075 if (count > ibmvmc.max_mtu) {
1076 dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
1077 (unsigned long)count);
1078 ret = -EIO;
1079 goto out;
1080 }
1081
1082 /* Waiting for the open resp message to the ioctl(1) - retry */
1083 if (hmc->state == ibmhmc_state_opening) {
1084 ret = -EBUSY;
1085 goto out;
1086 }
1087
1088 /* Make sure the ioctl() was called & the open msg sent, and that
1089 * the HMC connection has not failed.
1090 */
1091 if (hmc->state != ibmhmc_state_ready) {
1092 ret = -EIO;
1093 goto out;
1094 }
1095
1096 vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1097 if (!vmc_buffer) {
1098 /* No buffer available for the msg send, or we have not yet
1099 * completed the open/open_resp sequence. Retry until this is
1100 * complete.
1101 */
1102 ret = -EBUSY;
1103 goto out;
1104 }
1105 if (!vmc_buffer->real_addr_local) {
1106 dev_err(adapter->dev, "no buffer storage assigned\n");
1107 ret = -EIO;
1108 goto out;
1109 }
1110 buf = vmc_buffer->real_addr_local;
1111
1112 while (c > 0) {
1113 bytes = min_t(size_t, c, vmc_buffer->size);
1114
1115 bytes -= copy_from_user(buf, p, bytes);
1116 if (!bytes) {
1117 ret = -EFAULT;
1118 goto out;
1119 }
1120 c -= bytes;
1121 p += bytes;
1122 }
1123 if (p == buffer)
1124 goto out;
1125
 1126	inode = file_inode(file);
 1127	inode->i_mtime = inode_set_ctime_current(inode);
 1128	mark_inode_dirty(inode);
1129
1130 dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
1131 (unsigned long)file, (unsigned long)count);
1132
1133 ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
1134 ret = p - buffer;
1135 out:
1136 spin_unlock_irqrestore(&hmc->lock, flags);
1137 return (ssize_t)(ret);
1138}
1139
1140/**
1141 * ibmvmc_setup_hmc - Setup the HMC
1142 *
1143 * @session: ibmvmc_file_session struct
1144 *
1145 * Return:
1146 * 0 - Success
1147 * Non-zero - Failure
1148 */
1149static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
1150{
1151 struct ibmvmc_hmc *hmc;
1152 unsigned int valid, free, index;
1153
1154 if (ibmvmc.state == ibmvmc_state_failed) {
1155 pr_warn("ibmvmc: Reserve HMC: state_failed\n");
1156 return -EIO;
1157 }
1158
1159 if (ibmvmc.state < ibmvmc_state_ready) {
1160 pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
1161 return -EAGAIN;
1162 }
1163
1164 /* Device is busy until capabilities have been exchanged and we
1165 * have a generic buffer for each possible HMC connection.
1166 */
1167 for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
1168 valid = 0;
1169 ibmvmc_count_hmc_buffers(index, &valid, &free);
1170 if (valid == 0) {
1171 pr_warn("ibmvmc: buffers not ready for index %d\n",
1172 index);
1173 return -ENOBUFS;
1174 }
1175 }
1176
1177 /* Get an hmc object, and transition to ibmhmc_state_initial */
1178 hmc = ibmvmc_get_free_hmc();
1179 if (!hmc) {
1180 pr_warn("%s: free hmc not found\n", __func__);
1181 return -EBUSY;
1182 }
1183
1184 hmc->session = hmc->session + 1;
1185 if (hmc->session == 0xff)
1186 hmc->session = 1;
1187
1188 session->hmc = hmc;
1189 hmc->adapter = &ibmvmc_adapter;
1190 hmc->file_session = session;
1191 session->valid = 1;
1192
1193 return 0;
1194}
1195
1196/**
1197 * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
1198 *
1199 * @session: ibmvmc_file_session struct
1200 * @new_hmc_id: HMC id field
1201 *
1202 * IOCTL command to setup the hmc id
1203 *
1204 * Return:
1205 * 0 - Success
1206 * Non-zero - Failure
1207 */
1208static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
1209 unsigned char __user *new_hmc_id)
1210{
1211 struct ibmvmc_hmc *hmc;
1212 struct ibmvmc_buffer *buffer;
1213 size_t bytes;
1214 char print_buffer[HMC_ID_LEN + 1];
1215 unsigned long flags;
1216 long rc = 0;
1217
1218 /* Reserve HMC session */
1219 hmc = session->hmc;
1220 if (!hmc) {
1221 rc = ibmvmc_setup_hmc(session);
1222 if (rc)
1223 return rc;
1224
1225 hmc = session->hmc;
1226 if (!hmc) {
1227 pr_err("ibmvmc: setup_hmc success but no hmc\n");
1228 return -EIO;
1229 }
1230 }
1231
1232 if (hmc->state != ibmhmc_state_initial) {
1233 pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
1234 hmc->state);
1235 return -EIO;
1236 }
1237
1238 bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
1239 if (bytes)
1240 return -EFAULT;
1241
1242 /* Send Open Session command */
1243 spin_lock_irqsave(&hmc->lock, flags);
1244 buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1245 spin_unlock_irqrestore(&hmc->lock, flags);
1246
1247 if (!buffer || !buffer->real_addr_local) {
1248 pr_warn("ibmvmc: sethmcid: no buffer available\n");
1249 return -EIO;
1250 }
1251
1252 /* Make sure buffer is NULL terminated before trying to print it */
1253 memset(print_buffer, 0, HMC_ID_LEN + 1);
1254 strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
1255 pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
1256
1257 memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
1258 /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
1259 rc = ibmvmc_send_open(buffer, hmc);
1260
1261 return rc;
1262}
1263
1264/**
1265 * ibmvmc_ioctl_query - IOCTL Query
1266 *
1267 * @session: ibmvmc_file_session struct
1268 * @ret_struct: ibmvmc_query_struct
1269 *
1270 * Return:
1271 * 0 - Success
1272 * Non-zero - Failure
1273 */
1274static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
1275 struct ibmvmc_query_struct __user *ret_struct)
1276{
1277 struct ibmvmc_query_struct query_struct;
1278 size_t bytes;
1279
1280 memset(&query_struct, 0, sizeof(query_struct));
1281 query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
1282 query_struct.state = ibmvmc.state;
1283 query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
1284
1285 bytes = copy_to_user(ret_struct, &query_struct,
1286 sizeof(query_struct));
1287 if (bytes)
1288 return -EFAULT;
1289
1290 return 0;
1291}
1292
1293/**
1294 * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
1295 *
1296 * @session: ibmvmc_file_session struct
1297 * @ret_vmc_index: VMC Index
1298 *
1299 * Return:
1300 * 0 - Success
1301 * Non-zero - Failure
1302 */
1303static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
1304 u32 __user *ret_vmc_index)
1305{
1306 /* TODO: (adreznec) Add locking to control multiple process access */
1307 size_t bytes;
1308 long rc;
1309 u32 vmc_drc_index;
1310
1311 /* Call to request the VMC device from phyp*/
1312 rc = h_request_vmc(&vmc_drc_index);
1313 pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
1314
1315 if (rc == H_SUCCESS) {
1316 rc = 0;
1317 } else if (rc == H_FUNCTION) {
1318 pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
1319 return -EPERM;
1320 } else if (rc == H_AUTHORITY) {
1321 pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
1322 return -EPERM;
1323 } else if (rc == H_HARDWARE) {
1324 pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
1325 return -EIO;
1326 } else if (rc == H_RESOURCE) {
1327 pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
1328 return -ENODEV;
1329 } else if (rc == H_NOT_AVAILABLE) {
1330 pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
1331 return -EPERM;
1332 } else if (rc == H_PARAMETER) {
1333 pr_err("ibmvmc: requestvmc: invalid parameter\n");
1334 return -EINVAL;
1335 }
1336
1337 /* Success, set the vmc index in global struct */
1338 ibmvmc.vmc_drc_index = vmc_drc_index;
1339
1340 bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
1341 sizeof(*ret_vmc_index));
1342 if (bytes) {
1343 pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
1344 return -EFAULT;
1345 }
1346 return rc;
1347}
1348
1349/**
1350 * ibmvmc_ioctl - IOCTL
1351 *
 1352 * @file: file information
1353 * @cmd: cmd field
1354 * @arg: Argument field
1355 *
1356 * Return:
1357 * 0 - Success
1358 * Non-zero - Failure
1359 */
1360static long ibmvmc_ioctl(struct file *file,
1361 unsigned int cmd, unsigned long arg)
1362{
1363 struct ibmvmc_file_session *session = file->private_data;
1364
1365 pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
1366 (unsigned long)file, cmd, arg,
1367 (unsigned long)session);
1368
1369 if (!session) {
1370 pr_warn("ibmvmc: ioctl: no session\n");
1371 return -EIO;
1372 }
1373
1374 switch (cmd) {
1375 case VMC_IOCTL_SETHMCID:
1376 return ibmvmc_ioctl_sethmcid(session,
1377 (unsigned char __user *)arg);
1378 case VMC_IOCTL_QUERY:
1379 return ibmvmc_ioctl_query(session,
1380 (struct ibmvmc_query_struct __user *)arg);
1381 case VMC_IOCTL_REQUESTVMC:
1382 return ibmvmc_ioctl_requestvmc(session,
1383 (unsigned int __user *)arg);
1384 default:
1385 pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
1386 return -EINVAL;
1387 }
1388}
1389
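/*
 * Hedged sketch of the expected userspace setup sequence; the device
 * node name and the HMC id value are assumptions, not defined in this
 * file:
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *	struct ibmvmc_query_struct q;
 *	ioctl(fd, VMC_IOCTL_QUERY, &q);		// probe state/drc index
 *	unsigned char id[HMC_ID_LEN] = "hmc1";
 *	ioctl(fd, VMC_IOCTL_SETHMCID, id);	// reserve HMC, send open
 *	// ...then exchange HMC messages with write()/read()...
 */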
1390static const struct file_operations ibmvmc_fops = {
1391 .owner = THIS_MODULE,
1392 .read = ibmvmc_read,
1393 .write = ibmvmc_write,
1394 .poll = ibmvmc_poll,
1395 .unlocked_ioctl = ibmvmc_ioctl,
1396 .open = ibmvmc_open,
1397 .release = ibmvmc_close,
1398};
1399
1400/**
1401 * ibmvmc_add_buffer - Add Buffer
1402 *
1403 * @adapter: crq_server_adapter struct
1404 * @crq: ibmvmc_crq_msg struct
1405 *
1406 * This message transfers a buffer from hypervisor ownership to management
1407 * partition ownership. The LIOBA is obtained from the virtual TCE table
1408 * associated with the hypervisor side of the VMC device, and points to a
1409 * buffer of size MTU (as established in the capabilities exchange).
1410 *
 1411 * Typical flow for adding buffers:
1412 * 1. A new management application connection is opened by the management
1413 * partition.
1414 * 2. The hypervisor assigns new buffers for the traffic associated with
1415 * that connection.
1416 * 3. The hypervisor sends VMC Add Buffer messages to the management
1417 * partition, informing it of the new buffers.
1418 * 4. The hypervisor sends an HMC protocol message (to the management
1419 * application) notifying it of the new buffers. This informs the
1420 * application that it has buffers available for sending HMC
1421 * commands.
1422 *
1423 * Return:
1424 * 0 - Success
1425 * Non-zero - Failure
1426 */
1427static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
1428 struct ibmvmc_crq_msg *crq)
1429{
1430 struct ibmvmc_buffer *buffer;
1431 u8 hmc_index;
1432 u8 hmc_session;
1433 u16 buffer_id;
1434 unsigned long flags;
1435 int rc = 0;
1436
1437 if (!crq)
1438 return -1;
1439
1440 hmc_session = crq->hmc_session;
1441 hmc_index = crq->hmc_index;
1442 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1443
1444 if (hmc_index > ibmvmc.max_hmc_index) {
1445 dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
1446 hmc_index);
1447 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1448 hmc_session, hmc_index, buffer_id);
1449 return -1;
1450 }
1451
1452 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1453 dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
1454 buffer_id);
1455 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1456 hmc_session, hmc_index, buffer_id);
1457 return -1;
1458 }
1459
1460 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1461 buffer = &hmcs[hmc_index].buffer[buffer_id];
1462
1463 if (buffer->real_addr_local || buffer->dma_addr_local) {
1464 dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
1465 (unsigned long)buffer_id);
1466 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1467 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1468 hmc_session, hmc_index, buffer_id);
1469 return -1;
1470 }
1471
1472 buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
1473 ibmvmc.max_mtu,
1474 &buffer->dma_addr_local);
1475
1476 if (!buffer->real_addr_local) {
1477 dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
1478 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1479 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
1480 hmc_session, hmc_index, buffer_id);
1481 return -1;
1482 }
1483
1484 buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
1485 buffer->size = ibmvmc.max_mtu;
1486 buffer->owner = crq->var1.owner;
1487 buffer->free = 1;
1488 /* Must ensure valid==1 is observable only after all other fields are */
1489 dma_wmb();
1490 buffer->valid = 1;
1491 buffer->id = buffer_id;
1492
1493 dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
1494 dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
1495 hmc_index, hmc_session, buffer_id, buffer->owner);
1496 dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
1497 (u32)buffer->dma_addr_local,
1498 (u32)buffer->dma_addr_remote);
1499 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1500
1501 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1502 hmc_index, buffer_id);
1503
1504 return rc;
1505}
1506
1507/**
1508 * ibmvmc_rem_buffer - Remove Buffer
1509 *
1510 * @adapter: crq_server_adapter struct
1511 * @crq: ibmvmc_crq_msg struct
1512 *
1513 * This message requests an HMC buffer to be transferred from management
1514 * partition ownership to hypervisor ownership. The management partition may
1515 * not be able to satisfy the request at a particular point in time if all its
1516 * buffers are in use. The management partition requires a depth of at least
1517 * one inbound buffer to allow management application commands to flow to the
1518 * hypervisor. It is, therefore, an interface error for the hypervisor to
1519 * attempt to remove the management partition's last buffer.
1520 *
1521 * The hypervisor is expected to manage buffer usage with the management
1522 * application directly and inform the management partition when buffers may be
1523 * removed. The typical flow for removing buffers:
1524 *
1525 * 1. The management application no longer needs a communication path to a
1526 * particular hypervisor function. That function is closed.
1527 * 2. The hypervisor and the management application quiesce all traffic to that
1528 * function. The hypervisor requests a reduction in buffer pool size.
1529 * 3. The management application acknowledges the reduction in buffer pool size.
1530 * 4. The hypervisor sends a Remove Buffer message to the management partition,
1531 * informing it of the reduction in buffers.
1532 * 5. The management partition verifies it can remove the buffer. This is
1533 * possible if buffers have been quiesced.
1534 *
1535 * Return:
1536 * 0 - Success
1537 * Non-zero - Failure
1538 */
1539/*
1540 * The hypervisor requested that we pick an unused buffer, and return it.
1541 * Before sending the buffer back, we free any storage associated with the
1542 * buffer.
1543 */
1544static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
1545 struct ibmvmc_crq_msg *crq)
1546{
1547 struct ibmvmc_buffer *buffer;
1548 u8 hmc_index;
1549 u8 hmc_session;
1550 u16 buffer_id = 0;
1551 unsigned long flags;
1552 int rc = 0;
1553
1554 if (!crq)
1555 return -1;
1556
1557 hmc_session = crq->hmc_session;
1558 hmc_index = crq->hmc_index;
1559
1560 if (hmc_index > ibmvmc.max_hmc_index) {
1561 dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
1562 hmc_index);
1563 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1564 hmc_session, hmc_index, buffer_id);
1565 return -1;
1566 }
1567
1568 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1569 buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
1570 if (!buffer) {
1571 dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
1572 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1573 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
1574 hmc_session, hmc_index,
1575 VMC_INVALID_BUFFER_ID);
1576 return -1;
1577 }
1578
1579 buffer_id = buffer->id;
1580
1581 if (buffer->valid)
1582 free_dma_buffer(to_vio_dev(adapter->dev),
1583 ibmvmc.max_mtu,
1584 buffer->real_addr_local,
1585 buffer->dma_addr_local);
1586
1587 memset(buffer, 0, sizeof(struct ibmvmc_buffer));
1588 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1589
1590 dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
1591 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1592 hmc_index, buffer_id);
1593
1594 return rc;
1595}
1596
1597static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
1598 struct ibmvmc_crq_msg *crq)
1599{
1600 struct ibmvmc_buffer *buffer;
1601 struct ibmvmc_hmc *hmc;
1602 unsigned long msg_len;
1603 u8 hmc_index;
1604 u8 hmc_session;
1605 u16 buffer_id;
1606 unsigned long flags;
1607 int rc = 0;
1608
1609 if (!crq)
1610 return -1;
1611
1612 /* Hypervisor writes CRQs directly into our memory in big endian */
1613 dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
1614 be64_to_cpu(*((unsigned long *)crq)),
1615 be64_to_cpu(*(((unsigned long *)crq) + 1)));
1616
1617 hmc_session = crq->hmc_session;
1618 hmc_index = crq->hmc_index;
1619 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1620 msg_len = be32_to_cpu(crq->var3.msg_len);
1621
1622 if (hmc_index > ibmvmc.max_hmc_index) {
1623 dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
1624 hmc_index);
1625 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1626 hmc_session, hmc_index, buffer_id);
1627 return -1;
1628 }
1629
1630 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1631 dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
1632 buffer_id);
1633 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1634 hmc_session, hmc_index, buffer_id);
1635 return -1;
1636 }
1637
1638 hmc = &hmcs[hmc_index];
1639 spin_lock_irqsave(&hmc->lock, flags);
1640
1641 if (hmc->state == ibmhmc_state_free) {
1642 dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
1643 hmc->state);
1644 /* HMC connection is not valid (possibly was reset under us). */
1645 spin_unlock_irqrestore(&hmc->lock, flags);
1646 return -1;
1647 }
1648
1649 buffer = &hmc->buffer[buffer_id];
1650
1651 if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
1652 dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
1653 buffer->valid, buffer->owner);
1654 spin_unlock_irqrestore(&hmc->lock, flags);
1655 return -1;
1656 }
1657
1658 /* RDMA the data into the partition. */
1659 rc = h_copy_rdma(msg_len,
1660 adapter->riobn,
1661 buffer->dma_addr_remote,
1662 adapter->liobn,
1663 buffer->dma_addr_local);
1664
1665 dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
1666 (unsigned int)msg_len, (unsigned int)buffer_id,
1667 (unsigned int)hmc->queue_head, (unsigned int)hmc_index);
1668 buffer->msg_len = msg_len;
1669 buffer->free = 0;
1670 buffer->owner = VMC_BUF_OWNER_ALPHA;
1671
1672 if (rc) {
1673 dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
1674 rc);
1675 spin_unlock_irqrestore(&hmc->lock, flags);
1676 return -1;
1677 }
1678
1679 /* Must be locked because read operates on the same data */
1680 hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
1681 hmc->queue_head++;
1682 if (hmc->queue_head == ibmvmc_max_buf_pool_size)
1683 hmc->queue_head = 0;
1684
1685 if (hmc->queue_head == hmc->queue_tail)
1686 dev_err(adapter->dev, "outbound buffer queue wrapped.\n");
1687
1688 spin_unlock_irqrestore(&hmc->lock, flags);
1689
1690 wake_up_interruptible(&ibmvmc_read_wait);
1691
1692 return 0;
1693}
1694
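/*
 * ibmvmc_recv_msg() produces into the same queue_head/queue_tail ring
 * that ibmvmc_read() consumes, both under hmc->lock; the
 * wake_up_interruptible() above pairs with the prepare_to_wait() in the
 * read path.
 */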
1695/**
1696 * ibmvmc_process_capabilities - Process Capabilities
1697 *
1698 * @adapter: crq_server_adapter struct
1699 * @crqp: ibmvmc_crq_msg struct
1700 *
1701 */
1702static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
1703 struct ibmvmc_crq_msg *crqp)
1704{
1705 struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
1706
1707 if ((be16_to_cpu(crq->version) >> 8) !=
1708 (IBMVMC_PROTOCOL_VERSION >> 8)) {
1709 dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
1710 be16_to_cpu(crq->version),
1711 IBMVMC_PROTOCOL_VERSION);
1712 ibmvmc.state = ibmvmc_state_failed;
1713 return;
1714 }
1715
1716 ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
1717 ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
1718 be16_to_cpu(crq->pool_size));
1719 ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
1720 ibmvmc.state = ibmvmc_state_ready;
1721
1722 dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
1723 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
1724 ibmvmc.max_hmc_index);
1725}
1726
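/*
 * Every negotiated value is the minimum of what this driver offered in
 * ibmvmc_send_capabilities() and what the hypervisor answered with. With
 * illustrative numbers only: offering max_mtu = 16384 against a response
 * of max_mtu = 8192 leaves the session running with an 8192-byte MTU.
 */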
1727/**
1728 * ibmvmc_validate_hmc_session - Validate HMC Session
1729 *
1730 * @adapter: crq_server_adapter struct
1731 * @crq: ibmvmc_crq_msg struct
1732 *
1733 * Return:
1734 * 0 - Success
1735 * Non-zero - Failure
1736 */
1737static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
1738 struct ibmvmc_crq_msg *crq)
1739{
1740 unsigned char hmc_index;
1741
1742 hmc_index = crq->hmc_index;
1743
1744 if (crq->hmc_session == 0)
1745 return 0;
1746
1747 if (hmc_index > ibmvmc.max_hmc_index)
1748 return -1;
1749
1750 if (hmcs[hmc_index].session != crq->hmc_session) {
1751 dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
1752 hmcs[hmc_index].session, crq->hmc_session);
1753 return -1;
1754 }
1755
1756 return 0;
1757}
1758
1759/**
1760 * ibmvmc_reset - Reset
1761 *
1762 * @adapter: crq_server_adapter struct
 1763 * @xport_event: true if triggered by a transport event
 1764 *
 1765 * Closes all HMC sessions and conditionally schedules a CRQ reset: if
 1766 * @xport_event is true, the partner closed their CRQ and no reset is needed;
 1767 * if false, we must schedule a CRQ reset ourselves.
1768 */
1769static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
1770{
1771 int i;
1772
1773 if (ibmvmc.state != ibmvmc_state_sched_reset) {
1774 dev_info(adapter->dev, "*** Reset to initial state.\n");
1775 for (i = 0; i < ibmvmc_max_hmcs; i++)
1776 ibmvmc_return_hmc(&hmcs[i], xport_event);
1777
1778 if (xport_event) {
1779 /* CRQ was closed by the partner. We don't need to do
1780 * anything except set ourself to the correct state to
1781 * handle init msgs.
1782 */
1783 ibmvmc.state = ibmvmc_state_crqinit;
1784 } else {
1785 /* The partner did not close their CRQ - instead, we're
1786 * closing the CRQ on our end. Need to schedule this
1787 * for process context, because CRQ reset may require a
1788 * sleep.
1789 *
1790 * Setting ibmvmc.state here immediately prevents
1791 * ibmvmc_open from completing until the reset
1792 * completes in process context.
1793 */
1794 ibmvmc.state = ibmvmc_state_sched_reset;
1795 dev_dbg(adapter->dev, "Device reset scheduled");
1796 wake_up_interruptible(&adapter->reset_wait_queue);
1797 }
1798 }
1799}
1800
1801/**
1802 * ibmvmc_reset_task - Reset Task
1803 *
1804 * @data: Data field
1805 *
1806 * Performs a CRQ reset of the VMC device in process context.
1807 * NOTE: This function should not be called directly, use ibmvmc_reset.
1808 */
1809static int ibmvmc_reset_task(void *data)
1810{
1811 struct crq_server_adapter *adapter = data;
1812 int rc;
1813
1814 set_user_nice(current, -20);
1815
1816 while (!kthread_should_stop()) {
1817 wait_event_interruptible(adapter->reset_wait_queue,
1818 (ibmvmc.state == ibmvmc_state_sched_reset) ||
1819 kthread_should_stop());
1820
1821 if (kthread_should_stop())
1822 break;
1823
1824 dev_dbg(adapter->dev, "CRQ resetting in process context");
1825 tasklet_disable(&adapter->work_task);
1826
1827 rc = ibmvmc_reset_crq_queue(adapter);
1828
1829 if (rc != H_SUCCESS && rc != H_RESOURCE) {
1830 dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
1831 rc);
1832 ibmvmc.state = ibmvmc_state_failed;
1833 } else {
1834 ibmvmc.state = ibmvmc_state_crqinit;
1835
1836 if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
1837 != 0 && rc != H_RESOURCE)
1838 dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
1839 }
1840
1841 vio_enable_interrupts(to_vio_dev(adapter->dev));
1842 tasklet_enable(&adapter->work_task);
1843 }
1844
1845 return 0;
1846}
1847
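/*
 * The reset is split across contexts: ibmvmc_reset() runs from tasklet
 * context, flips the state to sched_reset and wakes this kthread, which
 * is allowed to sleep while re-registering the CRQ. The tasklet is
 * disabled for the duration so no CRQ entry is processed against a
 * half-reset queue.
 */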
1848/**
1849 * ibmvmc_process_open_resp - Process Open Response
1850 *
1851 * @crq: ibmvmc_crq_msg struct
1852 * @adapter: crq_server_adapter struct
1853 *
1854 * This command is sent by the hypervisor in response to the Interface
1855 * Open message. When this message is received, the indicated buffer is
1856 * again available for management partition use.
1857 */
1858static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
1859 struct crq_server_adapter *adapter)
1860{
1861 unsigned char hmc_index;
1862 unsigned short buffer_id;
1863
1864 hmc_index = crq->hmc_index;
1865 if (hmc_index > ibmvmc.max_hmc_index) {
1866 /* Why would PHYP give an index > max negotiated? */
1867 ibmvmc_reset(adapter, false);
1868 return;
1869 }
1870
1871 if (crq->status) {
1872 dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
1873 crq->status);
1874 ibmvmc_return_hmc(&hmcs[hmc_index], false);
1875 return;
1876 }
1877
1878 if (hmcs[hmc_index].state == ibmhmc_state_opening) {
1879 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1880 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1881 dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
1882 buffer_id);
1883 hmcs[hmc_index].state = ibmhmc_state_failed;
1884 } else {
1885 ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
1886 &hmcs[hmc_index].buffer[buffer_id]);
1887 hmcs[hmc_index].state = ibmhmc_state_ready;
1888 dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
1889 }
1890 } else {
1891 dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
1892 hmcs[hmc_index].state);
1893 }
1894}
1895
1896/**
1897 * ibmvmc_process_close_resp - Process Close Response
1898 *
1899 * @crq: ibmvmc_crq_msg struct
1900 * @adapter: crq_server_adapter struct
1901 *
 1902 * This command is sent by the hypervisor in response to the management
1903 * application Interface Close message.
1904 *
1905 * If the close fails, simply reset the entire driver as the state of the VMC
1906 * must be in tough shape.
1907 */
1908static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
1909 struct crq_server_adapter *adapter)
1910{
1911 unsigned char hmc_index;
1912
1913 hmc_index = crq->hmc_index;
1914 if (hmc_index > ibmvmc.max_hmc_index) {
1915 ibmvmc_reset(adapter, false);
1916 return;
1917 }
1918
1919 if (crq->status) {
1920 dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
1921 crq->status);
1922 ibmvmc_reset(adapter, false);
1923 return;
1924 }
1925
1926 ibmvmc_return_hmc(&hmcs[hmc_index], false);
1927}
1928
/**
 * ibmvmc_crq_process - Process CRQ
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * Process the CRQ message based upon the type of message received.
 */
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
			       struct ibmvmc_crq_msg *crq)
{
	switch (crq->type) {
	case VMC_MSG_CAP_RESP:
		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
			crq->type);
		if (ibmvmc.state == ibmvmc_state_capabilities)
			ibmvmc_process_capabilities(adapter, crq);
		else
			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
				 ibmvmc.state);
		break;
	case VMC_MSG_OPEN_RESP:
		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_open_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF:
		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_add_buffer(adapter, crq);
		break;
	case VMC_MSG_REM_BUF:
		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_rem_buffer(adapter, crq);
		break;
	case VMC_MSG_SIGNAL:
		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_recv_msg(adapter, crq);
		break;
	case VMC_MSG_CLOSE_RESP:
		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_close_resp(crq, adapter);
		break;
	case VMC_MSG_CAP:
	case VMC_MSG_OPEN:
	case VMC_MSG_CLOSE:
	case VMC_MSG_ADD_BUF_RESP:
	case VMC_MSG_REM_BUF_RESP:
		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
			 crq->type);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
			 crq->type);
		break;
	}
}
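
/*
 * For reference, the request/response pairing implied by the dispatch
 * above - a sketch inferred from this file, not an authoritative
 * protocol description:
 *
 *	sent by this driver		received from the hypervisor
 *	VMC_MSG_CAP			VMC_MSG_CAP_RESP
 *	VMC_MSG_OPEN			VMC_MSG_OPEN_RESP
 *	VMC_MSG_CLOSE			VMC_MSG_CLOSE_RESP
 *	VMC_MSG_ADD_BUF_RESP		VMC_MSG_ADD_BUF
 *	VMC_MSG_REM_BUF_RESP		VMC_MSG_REM_BUF
 *	VMC_MSG_SIGNAL			VMC_MSG_SIGNAL
 *
 * Messages in the left column arriving here are flagged "unexpected"
 * because this end originates them.
 */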

/**
 * ibmvmc_handle_crq_init - Handle CRQ Init
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * Handle the type of crq initialization based on whether
 * it is a message or a response.
 */
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
				   struct crq_server_adapter *adapter)
{
	switch (crq->type) {
	case 0x01:	/* Initialization message */
		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit) {
			/* Send back a response */
			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
					    0) == 0)
				ibmvmc_send_capabilities(adapter);
			else
				dev_err(adapter->dev, "Unable to send init rsp\n");
		} else {
			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
				ibmvmc.state, ibmvmc.max_mtu);
		}

		break;
	case 0x02:	/* Initialization response */
		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit)
			ibmvmc_send_capabilities(adapter);
		break;
	default:
		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
			 (unsigned long)crq->type);
	}
}
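
/*
 * Shape of the initialization elements handled above, as implied by the
 * constants used in this file (a sketch, not a spec): byte 0 is the CRQ
 * "valid" code 0xC0, byte 1 is the init type, and the rest of the
 * 16-byte element is zero.  Sent as a single 64-bit word:
 *
 *	0xC001000000000000	init request (sent from ibmvmc_probe())
 *	0xC002000000000000	init response (sent back above)
 */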

/**
 * ibmvmc_handle_crq - Handle CRQ
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * Read the command elements from the command queue and execute the
 * requests based upon the type of crq message.
 */
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
			      struct crq_server_adapter *adapter)
{
	switch (crq->valid) {
	case 0xC0:	/* initialization */
		ibmvmc_handle_crq_init(crq, adapter);
		break;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
		ibmvmc_reset(adapter, true);
		break;
	case 0x80:	/* real payload */
		ibmvmc_crq_process(adapter, crq);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
			 crq->valid);
		break;
	}
}

static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		/* Re-enable interrupts, then poll the queue one more time
		 * to close the race where an element arrived after the last
		 * dequeue above but before interrupts came back on.
		 */
		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}

/**
 * ibmvmc_init_crq_queue - Init CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == H_CLOSED) {
		/* The partner partition has not registered its end of the
		 * queue yet; not an error, just wait for it to come up.
		 */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts\n", rc);
		goto irq_enable_failed;
	}

	return retrc;

	/* Error unwind mirrors the setup order above */
irq_enable_failed:
	free_irq(vdev->irq, adapter);
req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}

/* Fill in the liobn and riobn fields on the adapter */
static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -ENODEV;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}
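
/*
 * Illustrative ibm,my-dma-window layout assumed by read_dma_window()
 * above (example values only, with ibm,#dma-address-cells = <2> and
 * ibm,#dma-size-cells = <2>); each window is <liobn address size>:
 *
 *	ibm,my-dma-window = <0x10000000 0x0 0x0 0x0 0x10000000
 *			     0x20000000 0x0 0x0 0x0 0x10000000>;
 *
 * This would yield liobn = 0x10000000 (local window) and
 * riobn = 0x20000000 (remote window).
 */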

static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return rc;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}
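
/*
 * Connection bring-up as it appears in this driver (a sketch of the
 * observable sequence, not an authoritative protocol description):
 *
 *   1. ibmvmc_init_crq_queue() registers a CRQ page via H_REG_CRQ.
 *   2. ibmvmc_probe() sends the init request word 0xC001000000000000.
 *   3. The partner answers with the init response 0xC002000000000000,
 *      or sends its own init request, which ibmvmc_handle_crq_init()
 *      answers; either way ibmvmc_send_capabilities() then begins the
 *      capabilities negotiation.
 */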

static void ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);
}

static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

static struct vio_driver ibmvmc_driver = {
	.name = ibmvmc_driver_name,
	.id_table = ibmvmc_device_table,
	.probe = ibmvmc_probe,
	.remove = ibmvmc_remove,
};

static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}

static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};

static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(hmcs));
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values.  These may be negotiated down
	 * during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);
	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}

static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}

module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");
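
/*
 * Example usage (illustrative values only): the parameters above can be
 * set at module load time, e.g.
 *
 *	modprobe ibmvmc max_hmcs=2 buf_pool_size=32
 *
 * and, being mode 0644, can also be read back under
 * /sys/module/ibmvmc/parameters/.  Out-of-range values are clamped by
 * ibmvmc_scrub_module_parms() at init time.
 */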

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");