drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1 /*******************************************************************************
2  * IBM Virtual SCSI Target Driver
3  * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4  *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
5  *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
6  *
7  * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
8  * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
11  * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  ****************************************************************************/
24
25 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
26
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/types.h>
31 #include <linux/list.h>
32 #include <linux/string.h>
33
34 #include <target/target_core_base.h>
35 #include <target/target_core_fabric.h>
36
37 #include <asm/hvcall.h>
38 #include <asm/vio.h>
39
40 #include <scsi/viosrp.h>
41
42 #include "ibmvscsi_tgt.h"
43
44 #define IBMVSCSIS_VERSION       "v0.2"
45
46 #define INITIAL_SRP_LIMIT       800
47 #define DEFAULT_MAX_SECTORS     256
48
49 static uint max_vdma_size = MAX_H_COPY_RDMA;
50
51 static char system_id[SYS_ID_NAME_LEN] = "";
52 static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
53 static uint partition_number = -1;
54
55 /* Adapter list and lock to control it */
56 static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
57 static LIST_HEAD(ibmvscsis_dev_list);
58
59 static long ibmvscsis_parse_command(struct scsi_info *vscsi,
60                                     struct viosrp_crq *crq);
61
62 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
63
64 static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
65                                       struct srp_rsp *rsp)
66 {
67         u32 residual_count = se_cmd->residual_count;
68
69         if (!residual_count)
70                 return;
71
72         if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
73                 if (se_cmd->data_direction == DMA_TO_DEVICE) {
74                         /* residual data from an underflow write */
75                         rsp->flags = SRP_RSP_FLAG_DOUNDER;
76                         rsp->data_out_res_cnt = cpu_to_be32(residual_count);
77                 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
78                         /* residual data from an underflow read */
79                         rsp->flags = SRP_RSP_FLAG_DIUNDER;
80                         rsp->data_in_res_cnt = cpu_to_be32(residual_count);
81                 }
82         } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
83                 if (se_cmd->data_direction == DMA_TO_DEVICE) {
84                         /* residual data from an overflow write */
85                         rsp->flags = SRP_RSP_FLAG_DOOVER;
86                         rsp->data_out_res_cnt = cpu_to_be32(residual_count);
87                 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
88                         /* residual data from an overflow read */
89                         rsp->flags = SRP_RSP_FLAG_DIOVER;
90                         rsp->data_in_res_cnt = cpu_to_be32(residual_count);
91                 }
92         }
93 }
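
/*
 * For reference, the mapping implemented above (SRP response flag and
 * residual count field vs. the target-core under/overflow bit and the
 * data direction) is:
 *
 *	SCF_UNDERFLOW_BIT + DMA_TO_DEVICE   -> SRP_RSP_FLAG_DOUNDER, data_out_res_cnt
 *	SCF_UNDERFLOW_BIT + DMA_FROM_DEVICE -> SRP_RSP_FLAG_DIUNDER, data_in_res_cnt
 *	SCF_OVERFLOW_BIT  + DMA_TO_DEVICE   -> SRP_RSP_FLAG_DOOVER,  data_out_res_cnt
 *	SCF_OVERFLOW_BIT  + DMA_FROM_DEVICE -> SRP_RSP_FLAG_DIOVER,  data_in_res_cnt
 */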
94
95 /**
96  * connection_broken() - Determine if the connection to the client is good
97  * @vscsi:      Pointer to our adapter structure
98  *
99  * This function attempts to send a ping MAD to the client. If the call to
100  * queue the request returns H_CLOSED then the connection has been broken
101  * and the function returns TRUE.
102  *
103  * EXECUTION ENVIRONMENT:
104  *      Interrupt or Process environment
105  */
106 static bool connection_broken(struct scsi_info *vscsi)
107 {
108         struct viosrp_crq *crq;
109         u64 buffer[2] = { 0, 0 };
110         long h_return_code;
111         bool rc = false;
112
113         /* create a PING crq */
114         crq = (struct viosrp_crq *)&buffer;
115         crq->valid = VALID_CMD_RESP_EL;
116         crq->format = MESSAGE_IN_CRQ;
117         crq->status = PING;
118
119         h_return_code = h_send_crq(vscsi->dds.unit_id,
120                                    cpu_to_be64(buffer[MSG_HI]),
121                                    cpu_to_be64(buffer[MSG_LOW]));
122
123         pr_debug("connection_broken: rc %ld\n", h_return_code);
124
125         if (h_return_code == H_CLOSED)
126                 rc = true;
127
128         return rc;
129 }
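
/*
 * Illustrative usage sketch (not a definition of the API): callers in this
 * file that get H_PERMISSION back from an RDMA copy use connection_broken()
 * to decide which extra flag bits to hand to ibmvscsis_post_disconnect(),
 * along the lines of:
 *
 *	if (rc == H_PERMISSION && connection_broken(vscsi))
 *		flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
 *	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, flag_bits);
 */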
130
131 /**
132  * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
133  * @vscsi:      Pointer to our adapter structure
134  *
135  * This function calls h_free_crq, then frees the interrupt bit, etc.
136  * It must release the lock before doing so because of the time it can take
137  * for h_free_crq in PHYP.
138  * NOTE: the caller must make sure that state and/or flags will prevent the
139  *       interrupt handler from scheduling work.
140  * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
141  *       we can't do it here because we don't hold the lock.
142  *
143  * EXECUTION ENVIRONMENT:
144  *      Process level
145  */
146 static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
147 {
148         long qrc;
149         long rc = ADAPT_SUCCESS;
150         int ticks = 0;
151
152         do {
153                 qrc = h_free_crq(vscsi->dds.unit_id);
154                 switch (qrc) {
155                 case H_SUCCESS:
156                         break;
157
158                 case H_HARDWARE:
159                 case H_PARAMETER:
160                         dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
161                                 qrc);
162                         rc = ERROR;
163                         break;
164
165                 case H_BUSY:
166                 case H_LONG_BUSY_ORDER_1_MSEC:
167                         /* msleep not good for small values */
168                         usleep_range(1000, 2000);
169                         ticks += 1;
170                         break;
171                 case H_LONG_BUSY_ORDER_10_MSEC:
172                         usleep_range(10000, 20000);
173                         ticks += 10;
174                         break;
175                 case H_LONG_BUSY_ORDER_100_MSEC:
176                         msleep(100);
177                         ticks += 100;
178                         break;
179                 case H_LONG_BUSY_ORDER_1_SEC:
180                         ssleep(1);
181                         ticks += 1000;
182                         break;
183                 case H_LONG_BUSY_ORDER_10_SEC:
184                         ssleep(10);
185                         ticks += 10000;
186                         break;
187                 case H_LONG_BUSY_ORDER_100_SEC:
188                         ssleep(100);
189                         ticks += 100000;
190                         break;
191                 default:
192                         dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
193                                 qrc);
194                         rc = ERROR;
195                         break;
196                 }
197
198                 /*
199                  * don't wait more than 300 seconds;
200                  * ticks are in milliseconds, more or less
201                  */
202                 if (ticks > 300000 && qrc != H_SUCCESS) {
203                         rc = ERROR;
204                         dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
205                 }
206         } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
207
208         pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
209
210         return rc;
211 }
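
/*
 * For reference, the retry loop above backs off according to the hinted
 * hypervisor return code, charging roughly one "tick" per millisecond
 * slept against the 300 second budget:
 *
 *	H_BUSY, H_LONG_BUSY_ORDER_1_MSEC -> usleep_range(1000, 2000),   +1
 *	H_LONG_BUSY_ORDER_10_MSEC        -> usleep_range(10000, 20000), +10
 *	H_LONG_BUSY_ORDER_100_MSEC       -> msleep(100),                +100
 *	H_LONG_BUSY_ORDER_1_SEC          -> ssleep(1),                  +1000
 *	H_LONG_BUSY_ORDER_10_SEC         -> ssleep(10),                 +10000
 *	H_LONG_BUSY_ORDER_100_SEC        -> ssleep(100),                +100000
 */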
212
213 /**
214  * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
215  * @vscsi:      Pointer to our adapter structure
216  * @client_closed:      True if client closed its queue
217  *
218  * Deletes information specific to the client when the client goes away
219  *
220  * EXECUTION ENVIRONMENT:
221  *      Interrupt or Process
222  */
223 static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
224                                          bool client_closed)
225 {
226         vscsi->client_cap = 0;
227
228         /*
229          * Some things we don't want to clear if we're closing the queue,
230          * because some clients don't resend the host handshake when they
231          * get a transport event.
232          */
233         if (client_closed)
234                 vscsi->client_data.os_type = 0;
235 }
236
237 /**
238  * ibmvscsis_free_command_q() - Free Command Queue
239  * @vscsi:      Pointer to our adapter structure
240  *
241  * This function calls unregister_command_q, then clears interrupts and
242  * any pending interrupt acknowledgments associated with the command q.
243  * It also clears memory if there is no error.
244  *
245  * PHYP did not meet the PAPR architecture, so we must give up the
246  * lock.  This causes a timing hole regarding state change; to close the
247  * hole, this routine does accounting on any change that occurred while
248  * the lock was not held.
249  * NOTE: this routine must give up and then reacquire the interrupt lock;
250  *       the caller must make sure that state and/or flags will prevent the
251  *       interrupt handler from scheduling work.
252  *
253  * EXECUTION ENVIRONMENT:
254  *      Process level, interrupt lock is held
255  */
256 static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
257 {
258         int bytes;
259         u32 flags_under_lock;
260         u16 state_under_lock;
261         long rc = ADAPT_SUCCESS;
262
263         if (!(vscsi->flags & CRQ_CLOSED)) {
264                 vio_disable_interrupts(vscsi->dma_dev);
265
266                 state_under_lock = vscsi->new_state;
267                 flags_under_lock = vscsi->flags;
268                 vscsi->phyp_acr_state = 0;
269                 vscsi->phyp_acr_flags = 0;
270
271                 spin_unlock_bh(&vscsi->intr_lock);
272                 rc = ibmvscsis_unregister_command_q(vscsi);
273                 spin_lock_bh(&vscsi->intr_lock);
274
275                 if (state_under_lock != vscsi->new_state)
276                         vscsi->phyp_acr_state = vscsi->new_state;
277
278                 vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
279
280                 if (rc == ADAPT_SUCCESS) {
281                         bytes = vscsi->cmd_q.size * PAGE_SIZE;
282                         memset(vscsi->cmd_q.base_addr, 0, bytes);
283                         vscsi->cmd_q.index = 0;
284                         vscsi->flags |= CRQ_CLOSED;
285
286                         ibmvscsis_delete_client_info(vscsi, false);
287                 }
288
289                 pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
290                          vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
291                          vscsi->phyp_acr_state);
292         }
293         return rc;
294 }
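
/*
 * The phyp_acr_state/phyp_acr_flags bookkeeping above is consumed by
 * ibmvscsis_adapter_idle(), which replays any disconnect request that was
 * posted while the interrupt lock was dropped around h_free_crq().
 */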
295
296 /**
297  * ibmvscsis_cmd_q_dequeue() - Get valid Command element
298  * @mask:       Mask to use in case index wraps
299  * @current_index:      Current index into command queue
300  * @base_addr:  Pointer to start of command queue
301  *
302  * Returns a pointer to a valid command element, or NULL if the command
303  * queue is empty.
304  *
305  * EXECUTION ENVIRONMENT:
306  *      Interrupt environment, interrupt lock held
307  */
308 static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
309                                                   uint *current_index,
310                                                   struct viosrp_crq *base_addr)
311 {
312         struct viosrp_crq *ptr;
313
314         ptr = base_addr + *current_index;
315
316         if (ptr->valid) {
317                 *current_index = (*current_index + 1) & mask;
318                 dma_rmb();
319         } else {
320                 ptr = NULL;
321         }
322
323         return ptr;
324 }
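
/*
 * Note that the "(*current_index + 1) & mask" wrap above assumes the queue
 * length is a power of two, with mask == nr_elements - 1.  For example,
 * with 256 elements mask is 0xff, so index 255 wraps back to 0:
 *
 *	(255 + 1) & 0xff == 0
 */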
325
326 /**
327  * ibmvscsis_send_init_message() - send initialize message to the client
328  * @vscsi:      Pointer to our adapter structure
329  * @format:     Which Init Message format to send
330  *
331  * EXECUTION ENVIRONMENT:
332  *      Interrupt environment interrupt lock held
333  */
334 static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
335 {
336         struct viosrp_crq *crq;
337         u64 buffer[2] = { 0, 0 };
338         long rc;
339
340         crq = (struct viosrp_crq *)&buffer;
341         crq->valid = VALID_INIT_MSG;
342         crq->format = format;
343         rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
344                         cpu_to_be64(buffer[MSG_LOW]));
345
346         return rc;
347 }
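
/*
 * As with the PING element in connection_broken(), the 16-byte CRQ element
 * is built in a local pair of u64s and handed to h_send_crq() as its high
 * and low halves (buffer[MSG_HI] and buffer[MSG_LOW]), converted to big
 * endian for the hypervisor.
 */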
348
349 /**
350  * ibmvscsis_check_init_msg() - Check init message valid
351  * @vscsi:      Pointer to our adapter structure
352  * @format:     Pointer to return format of Init Message, if any.
353  *              Set to UNUSED_FORMAT if no Init Message in queue.
354  *
355  * Checks if an initialize message was queued by the initiator
356  * after the queue was created and before the interrupt was enabled.
357  *
358  * EXECUTION ENVIRONMENT:
359  *      Process level only, interrupt lock held
360  */
361 static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
362 {
363         struct viosrp_crq *crq;
364         long rc = ADAPT_SUCCESS;
365
366         crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
367                                       vscsi->cmd_q.base_addr);
368         if (!crq) {
369                 *format = (uint)UNUSED_FORMAT;
370         } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
371                 *format = (uint)INIT_MSG;
372                 crq->valid = INVALIDATE_CMD_RESP_EL;
373                 dma_rmb();
374
375                 /*
376                  * the caller has ensured that no initialize message was
377                  * sent after the queue was created, so there should be no
378                  * other message on the queue.
379                  */
380                 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
381                                               &vscsi->cmd_q.index,
382                                               vscsi->cmd_q.base_addr);
383                 if (crq) {
384                         *format = (uint)(crq->format);
385                         rc = ERROR;
386                         crq->valid = INVALIDATE_CMD_RESP_EL;
387                         dma_rmb();
388                 }
389         } else {
390                 *format = (uint)(crq->format);
391                 rc = ERROR;
392                 crq->valid = INVALIDATE_CMD_RESP_EL;
393                 dma_rmb();
394         }
395
396         return rc;
397 }
398
399 /**
400  * ibmvscsis_disconnect() - Helper function to disconnect
401  * @work:       Pointer to work_struct, gives access to our adapter structure
402  *
403  * An error has occurred or the driver received a Transport event,
404  * and the driver is requesting that the command queue be de-registered
405  * in a safe manner. If there is no outstanding I/O then we can stop the
406  * queue. If we are restarting the queue it will be reflected in
407  * the state of the adapter.
408  *
409  * EXECUTION ENVIRONMENT:
410  *      Process environment
411  */
412 static void ibmvscsis_disconnect(struct work_struct *work)
413 {
414         struct scsi_info *vscsi = container_of(work, struct scsi_info,
415                                                proc_work);
416         u16 new_state;
417         bool wait_idle = false;
418
419         spin_lock_bh(&vscsi->intr_lock);
420         new_state = vscsi->new_state;
421         vscsi->new_state = 0;
422
423         pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
424                  vscsi->state);
425
426         /*
427          * check which state we are in and see if we
428          * should transition to the new state
429          */
430         switch (vscsi->state) {
431         /* Should never be called while in this state. */
432         case NO_QUEUE:
433         /*
434          * Can never transition from this state;
435          * ignore errors and log out.
436          */
437         case UNCONFIGURING:
438                 break;
439
440         /* can transition from this state to UNCONFIGURING */
441         case ERR_DISCONNECT:
442                 if (new_state == UNCONFIGURING)
443                         vscsi->state = new_state;
444                 break;
445
446         /*
447          * Can transition from this state to unconfiguring
448          * or err disconnect.
449          */
450         case ERR_DISCONNECT_RECONNECT:
451                 switch (new_state) {
452                 case UNCONFIGURING:
453                 case ERR_DISCONNECT:
454                         vscsi->state = new_state;
455                         break;
456
457                 case WAIT_IDLE:
458                         break;
459                 default:
460                         break;
461                 }
462                 break;
463
464         /* can transition from this state to UNCONFIGURING */
465         case ERR_DISCONNECTED:
466                 if (new_state == UNCONFIGURING)
467                         vscsi->state = new_state;
468                 break;
469
470         case WAIT_ENABLED:
471                 switch (new_state) {
472                 case UNCONFIGURING:
473                         vscsi->state = new_state;
474                         vscsi->flags |= RESPONSE_Q_DOWN;
475                         vscsi->flags &= ~(SCHEDULE_DISCONNECT |
476                                           DISCONNECT_SCHEDULED);
477                         dma_rmb();
478                         if (vscsi->flags & CFG_SLEEPING) {
479                                 vscsi->flags &= ~CFG_SLEEPING;
480                                 complete(&vscsi->unconfig);
481                         }
482                         break;
483
484                 /* should never happen */
485                 case ERR_DISCONNECT:
486                 case ERR_DISCONNECT_RECONNECT:
487                 case WAIT_IDLE:
488                         dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
489                                 vscsi->state);
490                         break;
491                 }
492                 break;
493
494         case WAIT_IDLE:
495                 switch (new_state) {
496                 case UNCONFIGURING:
497                         vscsi->flags |= RESPONSE_Q_DOWN;
498                         vscsi->state = new_state;
499                         vscsi->flags &= ~(SCHEDULE_DISCONNECT |
500                                           DISCONNECT_SCHEDULED);
501                         ibmvscsis_free_command_q(vscsi);
502                         break;
503                 case ERR_DISCONNECT:
504                 case ERR_DISCONNECT_RECONNECT:
505                         vscsi->state = new_state;
506                         break;
507                 }
508                 break;
509
510         /*
511          * Initiator has not done a successful SRP login,
512          * or has done a successful SRP logout (adapter was not
513          * busy).  In the first case there can be responses queued
514          * waiting for space on the initiator's response queue (MAD).
515          * In the second case the adapter is idle.  Assume the worst
516          * case, i.e. the first case.
517          */
518         case WAIT_CONNECTION:
519         case CONNECTED:
520         case SRP_PROCESSING:
521                 wait_idle = true;
522                 vscsi->state = new_state;
523                 break;
524
525         /* can transition from this state to UNCONFIGURING */
526         case UNDEFINED:
527                 if (new_state == UNCONFIGURING)
528                         vscsi->state = new_state;
529                 break;
530         default:
531                 break;
532         }
533
534         if (wait_idle) {
535                 pr_debug("disconnect start wait, active %d, sched %d\n",
536                          (int)list_empty(&vscsi->active_q),
537                          (int)list_empty(&vscsi->schedule_q));
538                 if (!list_empty(&vscsi->active_q) ||
539                     !list_empty(&vscsi->schedule_q)) {
540                         vscsi->flags |= WAIT_FOR_IDLE;
541                         pr_debug("disconnect flags 0x%x\n", vscsi->flags);
542                         /*
543                          * This routine can not be called with the interrupt
544                          * lock held.
545                          */
546                         spin_unlock_bh(&vscsi->intr_lock);
547                         wait_for_completion(&vscsi->wait_idle);
548                         spin_lock_bh(&vscsi->intr_lock);
549                 }
550                 pr_debug("disconnect stop wait\n");
551
552                 ibmvscsis_adapter_idle(vscsi);
553         }
554
555         spin_unlock_bh(&vscsi->intr_lock);
556 }
557
558 /**
559  * ibmvscsis_post_disconnect() - Schedule the disconnect
560  * @vscsi:      Pointer to our adapter structure
561  * @new_state:  State to move to after disconnecting
562  * @flag_bits:  Flags to turn on in adapter structure
563  *
564  * If it's already been scheduled, then see if we need to "upgrade"
565  * the new state (if the one passed in is more "severe" than the
566  * previous one).
567  *
568  * PRECONDITION:
569  *      interrupt lock is held
570  */
571 static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
572                                       uint flag_bits)
573 {
574         uint state;
575
576         /* check the validity of the new state */
577         switch (new_state) {
578         case UNCONFIGURING:
579         case ERR_DISCONNECT:
580         case ERR_DISCONNECT_RECONNECT:
581         case WAIT_IDLE:
582                 break;
583
584         default:
585                 dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
586                         new_state);
587                 return;
588         }
589
590         vscsi->flags |= flag_bits;
591
592         pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
593                  new_state, flag_bits, vscsi->flags, vscsi->state);
594
595         if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
596                 vscsi->flags |= SCHEDULE_DISCONNECT;
597                 vscsi->new_state = new_state;
598
599                 INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
600                 (void)queue_work(vscsi->work_q, &vscsi->proc_work);
601         } else {
602                 if (vscsi->new_state)
603                         state = vscsi->new_state;
604                 else
605                         state = vscsi->state;
606
607                 switch (state) {
608                 case NO_QUEUE:
609                 case UNCONFIGURING:
610                         break;
611
612                 case ERR_DISCONNECTED:
613                 case ERR_DISCONNECT:
614                 case UNDEFINED:
615                         if (new_state == UNCONFIGURING)
616                                 vscsi->new_state = new_state;
617                         break;
618
619                 case ERR_DISCONNECT_RECONNECT:
620                         switch (new_state) {
621                         case UNCONFIGURING:
622                         case ERR_DISCONNECT:
623                                 vscsi->new_state = new_state;
624                                 break;
625                         default:
626                                 break;
627                         }
628                         break;
629
630                 case WAIT_ENABLED:
631                 case WAIT_IDLE:
632                 case WAIT_CONNECTION:
633                 case CONNECTED:
634                 case SRP_PROCESSING:
635                         vscsi->new_state = new_state;
636                         break;
637
638                 default:
639                         break;
640                 }
641         }
642
643         pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
644                  vscsi->flags, vscsi->new_state);
645 }
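
/*
 * For orientation, typical call sites in this file look like:
 *
 *	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 *	ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
 *				  (RESPONSE_Q_DOWN | TRANS_EVENT));
 *
 * i.e. error paths ask for a disconnect/reconnect-style recovery, while
 * transport events ask to wait for the adapter to go idle and mark the
 * response queue down.
 */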
646
647 /**
648  * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
649  * @vscsi:      Pointer to our adapter structure
650  *
651  * Must be called with interrupt lock held.
652  */
653 static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
654 {
655         long rc = ADAPT_SUCCESS;
656
657         switch (vscsi->state) {
658         case NO_QUEUE:
659         case ERR_DISCONNECT:
660         case ERR_DISCONNECT_RECONNECT:
661         case ERR_DISCONNECTED:
662         case UNCONFIGURING:
663         case UNDEFINED:
664                 rc = ERROR;
665                 break;
666
667         case WAIT_CONNECTION:
668                 vscsi->state = CONNECTED;
669                 break;
670
671         case WAIT_IDLE:
672         case SRP_PROCESSING:
673         case CONNECTED:
674         case WAIT_ENABLED:
675         default:
676                 rc = ERROR;
677                 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
678                         vscsi->state);
679                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
680                 break;
681         }
682
683         return rc;
684 }
685
686 /**
687  * ibmvscsis_handle_init_msg() - Respond to an Init Message
688  * @vscsi:      Pointer to our adapter structure
689  *
690  * Must be called with interrupt lock held.
691  */
692 static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
693 {
694         long rc = ADAPT_SUCCESS;
695
696         switch (vscsi->state) {
697         case WAIT_CONNECTION:
698                 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
699                 switch (rc) {
700                 case H_SUCCESS:
701                         vscsi->state = CONNECTED;
702                         break;
703
704                 case H_PARAMETER:
705                         dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
706                                 rc);
707                         ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
708                         break;
709
710                 case H_DROPPED:
711                         dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
712                                 rc);
713                         rc = ERROR;
714                         ibmvscsis_post_disconnect(vscsi,
715                                                   ERR_DISCONNECT_RECONNECT, 0);
716                         break;
717
718                 case H_CLOSED:
719                         pr_warn("init_msg: failed to send, rc %ld\n", rc);
720                         rc = 0;
721                         break;
722                 }
723                 break;
724
725         case UNDEFINED:
726                 rc = ERROR;
727                 break;
728
729         case UNCONFIGURING:
730                 break;
731
732         case WAIT_ENABLED:
733         case CONNECTED:
734         case SRP_PROCESSING:
735         case WAIT_IDLE:
736         case NO_QUEUE:
737         case ERR_DISCONNECT:
738         case ERR_DISCONNECT_RECONNECT:
739         case ERR_DISCONNECTED:
740         default:
741                 rc = ERROR;
742                 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
743                         vscsi->state);
744                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
745                 break;
746         }
747
748         return rc;
749 }
750
751 /**
752  * ibmvscsis_init_msg() - Respond to an init message
753  * @vscsi:      Pointer to our adapter structure
754  * @crq:        Pointer to CRQ element containing the Init Message
755  *
756  * EXECUTION ENVIRONMENT:
757  *      Interrupt, interrupt lock held
758  */
759 static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
760 {
761         long rc = ADAPT_SUCCESS;
762
763         pr_debug("init_msg: state 0x%hx\n", vscsi->state);
764
765         rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
766                       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
767                       0);
768         if (rc == H_SUCCESS) {
769                 vscsi->client_data.partition_number =
770                         be64_to_cpu(*(u64 *)vscsi->map_buf);
771                 pr_debug("init_msg, part num %d\n",
772                          vscsi->client_data.partition_number);
773         } else {
774                 pr_debug("init_msg h_vioctl rc %ld\n", rc);
775                 rc = ADAPT_SUCCESS;
776         }
777
778         if (crq->format == INIT_MSG) {
779                 rc = ibmvscsis_handle_init_msg(vscsi);
780         } else if (crq->format == INIT_COMPLETE_MSG) {
781                 rc = ibmvscsis_handle_init_compl_msg(vscsi);
782         } else {
783                 rc = ERROR;
784                 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
785                         (uint)crq->format);
786                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
787         }
788
789         return rc;
790 }
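
/*
 * A failed H_GET_PARTNER_INFO is deliberately non-fatal here (rc is reset
 * to ADAPT_SUCCESS): if the partition number could not be read from PHYP,
 * ibmvscsis_adapter_info() will pick it up later from the client's
 * ADAPTER_INFO MAD.
 */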
791
792 /**
793  * ibmvscsis_establish_new_q() - Establish new CRQ queue
794  * @vscsi:      Pointer to our adapter structure
795  *
796  * Must be called with interrupt lock held.
797  */
798 static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
799 {
800         long rc = ADAPT_SUCCESS;
801         uint format;
802
803         vscsi->flags &= PRESERVE_FLAG_FIELDS;
804         vscsi->rsp_q_timer.timer_pops = 0;
805         vscsi->debit = 0;
806         vscsi->credit = 0;
807
808         rc = vio_enable_interrupts(vscsi->dma_dev);
809         if (rc) {
810                 pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
811                         rc);
812                 return rc;
813         }
814
815         rc = ibmvscsis_check_init_msg(vscsi, &format);
816         if (rc) {
817                 dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
818                         rc);
819                 return rc;
820         }
821
822         if (format == UNUSED_FORMAT) {
823                 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
824                 switch (rc) {
825                 case H_SUCCESS:
826                 case H_DROPPED:
827                 case H_CLOSED:
828                         rc = ADAPT_SUCCESS;
829                         break;
830
831                 case H_PARAMETER:
832                 case H_HARDWARE:
833                         break;
834
835                 default:
836                         vscsi->state = UNDEFINED;
837                         rc = H_HARDWARE;
838                         break;
839                 }
840         } else if (format == INIT_MSG) {
841                 rc = ibmvscsis_handle_init_msg(vscsi);
842         }
843
844         return rc;
845 }
846
847 /**
848  * ibmvscsis_reset_queue() - Reset CRQ Queue
849  * @vscsi:      Pointer to our adapter structure
850  *
851  * This function calls h_free_crq and then h_reg_crq, and does all
852  * of the bookkeeping to get us back to where we can communicate.
853  *
854  * Actually, we don't always call h_free_crq.  A problem was discovered
855  * where one partition would close and reopen its queue, which would
856  * cause its partner to get a transport event, which would cause it to
857  * close and reopen its queue, which would cause the original partition
858  * to get a transport event, etc., etc.  To prevent this, we don't
859  * actually close our queue if the client initiated the reset (i.e.
860  * either we got a transport event or we have detected that the client's
861  * queue is gone).
862  *
863  * EXECUTION ENVIRONMENT:
864  *      Process environment, called with interrupt lock held
865  */
866 static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
867 {
868         int bytes;
869         long rc = ADAPT_SUCCESS;
870
871         pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
872
873         /* don't reset, the client did it for us */
874         if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
875                 vscsi->flags &= PRESERVE_FLAG_FIELDS;
876                 vscsi->rsp_q_timer.timer_pops = 0;
877                 vscsi->debit = 0;
878                 vscsi->credit = 0;
879                 vscsi->state = WAIT_CONNECTION;
880                 vio_enable_interrupts(vscsi->dma_dev);
881         } else {
882                 rc = ibmvscsis_free_command_q(vscsi);
883                 if (rc == ADAPT_SUCCESS) {
884                         vscsi->state = WAIT_CONNECTION;
885
886                         bytes = vscsi->cmd_q.size * PAGE_SIZE;
887                         rc = h_reg_crq(vscsi->dds.unit_id,
888                                        vscsi->cmd_q.crq_token, bytes);
889                         if (rc == H_CLOSED || rc == H_SUCCESS) {
890                                 rc = ibmvscsis_establish_new_q(vscsi);
891                         }
892
893                         if (rc != ADAPT_SUCCESS) {
894                                 pr_debug("reset_queue: reg_crq rc %ld\n", rc);
895
896                                 vscsi->state = ERR_DISCONNECTED;
897                                 vscsi->flags |= RESPONSE_Q_DOWN;
898                                 ibmvscsis_free_command_q(vscsi);
899                         }
900                 } else {
901                         vscsi->state = ERR_DISCONNECTED;
902                         vscsi->flags |= RESPONSE_Q_DOWN;
903                 }
904         }
905 }
906
907 /**
908  * ibmvscsis_free_cmd_resources() - Free command resources
909  * @vscsi:      Pointer to our adapter structure
910  * @cmd:        Command which is no longer in use
911  *
912  * Must be called with interrupt lock held.
913  */
914 static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
915                                          struct ibmvscsis_cmd *cmd)
916 {
917         struct iu_entry *iue = cmd->iue;
918
919         switch (cmd->type) {
920         case TASK_MANAGEMENT:
921         case SCSI_CDB:
922                 /*
923                  * When the queue goes down this value is cleared, so it
924                  * cannot be cleared in this general purpose function.
925                  */
926                 if (vscsi->debit)
927                         vscsi->debit -= 1;
928                 break;
929         case ADAPTER_MAD:
930                 vscsi->flags &= ~PROCESSING_MAD;
931                 break;
932         case UNSET_TYPE:
933                 break;
934         default:
935                 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
936                         cmd->type);
937                 break;
938         }
939
940         cmd->iue = NULL;
941         list_add_tail(&cmd->list, &vscsi->free_cmd);
942         srp_iu_put(iue);
943
944         if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
945             list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
946                 vscsi->flags &= ~WAIT_FOR_IDLE;
947                 complete(&vscsi->wait_idle);
948         }
949 }
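
/*
 * The complete() above pairs with the wait_for_completion(&vscsi->wait_idle)
 * in ibmvscsis_disconnect(): once the active, schedule and waiting_rsp
 * queues have all drained, the disconnect worker is allowed to proceed.
 */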
950
951 /**
952  * ibmvscsis_trans_event() - Handle a Transport Event
953  * @vscsi:      Pointer to our adapter structure
954  * @crq:        Pointer to CRQ entry containing the Transport Event
955  *
956  * Do the logic to close the I_T nexus.  This function may not
957  * behave to specification.
958  *
959  * EXECUTION ENVIRONMENT:
960  *      Interrupt, interrupt lock held
961  */
962 static long ibmvscsis_trans_event(struct scsi_info *vscsi,
963                                   struct viosrp_crq *crq)
964 {
965         long rc = ADAPT_SUCCESS;
966
967         pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
968                  (int)crq->format, vscsi->flags, vscsi->state);
969
970         switch (crq->format) {
971         case MIGRATED:
972         case PARTNER_FAILED:
973         case PARTNER_DEREGISTER:
974                 ibmvscsis_delete_client_info(vscsi, true);
975                 break;
976
977         default:
978                 rc = ERROR;
979                 dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
980                         (uint)crq->format);
981                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
982                                           RESPONSE_Q_DOWN);
983                 break;
984         }
985
986         if (rc == ADAPT_SUCCESS) {
987                 switch (vscsi->state) {
988                 case NO_QUEUE:
989                 case ERR_DISCONNECTED:
990                 case UNDEFINED:
991                         break;
992
993                 case UNCONFIGURING:
994                         vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
995                         break;
996
997                 case WAIT_ENABLED:
998                         break;
999
1000                 case WAIT_CONNECTION:
1001                         break;
1002
1003                 case CONNECTED:
1004                         ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1005                                                   (RESPONSE_Q_DOWN |
1006                                                    TRANS_EVENT));
1007                         break;
1008
1009                 case SRP_PROCESSING:
1010                         if ((vscsi->debit > 0) ||
1011                             !list_empty(&vscsi->schedule_q) ||
1012                             !list_empty(&vscsi->waiting_rsp) ||
1013                             !list_empty(&vscsi->active_q)) {
1014                                 pr_debug("debit %d, sched %d, wait %d, active %d\n",
1015                                          vscsi->debit,
1016                                          (int)list_empty(&vscsi->schedule_q),
1017                                          (int)list_empty(&vscsi->waiting_rsp),
1018                                          (int)list_empty(&vscsi->active_q));
1019                                 pr_warn("connection lost with outstanding work\n");
1020                         } else {
1021                                 pr_debug("trans_event: SRP Processing, but no outstanding work\n");
1022                         }
1023
1024                         ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
1025                                                   (RESPONSE_Q_DOWN |
1026                                                    TRANS_EVENT));
1027                         break;
1028
1029                 case ERR_DISCONNECT:
1030                 case ERR_DISCONNECT_RECONNECT:
1031                 case WAIT_IDLE:
1032                         vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
1033                         break;
1034                 }
1035         }
1036
1037         rc = vscsi->flags & SCHEDULE_DISCONNECT;
1038
1039         pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1040                  vscsi->flags, vscsi->state, rc);
1041
1042         return rc;
1043 }
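
/*
 * The return value above is simply vscsi->flags & SCHEDULE_DISCONNECT, so
 * a caller such as ibmvscsis_poll_cmd_q() can keep servicing consecutive
 * transport events while skipping normal command processing once a
 * disconnect has been scheduled.
 */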
1044
1045 /**
1046  * ibmvscsis_poll_cmd_q() - Poll Command Queue
1047  * @vscsi:      Pointer to our adapter structure
1048  *
1049  * Called to handle command elements that may have arrived while
1050  * interrupts were disabled.
1051  *
1052  * EXECUTION ENVIRONMENT:
1053  *      intr_lock must be held
1054  */
1055 static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
1056 {
1057         struct viosrp_crq *crq;
1058         long rc;
1059         bool ack = true;
1060         volatile u8 valid;
1061
1062         pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
1063                  vscsi->flags, vscsi->state, vscsi->cmd_q.index);
1064
1065         rc = vscsi->flags & SCHEDULE_DISCONNECT;
1066         crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1067         valid = crq->valid;
1068         dma_rmb();
1069
1070         while (valid) {
1071 poll_work:
1072                 vscsi->cmd_q.index =
1073                         (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
1074
1075                 if (!rc) {
1076                         rc = ibmvscsis_parse_command(vscsi, crq);
1077                 } else {
1078                         if ((uint)crq->valid == VALID_TRANS_EVENT) {
1079                                 /*
1080                                  * must service the transport layer events even
1081                                  * in an error state, dont break out until all
1082                                  * the consecutive transport events have been
1083                                  * processed
1084                                  */
1085                                 rc = ibmvscsis_trans_event(vscsi, crq);
1086                         } else if (vscsi->flags & TRANS_EVENT) {
1087                                 /*
1088                                  * if a transport event has occurred, leave
1089                                  * everything but transport events on the queue
1090                                  */
1091                                 pr_debug("poll_cmd_q, ignoring\n");
1092
1093                                 /*
1094                                  * need to decrement the queue index so we can
1095                                  * look at the element again
1096                                  */
1097                                 if (vscsi->cmd_q.index)
1098                                         vscsi->cmd_q.index -= 1;
1099                                 else
1100                                         /*
1101                                          * index is at 0, so it just wrapped;
1102                                          * have it index the last element in the queue
1103                                          */
1104                                         vscsi->cmd_q.index = vscsi->cmd_q.mask;
1105                                 break;
1106                         }
1107                 }
1108
1109                 crq->valid = INVALIDATE_CMD_RESP_EL;
1110
1111                 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
1112                 valid = crq->valid;
1113                 dma_rmb();
1114         }
1115
1116         if (!rc) {
1117                 if (ack) {
1118                         vio_enable_interrupts(vscsi->dma_dev);
1119                         ack = false;
1120                         pr_debug("poll_cmd_q, reenabling interrupts\n");
1121                 }
1122                 valid = crq->valid;
1123                 dma_rmb();
1124                 if (valid)
1125                         goto poll_work;
1126         }
1127
1128         pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
1129 }
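
/*
 * The re-check after vio_enable_interrupts() above closes the window in
 * which a new element could land in the queue between draining it and
 * re-enabling the interrupt; if one is found, processing jumps back to
 * poll_work rather than waiting for an interrupt that may never arrive.
 */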
1130
1131 /**
1132  * ibmvscsis_free_cmd_qs() - Free elements in queue
1133  * @vscsi:      Pointer to our adapter structure
1134  *
1135  * Free all of the elements on all queues that are waiting for
1136  * whatever reason.
1137  *
1138  * PRECONDITION:
1139  *      Called with interrupt lock held
1140  */
1141 static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1142 {
1143         struct ibmvscsis_cmd *cmd, *nxt;
1144
1145         pr_debug("free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
1146                  (int)list_empty(&vscsi->waiting_rsp),
1147                  vscsi->rsp_q_timer.started);
1148
1149         list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1150                 list_del(&cmd->list);
1151                 ibmvscsis_free_cmd_resources(vscsi, cmd);
1152         }
1153 }
1154
1155 /**
1156  * ibmvscsis_get_free_cmd() - Get free command from list
1157  * @vscsi:      Pointer to our adapter structure
1158  *
1159  * Must be called with interrupt lock held.
1160  */
1161 static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1162 {
1163         struct ibmvscsis_cmd *cmd = NULL;
1164         struct iu_entry *iue;
1165
1166         iue = srp_iu_get(&vscsi->target);
1167         if (iue) {
1168                 cmd = list_first_entry_or_null(&vscsi->free_cmd,
1169                                                struct ibmvscsis_cmd, list);
1170                 if (cmd) {
1171                         list_del(&cmd->list);
1172                         cmd->iue = iue;
1173                         cmd->type = UNSET_TYPE;
1174                         memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1175                 } else {
1176                         srp_iu_put(iue);
1177                 }
1178         }
1179
1180         return cmd;
1181 }
1182
1183 /**
1184  * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
1185  * @vscsi:      Pointer to our adapter structure
1186  *
1187  * This function is called when the adapter is idle while the driver
1188  * is attempting to clear an error condition.
1189  * The adapter is considered busy if any of its cmd queues
1190  * are non-empty. This function can be invoked
1191  * from the off level disconnect function.
1192  *
1193  * EXECUTION ENVIRONMENT:
1194  *      Process environment called with interrupt lock held
1195  */
1196 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1197 {
1198         int free_qs = false;
1199
1200         pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
1201                  vscsi->state);
1202
1203         /* Only need to free qs if we're disconnecting from client */
1204         if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
1205                 free_qs = true;
1206
1207         switch (vscsi->state) {
1208         case UNCONFIGURING:
1209                 ibmvscsis_free_command_q(vscsi);
1210                 dma_rmb();
1211                 isync();
1212                 if (vscsi->flags & CFG_SLEEPING) {
1213                         vscsi->flags &= ~CFG_SLEEPING;
1214                         complete(&vscsi->unconfig);
1215                 }
1216                 break;
1217         case ERR_DISCONNECT_RECONNECT:
1218                 ibmvscsis_reset_queue(vscsi);
1219                 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
1220                 break;
1221
1222         case ERR_DISCONNECT:
1223                 ibmvscsis_free_command_q(vscsi);
1224                 vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
1225                 vscsi->flags |= RESPONSE_Q_DOWN;
1226                 if (vscsi->tport.enabled)
1227                         vscsi->state = ERR_DISCONNECTED;
1228                 else
1229                         vscsi->state = WAIT_ENABLED;
1230                 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1231                          vscsi->flags, vscsi->state);
1232                 break;
1233
1234         case WAIT_IDLE:
1235                 vscsi->rsp_q_timer.timer_pops = 0;
1236                 vscsi->debit = 0;
1237                 vscsi->credit = 0;
1238                 if (vscsi->flags & TRANS_EVENT) {
1239                         vscsi->state = WAIT_CONNECTION;
1240                         vscsi->flags &= PRESERVE_FLAG_FIELDS;
1241                 } else {
1242                         vscsi->state = CONNECTED;
1243                         vscsi->flags &= ~DISCONNECT_SCHEDULED;
1244                 }
1245
1246                 pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1247                          vscsi->flags, vscsi->state);
1248                 ibmvscsis_poll_cmd_q(vscsi);
1249                 break;
1250
1251         case ERR_DISCONNECTED:
1252                 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1253                 pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1254                          vscsi->flags, vscsi->state);
1255                 break;
1256
1257         default:
1258                 dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
1259                         vscsi->state);
1260                 break;
1261         }
1262
1263         if (free_qs)
1264                 ibmvscsis_free_cmd_qs(vscsi);
1265
1266         /*
1267          * There is a timing window where we could lose a disconnect request.
1268          * The known path to this window occurs during the DISCONNECT_RECONNECT
1269          * case above: reset_queue calls free_command_q, which will release the
1270          * interrupt lock.  During that time, a new post_disconnect call can be
1271          * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1272          * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1273          * will only set the new_state.  Now free_command_q reacquires the intr
1274          * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1275          * FIELDS), and the disconnect is lost.  This is particularly bad when
1276          * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1277          * forever.
1278          * The fix is that free_command_q sets the acr state and acr flags if
1279          * there was a change while the lock was not held.
1280          * Note that free_command_q writes to this state and clears it
1281          * before releasing the lock; different callers invoke free_command_q
1282          * at different times, so don't initialize these fields above.
1283          */
1284         if (vscsi->phyp_acr_state != 0) {
1285                 /*
1286                  * set any bits in flags that may have been cleared by
1287                  * a call to free command queue in switch statement
1288                  * or reset queue
1289                  */
1290                 vscsi->flags |= vscsi->phyp_acr_flags;
1291                 ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
1292                 vscsi->phyp_acr_state = 0;
1293                 vscsi->phyp_acr_flags = 0;
1294
1295                 pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1296                          vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1297                          vscsi->phyp_acr_state);
1298         }
1299
1300         pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1301                  vscsi->flags, vscsi->state, vscsi->new_state);
1302 }
1303
1304 /**
1305  * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
1306  * @vscsi:      Pointer to our adapter structure
1307  * @cmd:        Pointer to command element to use to process the request
1308  * @crq:        Pointer to CRQ entry containing the request
1309  *
1310  * Copy the srp information unit from the hosted
1311  * partition using remote dma
1312  *
1313  * EXECUTION ENVIRONMENT:
1314  *      Interrupt, interrupt lock held
1315  */
1316 static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1317                                       struct ibmvscsis_cmd *cmd,
1318                                       struct viosrp_crq *crq)
1319 {
1320         struct iu_entry *iue = cmd->iue;
1321         long rc = 0;
1322         u16 len;
1323
1324         len = be16_to_cpu(crq->IU_length);
1325         if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
1326                 dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
1327                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1328                 return SRP_VIOLATION;
1329         }
1330
1331         rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
1332                          be64_to_cpu(crq->IU_data_ptr),
1333                          vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
1334
1335         switch (rc) {
1336         case H_SUCCESS:
1337                 cmd->init_time = mftb();
1338                 iue->remote_token = crq->IU_data_ptr;
1339                 iue->iu_len = len;
1340                 pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1341                          be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1342                 break;
1343         case H_PERMISSION:
1344                 if (connection_broken(vscsi))
1345                         ibmvscsis_post_disconnect(vscsi,
1346                                                   ERR_DISCONNECT_RECONNECT,
1347                                                   (RESPONSE_Q_DOWN |
1348                                                    CLIENT_FAILED));
1349                 else
1350                         ibmvscsis_post_disconnect(vscsi,
1351                                                   ERR_DISCONNECT_RECONNECT, 0);
1352
1353                 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1354                         rc);
1355                 break;
1356         case H_DEST_PARM:
1357         case H_SOURCE_PARM:
1358         default:
1359                 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
1360                         rc);
1361                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1362                 break;
1363         }
1364
1365         return rc;
1366 }
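
/*
 * Note the h_copy_rdma() direction above: the information unit is pulled
 * from the client's memory (REMOTE liobn, crq->IU_data_ptr) into the local
 * SRP buffer (LOCAL liobn, iue->sbuf->dma).  The response path, e.g. in
 * ibmvscsis_adapter_info(), uses the opposite ordering to push data back
 * to the client.
 */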
1367
1368 /**
1369  * ibmvscsis_adapter_info() - Service an Adapter Info Management Datagram (MAD)
1370  * @vscsi:      Pointer to our adapter structure
1371  * @iue:        Information Unit containing the Adapter Info MAD request
1372  *
1373  * EXECUTION ENVIRONMENT:
1374  *      Interrupt adapter lock is held
1375  */
1376 static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1377                                    struct iu_entry *iue)
1378 {
1379         struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
1380         struct mad_adapter_info_data *info;
1381         uint flag_bits = 0;
1382         dma_addr_t token;
1383         long rc;
1384
1385         mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1386
1387         if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
1388                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1389                 return 0;
1390         }
1391
1392         info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1393                                   GFP_KERNEL);
1394         if (!info) {
1395                 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1396                         iue->target);
1397                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1398                 return 0;
1399         }
1400
1401         /* Get remote info */
1402         rc = h_copy_rdma(be16_to_cpu(mad->common.length),
1403                          vscsi->dds.window[REMOTE].liobn,
1404                          be64_to_cpu(mad->buffer),
1405                          vscsi->dds.window[LOCAL].liobn, token);
1406
1407         if (rc != H_SUCCESS) {
1408                 if (rc == H_PERMISSION) {
1409                         if (connection_broken(vscsi))
1410                                 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1411                 }
1412                 pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
1413                         rc);
1414                 pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1415                          be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1416                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1417                                           flag_bits);
1418                 goto free_dma;
1419         }
1420
1421         /*
1422          * Copy client info, but ignore partition number, which we
1423          * already got from phyp - unless we failed to get it from
1424          * phyp (e.g. if we're running on a p5 system).
1425          */
1426         if (vscsi->client_data.partition_number == 0)
1427                 vscsi->client_data.partition_number =
1428                         be32_to_cpu(info->partition_number);
1429         strncpy(vscsi->client_data.srp_version, info->srp_version,
1430                 sizeof(vscsi->client_data.srp_version));
1431         strncpy(vscsi->client_data.partition_name, info->partition_name,
1432                 sizeof(vscsi->client_data.partition_name));
1433         vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
1434         vscsi->client_data.os_type = be32_to_cpu(info->os_type);
1435
1436         /* Copy our info */
1437         strncpy(info->srp_version, SRP_VERSION,
1438                 sizeof(info->srp_version));
1439         strncpy(info->partition_name, vscsi->dds.partition_name,
1440                 sizeof(info->partition_name));
1441         info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
1442         info->mad_version = cpu_to_be32(MAD_VERSION_1);
1443         info->os_type = cpu_to_be32(LINUX);
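             /* advertise the largest transfer the target will accept (128 pages) */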
1444         memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1445         info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
1446
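             /* make sure the info buffer is fully written before the RDMA copy back to the client */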
1447         dma_wmb();
1448         rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
1449                          token, vscsi->dds.window[REMOTE].liobn,
1450                          be64_to_cpu(mad->buffer));
1451         switch (rc) {
1452         case H_SUCCESS:
1453                 break;
1454
1455         case H_SOURCE_PARM:
1456         case H_DEST_PARM:
1457         case H_PERMISSION:
1458                 if (connection_broken(vscsi))
1459                         flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
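                     /* fall through - log the copy failure and disconnect */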
1460         default:
1461                 dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1462                         rc);
1463                 ibmvscsis_post_disconnect(vscsi,
1464                                           ERR_DISCONNECT_RECONNECT,
1465                                           flag_bits);
1466                 break;
1467         }
1468
1469 free_dma:
1470         dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1471         pr_debug("Leaving adapter_info, rc %ld\n", rc);
1472
1473         return rc;
1474 }
1475
1476 /**
1477  * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Datagram
1478  * @vscsi:      Pointer to our adapter structure
1479  * @iue:        Information Unit containing the Capabilities MAD request
1480  *
1481  * NOTE: if you return an error from this routine you must be
1482  * disconnecting or you will cause a hang
1483  *
1484  * EXECUTION ENVIRONMENT:
1485  *      Interrupt, called with adapter lock held
1486  */
1487 static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1488 {
1489         struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
1490         struct capabilities *cap;
1491         struct mad_capability_common *common;
1492         dma_addr_t token;
1493         u16 olen, len, status, min_len, cap_len;
1494         u32 flag;
1495         uint flag_bits = 0;
1496         long rc = 0;
1497
1498         olen = be16_to_cpu(mad->common.length);
1499         /*
1500          * struct capabilities hardcodes a couple of capabilities after the
1501          * header, but the capabilities can actually be in any order.
1502          */
1503         min_len = offsetof(struct capabilities, migration);
1504         if ((olen < min_len) || (olen > PAGE_SIZE)) {
1505                 pr_warn("cap_mad: invalid len %d\n", olen);
1506                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1507                 return 0;
1508         }
1509
1510         cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1511                                  GFP_KERNEL);
1512         if (!cap) {
1513                 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1514                         iue->target);
1515                 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1516                 return 0;
1517         }
1518         rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
1519                          be64_to_cpu(mad->buffer),
1520                          vscsi->dds.window[LOCAL].liobn, token);
1521         if (rc == H_SUCCESS) {
1522                 strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
1523                         SRP_MAX_LOC_LEN);
1524
1525                 len = olen - min_len;
1526                 status = VIOSRP_MAD_SUCCESS;
1527                 common = (struct mad_capability_common *)&cap->migration;
1528
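                     /* walk the variable-length capability list that follows the fixed header */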
1529                 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1530                         pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
1531                                  len, be32_to_cpu(common->cap_type),
1532                                  be16_to_cpu(common->length));
1533
1534                         cap_len = be16_to_cpu(common->length);
1535                         if (cap_len > len) {
1536                                 dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
1537                                 status = VIOSRP_MAD_FAILED;
1538                                 break;
1539                         }
1540
1541                         if (cap_len == 0) {
1542                                 dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
1543                                 status = VIOSRP_MAD_FAILED;
1544                                 break;
1545                         }
1546
1547                         switch (common->cap_type) {
1548                         default:
1549                                 pr_debug("cap_mad: unsupported capability\n");
1550                                 common->server_support = 0;
1551                                 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1552                                 cap->flags &= ~flag;
1553                                 break;
1554                         }
1555
1556                         len = len - cap_len;
1557                         common = (struct mad_capability_common *)
1558                                 ((char *)common + cap_len);
1559                 }
1560
1561                 mad->common.status = cpu_to_be16(status);
1562
1563                 dma_wmb();
1564                 rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
1565                                  vscsi->dds.window[REMOTE].liobn,
1566                                  be64_to_cpu(mad->buffer));
1567
1568                 if (rc != H_SUCCESS) {
1569                         pr_debug("cap_mad: failed to copy to client, rc %ld\n",
1570                                  rc);
1571
1572                         if (rc == H_PERMISSION) {
1573                                 if (connection_broken(vscsi))
1574                                         flag_bits = (RESPONSE_Q_DOWN |
1575                                                      CLIENT_FAILED);
1576                         }
1577
1578                         pr_warn("cap_mad: error copying data to client, rc %ld\n",
1579                                 rc);
1580                         ibmvscsis_post_disconnect(vscsi,
1581                                                   ERR_DISCONNECT_RECONNECT,
1582                                                   flag_bits);
1583                 }
1584         }
1585
1586         dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1587
1588         pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1589                  rc, vscsi->client_cap);
1590
1591         return rc;
1592 }
1593
1594 /**
1595  * ibmvscsis_process_mad() - Service a MAnagement Datagram
1596  * @vscsi:      Pointer to our adapter structure
1597  * @iue:        Information Unit containing the MAD request
1598  *
1599  * Must be called with interrupt lock held.
1600  */
1601 static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1602 {
1603         struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1604         struct viosrp_empty_iu *empty;
1605         long rc = ADAPT_SUCCESS;
1606
1607         switch (be32_to_cpu(mad->type)) {
1608         case VIOSRP_EMPTY_IU_TYPE:
1609                 empty = &vio_iu(iue)->mad.empty_iu;
1610                 vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1611                 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1612                 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1613                 break;
1614         case VIOSRP_ADAPTER_INFO_TYPE:
1615                 rc = ibmvscsis_adapter_info(vscsi, iue);
1616                 break;
1617         case VIOSRP_CAPABILITIES_TYPE:
1618                 rc = ibmvscsis_cap_mad(vscsi, iue);
1619                 break;
1620         case VIOSRP_ENABLE_FAST_FAIL:
1621                 if (vscsi->state == CONNECTED) {
1622                         vscsi->fast_fail = true;
1623                         mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1624                 } else {
1625                         pr_warn("fast fail mad sent after login\n");
1626                         mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1627                 }
1628                 break;
1629         default:
1630                 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1631                 break;
1632         }
1633
1634         return rc;
1635 }
1636
1637 /**
1638  * srp_snd_msg_failed() - Handle an error when sending a response
1639  * @vscsi:      Pointer to our adapter structure
1640  * @rc:         The return code from the h_send_crq command
1641  *
1642  * Must be called with interrupt lock held.
1643  */
1644 static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1645 {
1646         ktime_t kt;
1647
1648         if (rc != H_DROPPED) {
1649                 ibmvscsis_free_cmd_qs(vscsi);
1650
1651                 if (rc == H_CLOSED)
1652                         vscsi->flags |= CLIENT_FAILED;
1653
1654                 /* don't flag the same problem multiple times */
1655                 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1656                         vscsi->flags |= RESPONSE_Q_DOWN;
1657                         if (!(vscsi->state & (ERR_DISCONNECT |
1658                                               ERR_DISCONNECT_RECONNECT |
1659                                               ERR_DISCONNECTED | UNDEFINED))) {
1660                                 dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1661                                         vscsi->state, vscsi->flags, rc);
1662                         }
1663                         ibmvscsis_post_disconnect(vscsi,
1664                                                   ERR_DISCONNECT_RECONNECT, 0);
1665                 }
1666                 return;
1667         }
1668
1669         /*
1670          * The response queue is full.
1671          * If the server is processing SRP requests, i.e.
1672          * the client has successfully done an
1673          * SRP_LOGIN, then it will wait forever for room in
1674          * is attempting to unconfigure the server, then one
1675          * is attempting to unconfigure the server then one
1676          * or more children will be in a state where
1677          * they are being removed. So if there is even one
1678          * child being removed then the driver assumes
1679          * the system admin is attempting to break the
1680          * connection with the client and MAX_TIMER_POPS
1681          * is honored.
1682          */
1683         if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1684             (vscsi->state == SRP_PROCESSING)) {
1685                 pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1686                          vscsi->flags, (int)vscsi->rsp_q_timer.started,
1687                          vscsi->rsp_q_timer.timer_pops);
1688
1689                 /*
1690                  * Check if the timer is running; if it
1691                  * is not then start it up.
1692                  */
1693                 if (!vscsi->rsp_q_timer.started) {
1694                         if (vscsi->rsp_q_timer.timer_pops <
1695                             MAX_TIMER_POPS) {
1696                                 kt = ktime_set(0, WAIT_NANO_SECONDS);
1697                         } else {
1698                                 /*
1699                                  * slide the timeslice if the maximum
1700                                  * timer pops have already happened
1701                                  */
1702                                 kt = ktime_set(WAIT_SECONDS, 0);
1703                         }
1704
1705                         vscsi->rsp_q_timer.started = true;
1706                         hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
1707                                       HRTIMER_MODE_REL);
1708                 }
1709         } else {
1710                 /*
1711                  * TBD: Do we need to worry about this? Need to get
1712                  *      remove working.
1713                  */
1714                 /*
1715                  * waited a long time and it appears the system admin
1716          * is bringing this driver down
1717                  */
1718                 vscsi->flags |= RESPONSE_Q_DOWN;
1719                 ibmvscsis_free_cmd_qs(vscsi);
1720                 /*
1721                  * if the driver is already attempting to disconnect
1722                  * from the client and has already logged an error
1723                  * trace this event but don't put it in the error log
1724                  */
1725                 if (!(vscsi->state & (ERR_DISCONNECT |
1726                                       ERR_DISCONNECT_RECONNECT |
1727                                       ERR_DISCONNECTED | UNDEFINED))) {
1728                         dev_err(&vscsi->dev, "client crq full too long\n");
1729                         ibmvscsis_post_disconnect(vscsi,
1730                                                   ERR_DISCONNECT_RECONNECT,
1731                                                   0);
1732                 }
1733         }
1734 }
1735
1736 /**
1737  * ibmvscsis_send_messages() - Send a Response
1738  * @vscsi:      Pointer to our adapter structure
1739  *
1740  * Send a response, first checking the waiting queue. Responses are
1741  * sent in the order they are received. If the response cannot be sent,
1742  * because the client queue is full, it stays on the waiting queue.
1743  *
1744  * PRECONDITION:
1745  *      Called with interrupt lock held
1746  */
1747 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1748 {
1749         u64 msg_hi = 0;
1750         /* note: do not attempt to access the IU_data_ptr with this pointer;
1751          * it is not valid
1752          */
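             /* the CRQ response element goes out as two 8-byte words: the header
              * built via msg_hi and the command's tag passed to h_send_crq()
              */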
1753         struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1754         struct ibmvscsis_cmd *cmd, *nxt;
1755         struct iu_entry *iue;
1756         long rc = ADAPT_SUCCESS;
1757
1758         if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1759                 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1760                         iue = cmd->iue;
1761
1762                         crq->valid = VALID_CMD_RESP_EL;
1763                         crq->format = cmd->rsp.format;
1764
1765                         if (cmd->flags & CMD_FAST_FAIL)
1766                                 crq->status = VIOSRP_ADAPTER_FAIL;
1767
1768                         crq->IU_length = cpu_to_be16(cmd->rsp.len);
1769
1770                         rc = h_send_crq(vscsi->dma_dev->unit_address,
1771                                         be64_to_cpu(msg_hi),
1772                                         be64_to_cpu(cmd->rsp.tag));
1773
1774                         pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1775                                  cmd, be64_to_cpu(cmd->rsp.tag), rc);
1776
1777                         /* if all ok free up the command element resources */
1778                         if (rc == H_SUCCESS) {
1779                                 /* some movement has occurred */
1780                                 vscsi->rsp_q_timer.timer_pops = 0;
1781                                 list_del(&cmd->list);
1782
1783                                 ibmvscsis_free_cmd_resources(vscsi, cmd);
1784                         } else {
1785                                 srp_snd_msg_failed(vscsi, rc);
1786                                 break;
1787                         }
1788                 }
1789
1790                 if (!rc) {
1791                         /*
1792                          * The timer could pop with the queue empty.  If
1793                          * this happens, rc will always indicate a
1794                          * success; clear the pop count.
1795                          */
1796                         vscsi->rsp_q_timer.timer_pops = 0;
1797                 }
1798         } else {
1799                 ibmvscsis_free_cmd_qs(vscsi);
1800         }
1801 }
1802
1803 /* Called with intr lock held */
1804 static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
1805                                     struct ibmvscsis_cmd *cmd,
1806                                     struct viosrp_crq *crq)
1807 {
1808         struct iu_entry *iue = cmd->iue;
1809         struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1810         uint flag_bits = 0;
1811         long rc;
1812
1813         dma_wmb();
1814         rc = h_copy_rdma(sizeof(struct mad_common),
1815                          vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
1816                          vscsi->dds.window[REMOTE].liobn,
1817                          be64_to_cpu(crq->IU_data_ptr));
1818         if (!rc) {
1819                 cmd->rsp.format = VIOSRP_MAD_FORMAT;
1820                 cmd->rsp.len = sizeof(struct mad_common);
1821                 cmd->rsp.tag = mad->tag;
1822                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
1823                 ibmvscsis_send_messages(vscsi);
1824         } else {
1825                 pr_debug("Error sending mad response, rc %ld\n", rc);
1826                 if (rc == H_PERMISSION) {
1827                         if (connection_broken(vscsi))
1828                                 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1829                 }
1830                 dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
1831                         rc);
1832
1833                 ibmvscsis_free_cmd_resources(vscsi, cmd);
1834                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1835                                           flag_bits);
1836         }
1837 }
1838
1839 /**
1840  * ibmvscsis_mad() - Service a MAnagement Datagram.
1841  * @vscsi:      Pointer to our adapter structure
1842  * @crq:        Pointer to the CRQ entry containing the MAD request
1843  *
1844  * EXECUTION ENVIRONMENT:
1845  *      Interrupt, called with adapter lock held
1846  */
1847 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1848 {
1849         struct iu_entry *iue;
1850         struct ibmvscsis_cmd *cmd;
1851         struct mad_common *mad;
1852         long rc = ADAPT_SUCCESS;
1853
1854         switch (vscsi->state) {
1855                 /*
1856                  * We have not exchanged Init Msgs yet, so this MAD was sent
1857                  * before the last Transport Event; client will not be
1858                  * expecting a response.
1859                  */
1860         case WAIT_CONNECTION:
1861                 pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
1862                          vscsi->flags);
1863                 return ADAPT_SUCCESS;
1864
1865         case SRP_PROCESSING:
1866         case CONNECTED:
1867                 break;
1868
1869                 /*
1870                  * We should never get here while we're in these states.
1871                  * Just log an error and get out.
1872                  */
1873         case UNCONFIGURING:
1874         case WAIT_IDLE:
1875         case ERR_DISCONNECT:
1876         case ERR_DISCONNECT_RECONNECT:
1877         default:
1878                 dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
1879                         vscsi->state);
1880                 return ADAPT_SUCCESS;
1881         }
1882
1883         cmd = ibmvscsis_get_free_cmd(vscsi);
1884         if (!cmd) {
1885                 dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
1886                         vscsi->debit);
1887                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1888                 return ERROR;
1889         }
1890         iue = cmd->iue;
1891         cmd->type = ADAPTER_MAD;
1892
1893         rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
1894         if (!rc) {
1895                 mad = (struct mad_common *)&vio_iu(iue)->mad;
1896
1897                 pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
1898
1899                 rc = ibmvscsis_process_mad(vscsi, iue);
1900
1901                 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
1902                          rc);
1903
1904                 if (!rc)
1905                         ibmvscsis_send_mad_resp(vscsi, cmd, crq);
1906         } else {
1907                 ibmvscsis_free_cmd_resources(vscsi, cmd);
1908         }
1909
1910         pr_debug("Leaving mad, rc %ld\n", rc);
1911         return rc;
1912 }
1913
1914 /**
1915  * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
1916  * @vscsi:      Pointer to our adapter structure
1917  * @cmd:        Pointer to the command for the SRP Login request
1918  *
1919  * EXECUTION ENVIRONMENT:
1920  *      Interrupt, interrupt lock held
1921  */
1922 static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
1923                                 struct ibmvscsis_cmd *cmd)
1924 {
1925         struct iu_entry *iue = cmd->iue;
1926         struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
1927         struct format_code *fmt;
1928         uint flag_bits = 0;
1929         long rc = ADAPT_SUCCESS;
1930
1931         memset(rsp, 0, sizeof(struct srp_login_rsp));
1932
1933         rsp->opcode = SRP_LOGIN_RSP;
1934         rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
1935         rsp->tag = cmd->rsp.tag;
1936         rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1937         rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1938         fmt = (struct format_code *)&rsp->buf_fmt;
1939         fmt->buffers = SUPPORTED_FORMATS;
1940         vscsi->credit = 0;
1941
1942         cmd->rsp.len = sizeof(struct srp_login_rsp);
1943
1944         dma_wmb();
1945         rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1946                          iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1947                          be64_to_cpu(iue->remote_token));
1948
1949         switch (rc) {
1950         case H_SUCCESS:
1951                 break;
1952
1953         case H_PERMISSION:
1954                 if (connection_broken(vscsi))
1955                         flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1956                 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1957                         rc);
1958                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1959                                           flag_bits);
1960                 break;
1961         case H_SOURCE_PARM:
1962         case H_DEST_PARM:
1963         default:
1964                 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1965                         rc);
1966                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1967                 break;
1968         }
1969
1970         return rc;
1971 }
1972
1973 /**
1974  * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
1975  * @vscsi:      Pointer to our adapter structure
1976  * @cmd:        Pointer to the command for the SRP Login request
1977  * @reason:     The reason the SRP Login is being rejected, per SRP protocol
1978  *
1979  * EXECUTION ENVIRONMENT:
1980  *      Interrupt, interrupt lock held
1981  */
1982 static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
1983                                     struct ibmvscsis_cmd *cmd, u32 reason)
1984 {
1985         struct iu_entry *iue = cmd->iue;
1986         struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
1987         struct format_code *fmt;
1988         uint flag_bits = 0;
1989         long rc = ADAPT_SUCCESS;
1990
1991         memset(rej, 0, sizeof(*rej));
1992
1993         rej->opcode = SRP_LOGIN_REJ;
1994         rej->reason = cpu_to_be32(reason);
1995         rej->tag = cmd->rsp.tag;
1996         fmt = (struct format_code *)&rej->buf_fmt;
1997         fmt->buffers = SUPPORTED_FORMATS;
1998
1999         cmd->rsp.len = sizeof(*rej);
2000
2001         dma_wmb();
2002         rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
2003                          iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
2004                          be64_to_cpu(iue->remote_token));
2005
2006         switch (rc) {
2007         case H_SUCCESS:
2008                 break;
2009         case H_PERMISSION:
2010                 if (connection_broken(vscsi))
2011                         flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
2012                 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2013                         rc);
2014                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
2015                                           flag_bits);
2016                 break;
2017         case H_SOURCE_PARM:
2018         case H_DEST_PARM:
2019         default:
2020                 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
2021                         rc);
2022                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2023                 break;
2024         }
2025
2026         return rc;
2027 }
2028
2029 static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2030 {
2031         char *name = tport->tport_name;
2032         struct ibmvscsis_nexus *nexus;
2033         int rc;
2034
2035         if (tport->ibmv_nexus) {
2036                 pr_debug("tport->ibmv_nexus already exists\n");
2037                 return 0;
2038         }
2039
2040         nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2041         if (!nexus) {
2042                 pr_err("Unable to allocate struct ibmvscsis_nexus\n");
2043                 return -ENOMEM;
2044         }
2045
2046         nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
2047                                               TARGET_PROT_NORMAL, name, nexus,
2048                                               NULL);
2049         if (IS_ERR(nexus->se_sess)) {
2050                 rc = PTR_ERR(nexus->se_sess);
2051                 goto transport_init_fail;
2052         }
2053
2054         tport->ibmv_nexus = nexus;
2055
2056         return 0;
2057
2058 transport_init_fail:
2059         kfree(nexus);
2060         return rc;
2061 }
2062
2063 static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2064 {
2065         struct se_session *se_sess;
2066         struct ibmvscsis_nexus *nexus;
2067
2068         nexus = tport->ibmv_nexus;
2069         if (!nexus)
2070                 return -ENODEV;
2071
2072         se_sess = nexus->se_sess;
2073         if (!se_sess)
2074                 return -ENODEV;
2075
2076         /*
2077          * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2078          */
2079         target_wait_for_sess_cmds(se_sess);
2080         transport_deregister_session_configfs(se_sess);
2081         transport_deregister_session(se_sess);
2082         tport->ibmv_nexus = NULL;
2083         kfree(nexus);
2084
2085         return 0;
2086 }
2087
2088 /**
2089  * ibmvscsis_srp_login() - Process an SRP Login Request
2090  * @vscsi:      Pointer to our adapter structure
2091  * @cmd:        Command element to use to process the SRP Login request
2092  * @crq:        Pointer to CRQ entry containing the SRP Login request
2093  *
2094  * EXECUTION ENVIRONMENT:
2095  *      Interrupt, called with interrupt lock held
2096  */
2097 static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2098                                 struct ibmvscsis_cmd *cmd,
2099                                 struct viosrp_crq *crq)
2100 {
2101         struct iu_entry *iue = cmd->iue;
2102         struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
2103         struct port_id {
2104                 __be64 id_extension;
2105                 __be64 io_guid;
2106         } *iport, *tport;
2107         struct format_code *fmt;
2108         u32 reason = 0x0;
2109         long rc = ADAPT_SUCCESS;
2110
2111         iport = (struct port_id *)req->initiator_port_id;
2112         tport = (struct port_id *)req->target_port_id;
2113         fmt = (struct format_code *)&req->req_buf_fmt;
2114         if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
2115                 reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
2116         else if (be32_to_cpu(req->req_it_iu_len) < 64)
2117                 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2118         else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
2119                  (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
2120                 reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
2121         else if (req->req_flags & SRP_MULTICHAN_MULTI)
2122                 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
2123         else if (fmt->buffers & (~SUPPORTED_FORMATS))
2124                 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2125         else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
2126                 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
2127
2128         if (vscsi->state == SRP_PROCESSING)
2129                 reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
2130
2131         rc = ibmvscsis_make_nexus(&vscsi->tport);
2132         if (rc)
2133                 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
2134
2135         cmd->rsp.format = VIOSRP_SRP_FORMAT;
2136         cmd->rsp.tag = req->tag;
2137
2138         pr_debug("srp_login: reason 0x%x\n", reason);
2139
2140         if (reason)
2141                 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
2142         else
2143                 rc = ibmvscsis_login_rsp(vscsi, cmd);
2144
2145         if (!rc) {
2146                 if (!reason)
2147                         vscsi->state = SRP_PROCESSING;
2148
2149                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2150                 ibmvscsis_send_messages(vscsi);
2151         } else {
2152                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2153         }
2154
2155         pr_debug("Leaving srp_login, rc %ld\n", rc);
2156         return rc;
2157 }
2158
2159 /**
2160  * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2161  * @vscsi:      Pointer to our adapter structure
2162  * @cmd:        Command element to use to process the Implicit Logout request
2163  * @crq:        Pointer to CRQ entry containing the Implicit Logout request
2164  *
2165  * Do the logic to close the I_T nexus.  This function may not
2166  * behave according to the specification.
2167  *
2168  * EXECUTION ENVIRONMENT:
2169  *      Interrupt, interrupt lock held
2170  */
2171 static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2172                                    struct ibmvscsis_cmd *cmd,
2173                                    struct viosrp_crq *crq)
2174 {
2175         struct iu_entry *iue = cmd->iue;
2176         struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2177         long rc = ADAPT_SUCCESS;
2178
2179         if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2180             !list_empty(&vscsi->waiting_rsp)) {
2181                 dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2182                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2183         } else {
2184                 cmd->rsp.format = SRP_FORMAT;
2185                 cmd->rsp.tag = log_out->tag;
2186                 cmd->rsp.len = sizeof(struct mad_common);
2187                 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2188                 ibmvscsis_send_messages(vscsi);
2189
2190                 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2191         }
2192
2193         return rc;
2194 }
2195
2196 /* Called with intr lock held */
2197 static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2198 {
2199         struct ibmvscsis_cmd *cmd;
2200         struct iu_entry *iue;
2201         struct srp_cmd *srp;
2202         struct srp_tsk_mgmt *tsk;
2203         long rc;
2204
2205         if (vscsi->request_limit - vscsi->debit <= 0) {
2206                 /* Client has exceeded request limit */
2207                 dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
2208                         vscsi->request_limit, vscsi->debit);
2209                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2210                 return;
2211         }
2212
2213         cmd = ibmvscsis_get_free_cmd(vscsi);
2214         if (!cmd) {
2215                 dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
2216                         vscsi->debit);
2217                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2218                 return;
2219         }
2220         iue = cmd->iue;
2221         srp = &vio_iu(iue)->srp.cmd;
2222
2223         rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
2224         if (rc) {
2225                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2226                 return;
2227         }
2228
2229         if (vscsi->state == SRP_PROCESSING) {
2230                 switch (srp->opcode) {
2231                 case SRP_LOGIN_REQ:
2232                         rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2233                         break;
2234
2235                 case SRP_TSK_MGMT:
2236                         tsk = &vio_iu(iue)->srp.tsk_mgmt;
2237                         pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
2238                                  tsk->tag);
2239                         cmd->rsp.tag = tsk->tag;
2240                         vscsi->debit += 1;
2241                         cmd->type = TASK_MANAGEMENT;
2242                         list_add_tail(&cmd->list, &vscsi->schedule_q);
2243                         queue_work(vscsi->work_q, &cmd->work);
2244                         break;
2245
2246                 case SRP_CMD:
2247                         pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
2248                                  srp->tag);
2249                         cmd->rsp.tag = srp->tag;
2250                         vscsi->debit += 1;
2251                         cmd->type = SCSI_CDB;
2252                         /*
2253                          * We want to keep track of work waiting for
2254                          * the workqueue.
2255                          */
2256                         list_add_tail(&cmd->list, &vscsi->schedule_q);
2257                         queue_work(vscsi->work_q, &cmd->work);
2258                         break;
2259
2260                 case SRP_I_LOGOUT:
2261                         rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
2262                         break;
2263
2264                 case SRP_CRED_RSP:
2265                 case SRP_AER_RSP:
2266                 default:
2267                         ibmvscsis_free_cmd_resources(vscsi, cmd);
2268                         dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
2269                                 (uint)srp->opcode);
2270                         ibmvscsis_post_disconnect(vscsi,
2271                                                   ERR_DISCONNECT_RECONNECT, 0);
2272                         break;
2273                 }
2274         } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
2275                 rc = ibmvscsis_srp_login(vscsi, cmd, crq);
2276         } else {
2277                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2278                 dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
2279                         vscsi->state);
2280                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2281         }
2282 }
2283
2284 /**
2285  * ibmvscsis_ping_response() - Respond to a ping request
2286  * @vscsi:      Pointer to our adapter structure
2287  *
2288  * Let the client know that the server is alive and waiting on
2289  * its native I/O stack.
2290  * If any type of error occurs from the call to queue a ping
2291  * response then the client is either not accepting or not receiving
2292  * interrupts.  Disconnect with an error.
2293  *
2294  * EXECUTION ENVIRONMENT:
2295  *      Interrupt, interrupt lock held
2296  */
2297 static long ibmvscsis_ping_response(struct scsi_info *vscsi)
2298 {
2299         struct viosrp_crq *crq;
2300         u64 buffer[2] = { 0, 0 };
2301         long rc;
2302
2303         crq = (struct viosrp_crq *)&buffer;
2304         crq->valid = VALID_CMD_RESP_EL;
2305         crq->format = (u8)MESSAGE_IN_CRQ;
2306         crq->status = PING_RESPONSE;
2307
2308         rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
2309                         cpu_to_be64(buffer[MSG_LOW]));
2310
2311         switch (rc) {
2312         case H_SUCCESS:
2313                 break;
2314         case H_CLOSED:
2315                 vscsi->flags |= CLIENT_FAILED;
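                     /* fall through */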
2316         case H_DROPPED:
2317                 vscsi->flags |= RESPONSE_Q_DOWN;
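                     /* fall through */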
2318         case H_REMOTE_PARM:
2319                 dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
2320                         rc);
2321                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2322                 break;
2323         default:
2324                 dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
2325                         rc);
2326                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2327                 break;
2328         }
2329
2330         return rc;
2331 }
2332
2333 /**
2334  * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2335  * @vscsi:      Pointer to our adapter structure
2336  * @crq:        Pointer to CRQ element containing the SRP request
2337  *
2338  * This function will return success if the command queue element is valid
2339  * and the srp iu or MAD request it pointed to was also valid.  That does
2340  * not mean that an error was not returned to the client.
2341  *
2342  * EXECUTION ENVIRONMENT:
2343  *      Interrupt, intr lock held
2344  */
2345 static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2346                                     struct viosrp_crq *crq)
2347 {
2348         long rc = ADAPT_SUCCESS;
2349
2350         switch (crq->valid) {
2351         case VALID_CMD_RESP_EL:
2352                 switch (crq->format) {
2353                 case OS400_FORMAT:
2354                 case AIX_FORMAT:
2355                 case LINUX_FORMAT:
2356                 case MAD_FORMAT:
2357                         if (vscsi->flags & PROCESSING_MAD) {
2358                                 rc = ERROR;
2359                                 dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2360                                 ibmvscsis_post_disconnect(vscsi,
2361                                                        ERR_DISCONNECT_RECONNECT,
2362                                                        0);
2363                         } else {
2364                                 vscsi->flags |= PROCESSING_MAD;
2365                                 rc = ibmvscsis_mad(vscsi, crq);
2366                         }
2367                         break;
2368
2369                 case SRP_FORMAT:
2370                         ibmvscsis_srp_cmd(vscsi, crq);
2371                         break;
2372
2373                 case MESSAGE_IN_CRQ:
2374                         if (crq->status == PING)
2375                                 ibmvscsis_ping_response(vscsi);
2376                         break;
2377
2378                 default:
2379                         dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2380                                 (uint)crq->format);
2381                         ibmvscsis_post_disconnect(vscsi,
2382                                                   ERR_DISCONNECT_RECONNECT, 0);
2383                         break;
2384                 }
2385                 break;
2386
2387         case VALID_TRANS_EVENT:
2388                 rc = ibmvscsis_trans_event(vscsi, crq);
2389                 break;
2390
2391         case VALID_INIT_MSG:
2392                 rc = ibmvscsis_init_msg(vscsi, crq);
2393                 break;
2394
2395         default:
2396                 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2397                         (uint)crq->valid);
2398                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2399                 break;
2400         }
2401
2402         /*
2403          * Return only what the interrupt handler cares
2404          * about. Most errors we keep right on trucking.
2405          */
2406         rc = vscsi->flags & SCHEDULE_DISCONNECT;
2407
2408         return rc;
2409 }
2410
2411 static int read_dma_window(struct scsi_info *vscsi)
2412 {
2413         struct vio_dev *vdev = vscsi->dma_dev;
2414         const __be32 *dma_window;
2415         const __be32 *prop;
2416
2417         /* TODO Using of_parse_dma_window would be better, but it doesn't give
2418          * a way to read multiple windows without already knowing the size of
2419          * a window or the number of windows.
2420          */
2421         dma_window = (const __be32 *)vio_get_attribute(vdev,
2422                                                        "ibm,my-dma-window",
2423                                                        NULL);
2424         if (!dma_window) {
2425                 pr_err("Couldn't find ibm,my-dma-window property\n");
2426                 return -1;
2427         }
2428
2429         vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2430         dma_window++;
2431
2432         prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2433                                                  NULL);
2434         if (!prop) {
2435                 pr_warn("Couldn't find ibm,#dma-address-cells property\n");
2436                 dma_window++;
2437         } else {
2438                 dma_window += be32_to_cpu(*prop);
2439         }
2440
2441         prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2442                                                  NULL);
2443         if (!prop) {
2444                 pr_warn("Couldn't find ibm,#dma-size-cells property\n");
2445                 dma_window++;
2446         } else {
2447                 dma_window += be32_to_cpu(*prop);
2448         }
2449
2450         /* dma_window should point to the second window now */
2451         vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2452
2453         return 0;
2454 }
2455
2456 static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2457 {
2458         struct ibmvscsis_tport *tport = NULL;
2459         struct vio_dev *vdev;
2460         struct scsi_info *vscsi;
2461
2462         spin_lock_bh(&ibmvscsis_dev_lock);
2463         list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2464                 vdev = vscsi->dma_dev;
2465                 if (!strcmp(dev_name(&vdev->dev), name)) {
2466                         tport = &vscsi->tport;
2467                         break;
2468                 }
2469         }
2470         spin_unlock_bh(&ibmvscsis_dev_lock);
2471
2472         return tport;
2473 }
2474
2475 /**
2476  * ibmvscsis_parse_cmd() - Parse SRP Command
2477  * @vscsi:      Pointer to our adapter structure
2478  * @cmd:        Pointer to command element with SRP command
2479  *
2480  * Parse the srp command; if it is valid then submit it to tcm.
2481  * Note: The return code does not reflect the status of the SCSI CDB.
2482  *
2483  * EXECUTION ENVIRONMENT:
2484  *      Process level
2485  */
2486 static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2487                                 struct ibmvscsis_cmd *cmd)
2488 {
2489         struct iu_entry *iue = cmd->iue;
2490         struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2491         struct ibmvscsis_nexus *nexus;
2492         u64 data_len = 0;
2493         enum dma_data_direction dir;
2494         int attr = 0;
2495         int rc = 0;
2496
2497         nexus = vscsi->tport.ibmv_nexus;
2498         /*
2499          * additional length in bytes.  Note that the SRP spec says that
2500          * additional length is in 4-byte words, but technically the
2501          * additional length field is only the upper 6 bits of the byte.
2502          * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
2503          * all reserved fields should be), then interpreting the byte as
2504          * an int will yield the length in bytes.
2505          */
2506         if (srp->add_cdb_len & 0x03) {
2507                 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2508                 spin_lock_bh(&vscsi->intr_lock);
2509                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2510                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2511                 spin_unlock_bh(&vscsi->intr_lock);
2512                 return;
2513         }
2514
2515         if (srp_get_desc_table(srp, &dir, &data_len)) {
2516                 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2517                         srp->tag);
2518                 goto fail;
2519         }
2520
2521         cmd->rsp.sol_not = srp->sol_not;
2522
2523         switch (srp->task_attr) {
2524         case SRP_SIMPLE_TASK:
2525                 attr = TCM_SIMPLE_TAG;
2526                 break;
2527         case SRP_ORDERED_TASK:
2528                 attr = TCM_ORDERED_TAG;
2529                 break;
2530         case SRP_HEAD_TASK:
2531                 attr = TCM_HEAD_TAG;
2532                 break;
2533         case SRP_ACA_TASK:
2534                 attr = TCM_ACA_TAG;
2535                 break;
2536         default:
2537                 dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2538                         srp->task_attr);
2539                 goto fail;
2540         }
2541
2542         cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2543
2544         spin_lock_bh(&vscsi->intr_lock);
2545         list_add_tail(&cmd->list, &vscsi->active_q);
2546         spin_unlock_bh(&vscsi->intr_lock);
2547
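             /* strip the addressing-method bits from byte 0 of the LUN before converting it */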
2548         srp->lun.scsi_lun[0] &= 0x3f;
2549
2550         rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2551                                cmd->sense_buf, scsilun_to_int(&srp->lun),
2552                                data_len, attr, dir, 0);
2553         if (rc) {
2554                 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2555                 spin_lock_bh(&vscsi->intr_lock);
2556                 list_del(&cmd->list);
2557                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2558                 spin_unlock_bh(&vscsi->intr_lock);
2559                 goto fail;
2560         }
2561         return;
2562
2563 fail:
2564         spin_lock_bh(&vscsi->intr_lock);
2565         ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2566         spin_unlock_bh(&vscsi->intr_lock);
2567 }
2568
2569 /**
2570  * ibmvscsis_parse_task() - Parse SRP Task Management Request
2571  * @vscsi:      Pointer to our adapter structure
2572  * @cmd:        Pointer to command element with SRP task management request
2573  *
2574  * Parse the srp task management request; if it is valid then submit it to tcm.
2575  * Note: The return code does not reflect the status of the task management
2576  * request.
2577  *
2578  * EXECUTION ENVIRONMENT:
2579  *      Processor level
2580  */
2581 static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2582                                  struct ibmvscsis_cmd *cmd)
2583 {
2584         struct iu_entry *iue = cmd->iue;
2585         struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2586         int tcm_type;
2587         u64 tag_to_abort = 0;
2588         int rc = 0;
2589         struct ibmvscsis_nexus *nexus;
2590
2591         nexus = vscsi->tport.ibmv_nexus;
2592
2593         cmd->rsp.sol_not = srp_tsk->sol_not;
2594
2595         switch (srp_tsk->tsk_mgmt_func) {
2596         case SRP_TSK_ABORT_TASK:
2597                 tcm_type = TMR_ABORT_TASK;
2598                 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2599                 break;
2600         case SRP_TSK_ABORT_TASK_SET:
2601                 tcm_type = TMR_ABORT_TASK_SET;
2602                 break;
2603         case SRP_TSK_CLEAR_TASK_SET:
2604                 tcm_type = TMR_CLEAR_TASK_SET;
2605                 break;
2606         case SRP_TSK_LUN_RESET:
2607                 tcm_type = TMR_LUN_RESET;
2608                 break;
2609         case SRP_TSK_CLEAR_ACA:
2610                 tcm_type = TMR_CLEAR_ACA;
2611                 break;
2612         default:
2613                 dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2614                         srp_tsk->tsk_mgmt_func);
2615                 cmd->se_cmd.se_tmr_req->response =
2616                         TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2617                 rc = -1;
2618                 break;
2619         }
2620
2621         if (!rc) {
2622                 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2623
2624                 spin_lock_bh(&vscsi->intr_lock);
2625                 list_add_tail(&cmd->list, &vscsi->active_q);
2626                 spin_unlock_bh(&vscsi->intr_lock);
2627
2628                 srp_tsk->lun.scsi_lun[0] &= 0x3f;
2629
2630                 pr_debug("calling submit_tmr, func %d\n",
2631                          srp_tsk->tsk_mgmt_func);
2632                 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2633                                        scsilun_to_int(&srp_tsk->lun), srp_tsk,
2634                                        tcm_type, GFP_KERNEL, tag_to_abort, 0);
2635                 if (rc) {
2636                         dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2637                                 rc);
2638                         spin_lock_bh(&vscsi->intr_lock);
2639                         list_del(&cmd->list);
2640                         spin_unlock_bh(&vscsi->intr_lock);
2641                         cmd->se_cmd.se_tmr_req->response =
2642                                 TMR_FUNCTION_REJECTED;
2643                 }
2644         }
2645
2646         if (rc)
2647                 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2648 }
2649
2650 static void ibmvscsis_scheduler(struct work_struct *work)
2651 {
2652         struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2653                                                  work);
2654         struct scsi_info *vscsi = cmd->adapter;
2655
2656         spin_lock_bh(&vscsi->intr_lock);
2657
2658         /* Remove from schedule_q */
2659         list_del(&cmd->list);
2660
2661         /* Don't submit cmd if we're disconnecting */
2662         if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2663                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2664
2665                 /* ibmvscsis_disconnect might be waiting for us */
2666                 if (list_empty(&vscsi->active_q) &&
2667                     list_empty(&vscsi->schedule_q) &&
2668                     (vscsi->flags & WAIT_FOR_IDLE)) {
2669                         vscsi->flags &= ~WAIT_FOR_IDLE;
2670                         complete(&vscsi->wait_idle);
2671                 }
2672
2673                 spin_unlock_bh(&vscsi->intr_lock);
2674                 return;
2675         }
2676
2677         spin_unlock_bh(&vscsi->intr_lock);
2678
2679         switch (cmd->type) {
2680         case SCSI_CDB:
2681                 ibmvscsis_parse_cmd(vscsi, cmd);
2682                 break;
2683         case TASK_MANAGEMENT:
2684                 ibmvscsis_parse_task(vscsi, cmd);
2685                 break;
2686         default:
2687                 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2688                         cmd->type);
2689                 spin_lock_bh(&vscsi->intr_lock);
2690                 ibmvscsis_free_cmd_resources(vscsi, cmd);
2691                 spin_unlock_bh(&vscsi->intr_lock);
2692                 break;
2693         }
2694 }
2695
2696 static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2697 {
2698         struct ibmvscsis_cmd *cmd;
2699         int i;
2700
2701         INIT_LIST_HEAD(&vscsi->free_cmd);
2702         vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2703                                   GFP_KERNEL);
2704         if (!vscsi->cmd_pool)
2705                 return -ENOMEM;
2706
2707         for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2708              i++, cmd++) {
2709                 cmd->adapter = vscsi;
2710                 INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2711                 list_add_tail(&cmd->list, &vscsi->free_cmd);
2712         }
2713
2714         return 0;
2715 }
2716
2717 static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2718 {
2719         kfree(vscsi->cmd_pool);
2720         vscsi->cmd_pool = NULL;
2721         INIT_LIST_HEAD(&vscsi->free_cmd);
2722 }
2723
2724 /**
2725  * ibmvscsis_service_wait_q() - Service Waiting Queue
2726  * @timer:      Pointer to timer which has expired
2727  *
2728  * This routine is called when the timer pops to service the waiting
2729  * queue. Elements on the queue have completed, their responses have been
2730  * copied to the client, but the client's response queue was full so
2731  * the queue message could not be sent. The routine grabs the proper locks
2732  * and calls ibmvscsis_send_messages().
2733  *
2734  * EXECUTION ENVIRONMENT:
2735  *      called at interrupt level
2736  */
2737 static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2738 {
2739         struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2740         struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2741                                                rsp_q_timer);
2742
2743         spin_lock_bh(&vscsi->intr_lock);
2744         p_timer->timer_pops += 1;
2745         p_timer->started = false;
2746         ibmvscsis_send_messages(vscsi);
2747         spin_unlock_bh(&vscsi->intr_lock);
2748
2749         return HRTIMER_NORESTART;
2750 }
2751
2752 static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2753 {
2754         struct timer_cb *p_timer;
2755
2756         p_timer = &vscsi->rsp_q_timer;
2757         hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2758
2759         p_timer->timer.function = ibmvscsis_service_wait_q;
2760         p_timer->started = false;
2761         p_timer->timer_pops = 0;
2762
2763         return ADAPT_SUCCESS;
2764 }
2765
2766 static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2767 {
2768         struct timer_cb *p_timer;
2769
2770         p_timer = &vscsi->rsp_q_timer;
2771
2772         (void)hrtimer_cancel(&p_timer->timer);
2773
2774         p_timer->started = false;
2775         p_timer->timer_pops = 0;
2776 }
2777
2778 static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
2779 {
2780         struct scsi_info *vscsi = data;
2781
2782         vio_disable_interrupts(vscsi->dma_dev);
2783         tasklet_schedule(&vscsi->work_task);
2784
2785         return IRQ_HANDLED;
2786 }
2787
2788 /**
2789  * ibmvscsis_enable_change_state() - Set new state based on enabled status
2790  * @vscsi:      Pointer to our adapter structure
2791  *
2792  * This function determines our new state now that we are enabled.  This
2793  * may involve sending an Init Complete message to the client.
2794  *
2795  * Must be called with interrupt lock held.
2796  */
2797 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2798 {
2799         int bytes;
2800         long rc = ADAPT_SUCCESS;
2801
2802         bytes = vscsi->cmd_q.size * PAGE_SIZE;
2803         rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
2804         if (rc == H_CLOSED || rc == H_SUCCESS) {
2805                 vscsi->state = WAIT_CONNECTION;
2806                 rc = ibmvscsis_establish_new_q(vscsi);
2807         }
2808
2809         if (rc != ADAPT_SUCCESS) {
2810                 vscsi->state = ERR_DISCONNECTED;
2811                 vscsi->flags |= RESPONSE_Q_DOWN;
2812         }
2813
2814         return rc;
2815 }
2816
2817 /**
2818  * ibmvscsis_create_command_q() - Create Command Queue
2819  * @vscsi:      Pointer to our adapter structure
2820  * @num_cmds:   Currently unused.  In the future, may be used to determine
2821  *              the size of the CRQ.
2822  *
2823  * Allocates memory for the command queue, maps it to an I/O bus address
2824  * (ioba), and initializes the command/response queue.
2825  *
2826  * EXECUTION ENVIRONMENT:
2827  *      Process level only
2828  */
2829 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2830 {
2831         int pages;
2832         struct vio_dev *vdev = vscsi->dma_dev;
2833
2834         /* We might support multiple pages in the future, but just 1 for now */
2835         pages = 1;
2836
2837         vscsi->cmd_q.size = pages;
2838
2839         vscsi->cmd_q.base_addr =
2840                 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
2841         if (!vscsi->cmd_q.base_addr)
2842                 return -ENOMEM;
2843
2844         vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
2845
2846         vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
2847                                                 vscsi->cmd_q.base_addr,
2848                                                 PAGE_SIZE, DMA_BIDIRECTIONAL);
2849         if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
2850                 free_page((unsigned long)vscsi->cmd_q.base_addr);
2851                 return -ENOMEM;
2852         }
2853
2854         return 0;
2855 }
2856
2857 /**
2858  * ibmvscsis_destroy_command_q() - Destroy Command Queue
2859  * @vscsi:      Pointer to our adapter structure
2860  *
2861  * Releases memory for command queue and unmaps mapped remote memory.
2862  *
2863  * EXECUTION ENVIRONMENT:
2864  *      Process level only
2865  */
2866 static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
2867 {
2868         dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
2869                          PAGE_SIZE, DMA_BIDIRECTIONAL);
2870         free_page((unsigned long)vscsi->cmd_q.base_addr);
2871         vscsi->cmd_q.base_addr = NULL;
2872         vscsi->state = NO_QUEUE;
2873 }
2874
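/**
 * ibmvscsis_fast_fail() - Optionally rewrite a failed media command status
 * @vscsi:      Pointer to our adapter structure
 * @cmd:        Command whose SCSI status is being reported
 *
 * When the fast_fail tunable is set and a READ/WRITE command failed with a
 * HARDWARE ERROR sense key while either none or all of the data was
 * transferred, report NO SENSE instead and flag the command with
 * CMD_FAST_FAIL so srp_build_response() still signals the failure through
 * the unsolicited notification bits.
 */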
2875 static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
2876                               struct ibmvscsis_cmd *cmd)
2877 {
2878         struct iu_entry *iue = cmd->iue;
2879         struct se_cmd *se_cmd = &cmd->se_cmd;
2880         struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2881         struct scsi_sense_hdr sshdr;
2882         u8 rc = se_cmd->scsi_status;
2883
2884         if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
2885                 if (scsi_normalize_sense(se_cmd->sense_buffer,
2886                                          se_cmd->scsi_sense_length, &sshdr))
2887                         if (sshdr.sense_key == HARDWARE_ERROR &&
2888                             (se_cmd->residual_count == 0 ||
2889                              se_cmd->residual_count == se_cmd->data_length)) {
2890                                 rc = NO_SENSE;
2891                                 cmd->flags |= CMD_FAST_FAIL;
2892                         }
2893
2894         return rc;
2895 }
2896
2897 /**
2898  * srp_build_response() - Build an SRP response buffer
2899  * @vscsi:      Pointer to our adapter structure
2900  * @cmd:        Pointer to command for which to send the response
2901  * @len_p:      Where to return the length of the IU response sent.  This
2902  *              is needed to construct the CRQ response.
2903  *
2904  * Build the SRP response buffer and copy it to the client's memory space.
2905  */
2906 static long srp_build_response(struct scsi_info *vscsi,
2907                                struct ibmvscsis_cmd *cmd, uint *len_p)
2908 {
2909         struct iu_entry *iue = cmd->iue;
2910         struct se_cmd *se_cmd = &cmd->se_cmd;
2911         struct srp_rsp *rsp;
2912         uint len;
2913         u32 rsp_code;
2914         char *data;
2915         u32 *tsk_status;
2916         long rc = ADAPT_SUCCESS;
2917
2918         spin_lock_bh(&vscsi->intr_lock);
2919
2920         rsp = &vio_iu(iue)->srp.rsp;
2921         len = sizeof(*rsp);
2922         memset(rsp, 0, len);
2923         data = rsp->data;
2924
2925         rsp->opcode = SRP_RSP;
2926
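        /*
         * req_lim_delta returns request-limit credit to the client.  This
         * presumably follows the SRP credit scheme: while actively
         * processing, hand back any credit that has accumulated; otherwise
         * also return the credit consumed by this request.
         */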
2927         if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
2928                 rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
2929         else
2930                 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
2931         rsp->tag = cmd->rsp.tag;
2932         rsp->flags = 0;
2933
2934         if (cmd->type == SCSI_CDB) {
2935                 rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
2936                 if (rsp->status) {
2937                         pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
2938                                  (int)rsp->status);
2939                         ibmvscsis_determine_resid(se_cmd, rsp);
2940                         if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
2941                                 rsp->sense_data_len =
2942                                         cpu_to_be32(se_cmd->scsi_sense_length);
2943                                 rsp->flags |= SRP_RSP_FLAG_SNSVALID;
2944                                 len += se_cmd->scsi_sense_length;
2945                                 memcpy(data, se_cmd->sense_buffer,
2946                                        se_cmd->scsi_sense_length);
2947                         }
2948                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
2949                                 UCSOLNT_RESP_SHIFT;
2950                 } else if (cmd->flags & CMD_FAST_FAIL) {
2951                         pr_debug("build_resp: cmd %p, fast fail\n", cmd);
2952                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
2953                                 UCSOLNT_RESP_SHIFT;
2954                 } else {
2955                         rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
2956                                 SCSOLNT_RESP_SHIFT;
2957                 }
2958         } else {
2959                 /* this is task management */
2960                 rsp->status = 0;
2961                 rsp->resp_data_len = cpu_to_be32(4);
2962                 rsp->flags |= SRP_RSP_FLAG_RSPVALID;
2963
2964                 switch (se_cmd->se_tmr_req->response) {
2965                 case TMR_FUNCTION_COMPLETE:
2966                 case TMR_TASK_DOES_NOT_EXIST:
2967                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
2968                         rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
2969                                 SCSOLNT_RESP_SHIFT;
2970                         break;
2971                 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
2972                 case TMR_LUN_DOES_NOT_EXIST:
2973                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
2974                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
2975                                 UCSOLNT_RESP_SHIFT;
2976                         break;
2977                 case TMR_FUNCTION_FAILED:
2978                 case TMR_FUNCTION_REJECTED:
2979                 default:
2980                         rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
2981                         rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
2982                                 UCSOLNT_RESP_SHIFT;
2983                         break;
2984                 }
2985
2986                 tsk_status = (u32 *)data;
2987                 *tsk_status = cpu_to_be32(rsp_code);
2988                 data = (char *)(tsk_status + 1);
2989                 len += 4;
2990         }
2991
2992         dma_wmb();
2993         rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
2994                          vscsi->dds.window[REMOTE].liobn,
2995                          be64_to_cpu(iue->remote_token));
2996
2997         switch (rc) {
2998         case H_SUCCESS:
2999                 vscsi->credit = 0;
3000                 *len_p = len;
3001                 break;
3002         case H_PERMISSION:
3003                 if (connection_broken(vscsi))
3004                         vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3005
3006                 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3007                         rc, vscsi->flags, vscsi->state);
3008                 break;
3009         case H_SOURCE_PARM:
3010         case H_DEST_PARM:
3011         default:
3012                 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3013                         rc);
3014                 break;
3015         }
3016
3017         spin_unlock_bh(&vscsi->intr_lock);
3018
3019         return rc;
3020 }
3021
3022 static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3023                           int nsg, struct srp_direct_buf *md, int nmd,
3024                           enum dma_data_direction dir, unsigned int bytes)
3025 {
3026         struct iu_entry *iue = cmd->iue;
3027         struct srp_target *target = iue->target;
3028         struct scsi_info *vscsi = target->ldata;
3029         struct scatterlist *sgp;
3030         dma_addr_t client_ioba, server_ioba;
3031         ulong buf_len;
3032         ulong client_len, server_len;
3033         int md_idx;
3034         long tx_len;
3035         long rc = 0;
3036
3037         if (bytes == 0)
3038                 return 0;
3039
3040         sgp = sg;
3041         client_len = 0;
3042         server_len = 0;
3043         md_idx = 0;
3044         tx_len = bytes;
3045
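        /*
         * Walk the client's memory descriptor list and the local
         * scatter/gather list in parallel, copying at most
         * min(client_len, server_len, max_vdma_size) bytes per h_copy_rdma
         * call until all of tx_len has been transferred or an error occurs.
         */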
3046         do {
3047                 if (client_len == 0) {
3048                         if (md_idx >= nmd) {
3049                                 dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
3050                                 rc = -EIO;
3051                                 break;
3052                         }
3053                         client_ioba = be64_to_cpu(md[md_idx].va);
3054                         client_len = be32_to_cpu(md[md_idx].len);
3055                 }
3056                 if (server_len == 0) {
3057                         if (!sgp) {
3058                                 dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
3059                                 rc = -EIO;
3060                                 break;
3061                         }
3062                         server_ioba = sg_dma_address(sgp);
3063                         server_len = sg_dma_len(sgp);
3064                 }
3065
3066                 buf_len = tx_len;
3067
3068                 if (buf_len > client_len)
3069                         buf_len = client_len;
3070
3071                 if (buf_len > server_len)
3072                         buf_len = server_len;
3073
3074                 if (buf_len > max_vdma_size)
3075                         buf_len = max_vdma_size;
3076
3077                 if (dir == DMA_TO_DEVICE) {
3078                         /* read from client */
3079                         rc = h_copy_rdma(buf_len,
3080                                          vscsi->dds.window[REMOTE].liobn,
3081                                          client_ioba,
3082                                          vscsi->dds.window[LOCAL].liobn,
3083                                          server_ioba);
3084                 } else {
3085                         /* The h_copy_rdma will cause phyp, running in another
3086                          * partition, to read memory, so we need to make sure
3087                          * the data has been written out, hence these syncs.
3088                          */
3089                         /* ensure that everything is in memory */
3090                         isync();
3091                         /* ensure that memory has been made visible */
3092                         dma_wmb();
3093                         rc = h_copy_rdma(buf_len,
3094                                          vscsi->dds.window[LOCAL].liobn,
3095                                          server_ioba,
3096                                          vscsi->dds.window[REMOTE].liobn,
3097                                          client_ioba);
3098                 }
3099                 switch (rc) {
3100                 case H_SUCCESS:
3101                         break;
3102                 case H_PERMISSION:
3103                 case H_SOURCE_PARM:
3104                 case H_DEST_PARM:
3105                         if (connection_broken(vscsi)) {
3106                                 spin_lock_bh(&vscsi->intr_lock);
3107                                 vscsi->flags |=
3108                                         (RESPONSE_Q_DOWN | CLIENT_FAILED);
3109                                 spin_unlock_bh(&vscsi->intr_lock);
3110                         }
3111                         dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
3112                                 rc);
3113                         break;
3114
3115                 default:
3116                         dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
3117                                 rc);
3118                         break;
3119                 }
3120
3121                 if (!rc) {
3122                         tx_len -= buf_len;
3123                         if (tx_len) {
3124                                 client_len -= buf_len;
3125                                 if (client_len == 0)
3126                                         md_idx++;
3127                                 else
3128                                         client_ioba += buf_len;
3129
3130                                 server_len -= buf_len;
3131                                 if (server_len == 0)
3132                                         sgp = sg_next(sgp);
3133                                 else
3134                                         server_ioba += buf_len;
3135                         } else {
3136                                 break;
3137                         }
3138                 }
3139         } while (!rc);
3140
3141         return rc;
3142 }
3143
3144 /**
3145  * ibmvscsis_handle_crq() - Handle CRQ
3146  * @data:       Pointer to our adapter structure
3147  *
3148  * Read the command elements from the command queue, copy the payloads
3149  * associated with them to local memory, and execute the
3150  * SRP requests.
3151  *
3152  * Note: this is an edge-triggered interrupt.  It cannot be shared.
3153  */
3154 static void ibmvscsis_handle_crq(unsigned long data)
3155 {
3156         struct scsi_info *vscsi = (struct scsi_info *)data;
3157         struct viosrp_crq *crq;
3158         long rc;
3159         bool ack = true;
3160         volatile u8 valid;
3161
3162         spin_lock_bh(&vscsi->intr_lock);
3163
3164         pr_debug("got interrupt\n");
3165
3166         /*
3167          * if we are waiting for all pending commands to complete because we
3168          * received a transport event, then anything in the command queue is
3169          * for a new connection, so do nothing
3170          */
3171         if (TARGET_STOP(vscsi)) {
3172                 vio_enable_interrupts(vscsi->dma_dev);
3173
3174                 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3175                          vscsi->flags, vscsi->state);
3176                 spin_unlock_bh(&vscsi->intr_lock);
3177                 return;
3178         }
3179
3180         rc = vscsi->flags & SCHEDULE_DISCONNECT;
3181         crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3182         valid = crq->valid;
3183         dma_rmb();
3184
3185         while (valid) {
3186                 /*
3187                  * These are edge-triggered interrupts. After dropping out of
3188                  * the while loop, the code must check for work since an
3189                  * interrupt could be lost, and an element be left on the queue,
3190                  * hence the label.
3191                  */
3192 cmd_work:
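                /*
                 * cmd_q.mask is (number of queue entries - 1), a power of two,
                 * so the AND below wraps the index back to 0 at the end of the
                 * queue; e.g. for a 256-entry queue, (255 + 1) & 255 == 0.
                 */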
3193                 vscsi->cmd_q.index =
3194                         (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3195
3196                 if (!rc) {
3197                         rc = ibmvscsis_parse_command(vscsi, crq);
3198                 } else {
3199                         if ((uint)crq->valid == VALID_TRANS_EVENT) {
3200                                 /*
3201                                  * must service the transport layer events even
3202                                  * in an error state, don't break out until all
3203                                  * the consecutive transport events have been
3204                                  * processed
3205                                  */
3206                                 rc = ibmvscsis_trans_event(vscsi, crq);
3207                         } else if (vscsi->flags & TRANS_EVENT) {
3208                                 /*
3209                                  * if a transport event has occurred, leave
3210                                  * everything but transport events on the queue
3211                                  *
3212                                  * need to decrement the queue index so we can
3213                                  * look at the element again
3214                                  */
3215                                 if (vscsi->cmd_q.index)
3216                                         vscsi->cmd_q.index -= 1;
3217                                 else
3218                                         /*
3219                                          * the index just wrapped to 0, so
3220                                          * point it at the last element in the queue
3221                                          */
3222                                         vscsi->cmd_q.index = vscsi->cmd_q.mask;
3223                                 break;
3224                         }
3225                 }
3226
3227                 crq->valid = INVALIDATE_CMD_RESP_EL;
3228
3229                 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3230                 valid = crq->valid;
3231                 dma_rmb();
3232         }
3233
3234         if (!rc) {
3235                 if (ack) {
3236                         vio_enable_interrupts(vscsi->dma_dev);
3237                         ack = false;
3238                         pr_debug("handle_crq, reenabling interrupts\n");
3239                 }
3240                 valid = crq->valid;
3241                 dma_rmb();
3242                 if (valid)
3243                         goto cmd_work;
3244         } else {
3245                 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3246                          vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3247         }
3248
3249         pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3250                  (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3251                  vscsi->state);
3252
3253         spin_unlock_bh(&vscsi->intr_lock);
3254 }
3255
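/**
 * ibmvscsis_probe() - Adapter add entry point
 * @vdev:       vio device being probed
 * @id:         Matching entry from ibmvscsis_device_table
 *
 * Allocates and initializes one scsi_info adapter: reads the DMA windows,
 * allocates the SRP target and command pool, sets up the retry timer and
 * command/response queue, maps the partner-info buffer, and registers the
 * interrupt handler.  The adapter is left in WAIT_ENABLED until the target
 * port is enabled through configfs.
 */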
3256 static int ibmvscsis_probe(struct vio_dev *vdev,
3257                            const struct vio_device_id *id)
3258 {
3259         struct scsi_info *vscsi;
3260         int rc = 0;
3261         long hrc = 0;
3262         char wq_name[24];
3263
3264         vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3265         if (!vscsi) {
3266                 rc = -ENOMEM;
3267                 pr_err("probe: allocation of adapter failed\n");
3268                 return rc;
3269         }
3270
3271         vscsi->dma_dev = vdev;
3272         vscsi->dev = vdev->dev;
3273         INIT_LIST_HEAD(&vscsi->schedule_q);
3274         INIT_LIST_HEAD(&vscsi->waiting_rsp);
3275         INIT_LIST_HEAD(&vscsi->active_q);
3276
3277         snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3278                  dev_name(&vdev->dev));
3279
3280         pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3281
3282         rc = read_dma_window(vscsi);
3283         if (rc)
3284                 goto free_adapter;
3285         pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
3286                  vscsi->dds.window[LOCAL].liobn,
3287                  vscsi->dds.window[REMOTE].liobn);
3288
3289         strcpy(vscsi->eye, "VSCSI ");
3290         strncat(vscsi->eye, vdev->name, MAX_EYE);
3291
3292         vscsi->dds.unit_id = vdev->unit_address;
3293         strncpy(vscsi->dds.partition_name, partition_name,
3294                 sizeof(vscsi->dds.partition_name));
3295         vscsi->dds.partition_num = partition_number;
3296
3297         spin_lock_bh(&ibmvscsis_dev_lock);
3298         list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3299         spin_unlock_bh(&ibmvscsis_dev_lock);
3300
3301         /*
3302          * TBD: How do we determine # of cmds to request?  Do we know how
3303          * many "children" we have?
3304          */
3305         vscsi->request_limit = INITIAL_SRP_LIMIT;
3306         rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3307                               SRP_MAX_IU_LEN);
3308         if (rc)
3309                 goto rem_list;
3310
3311         vscsi->target.ldata = vscsi;
3312
3313         rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3314         if (rc) {
3315                 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3316                         rc, vscsi->request_limit);
3317                 goto free_target;
3318         }
3319
3320         /*
3321          * Note: the lock is used in freeing timers, so must initialize
3322          * first so that ordering in case of error is correct.
3323          */
3324         spin_lock_init(&vscsi->intr_lock);
3325
3326         rc = ibmvscsis_alloctimer(vscsi);
3327         if (rc) {
3328                 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3329                 goto free_cmds;
3330         }
3331
3332         rc = ibmvscsis_create_command_q(vscsi, 256);
3333         if (rc) {
3334                 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3335                         rc);
3336                 goto free_timer;
3337         }
3338
3339         vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3340         if (!vscsi->map_buf) {
3341                 rc = -ENOMEM;
3342                 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3343                 goto destroy_queue;
3344         }
3345
3346         vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3347                                          DMA_BIDIRECTIONAL);
3348         if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3349                 rc = -ENOMEM;
3350                 dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3351                 goto free_buf;
3352         }
3353
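        /*
         * Ask the hypervisor for partner (client) information.  The second
         * argument appears to pack the DMA address of map_buf in the low
         * 32 bits and the buffer length (PAGE_SIZE) in the high 32 bits.
         */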
3354         hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3355                        (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3356                        0);
3357         if (hrc == H_SUCCESS)
3358                 vscsi->client_data.partition_number =
3359                         be64_to_cpu(*(u64 *)vscsi->map_buf);
3360         /*
3361          * We expect the VIOCTL to fail if we're configured as "any
3362          * client can connect" and the client isn't activated yet.
3363          * We'll make the call again when the client sends an init message.
3364          */
3365         pr_debug("probe hrc %ld, client partition num %d\n",
3366                  hrc, vscsi->client_data.partition_number);
3367
3368         tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3369                      (unsigned long)vscsi);
3370
3371         init_completion(&vscsi->wait_idle);
3372         init_completion(&vscsi->unconfig);
3373
3374         snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3375         vscsi->work_q = create_workqueue(wq_name);
3376         if (!vscsi->work_q) {
3377                 rc = -ENOMEM;
3378                 dev_err(&vscsi->dev, "create_workqueue failed\n");
3379                 goto unmap_buf;
3380         }
3381
3382         rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3383         if (rc) {
3384                 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3385                 rc = -EPERM;
3386                 goto destroy_WQ;
3387         }
3388
3389         vscsi->state = WAIT_ENABLED;
3390
3391         dev_set_drvdata(&vdev->dev, vscsi);
3392
3393         return 0;
3394
3395 destroy_WQ:
3396         destroy_workqueue(vscsi->work_q);
3397 unmap_buf:
3398         dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3399                          DMA_BIDIRECTIONAL);
3400 free_buf:
3401         kfree(vscsi->map_buf);
3402 destroy_queue:
3403         tasklet_kill(&vscsi->work_task);
3404         ibmvscsis_unregister_command_q(vscsi);
3405         ibmvscsis_destroy_command_q(vscsi);
3406 free_timer:
3407         ibmvscsis_freetimer(vscsi);
3408 free_cmds:
3409         ibmvscsis_free_cmds(vscsi);
3410 free_target:
3411         srp_target_free(&vscsi->target);
3412 rem_list:
3413         spin_lock_bh(&ibmvscsis_dev_lock);
3414         list_del(&vscsi->list);
3415         spin_unlock_bh(&ibmvscsis_dev_lock);
3416 free_adapter:
3417         kfree(vscsi);
3418
3419         return rc;
3420 }
3421
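/**
 * ibmvscsis_remove() - Adapter remove entry point
 * @vdev:       vio device being removed
 *
 * Posts an UNCONFIGURING disconnect, waits for the adapter to finish
 * unconfiguring, and then tears everything down in roughly the reverse
 * order of probe.
 */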
3422 static int ibmvscsis_remove(struct vio_dev *vdev)
3423 {
3424         struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3425
3426         pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3427
3428         spin_lock_bh(&vscsi->intr_lock);
3429         ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
3430         vscsi->flags |= CFG_SLEEPING;
3431         spin_unlock_bh(&vscsi->intr_lock);
3432         wait_for_completion(&vscsi->unconfig);
3433
3434         vio_disable_interrupts(vdev);
3435         free_irq(vdev->irq, vscsi);
3436         destroy_workqueue(vscsi->work_q);
3437         dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3438                          DMA_BIDIRECTIONAL);
3439         kfree(vscsi->map_buf);
3440         tasklet_kill(&vscsi->work_task);
3441         ibmvscsis_destroy_command_q(vscsi);
3442         ibmvscsis_freetimer(vscsi);
3443         ibmvscsis_free_cmds(vscsi);
3444         srp_target_free(&vscsi->target);
3445         spin_lock_bh(&ibmvscsis_dev_lock);
3446         list_del(&vscsi->list);
3447         spin_unlock_bh(&ibmvscsis_dev_lock);
3448         kfree(vscsi);
3449
3450         return 0;
3451 }
3452
3453 static ssize_t system_id_show(struct device *dev,
3454                               struct device_attribute *attr, char *buf)
3455 {
3456         return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3457 }
3458
3459 static ssize_t partition_number_show(struct device *dev,
3460                                      struct device_attribute *attr, char *buf)
3461 {
3462         return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3463 }
3464
3465 static ssize_t unit_address_show(struct device *dev,
3466                                  struct device_attribute *attr, char *buf)
3467 {
3468         struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3469
3470         return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3471 }
3472
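/*
 * Pull identifying information out of the device tree: the system model and
 * id, the partition name and number, and (if present) the maximum virtual
 * DMA size advertised by the vdevice node.
 */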
3473 static int ibmvscsis_get_system_info(void)
3474 {
3475         struct device_node *rootdn, *vdevdn;
3476         const char *id, *model, *name;
3477         const uint *num;
3478
3479         rootdn = of_find_node_by_path("/");
3480         if (!rootdn)
3481                 return -ENOENT;
3482
3483         model = of_get_property(rootdn, "model", NULL);
3484         id = of_get_property(rootdn, "system-id", NULL);
3485         if (model && id)
3486                 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3487
3488         name = of_get_property(rootdn, "ibm,partition-name", NULL);
3489         if (name)
3490                 strncpy(partition_name, name, sizeof(partition_name));
3491
3492         num = of_get_property(rootdn, "ibm,partition-no", NULL);
3493         if (num)
3494                 partition_number = of_read_number(num, 1);
3495
3496         of_node_put(rootdn);
3497
3498         vdevdn = of_find_node_by_path("/vdevice");
3499         if (vdevdn) {
3500                 const uint *mvds;
3501
3502                 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3503                                        NULL);
3504                 if (mvds)
3505                         max_vdma_size = *mvds;
3506                 of_node_put(vdevdn);
3507         }
3508
3509         return 0;
3510 }
3511
3512 static char *ibmvscsis_get_fabric_name(void)
3513 {
3514         return "ibmvscsis";
3515 }
3516
3517 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3518 {
3519         struct ibmvscsis_tport *tport =
3520                 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3521
3522         return tport->tport_name;
3523 }
3524
3525 static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3526 {
3527         struct ibmvscsis_tport *tport =
3528                 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3529
3530         return tport->tport_tpgt;
3531 }
3532
3533 static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
3534 {
3535         return 1;
3536 }
3537
3538 static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
3539 {
3540         return 1;
3541 }
3542
3543 static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
3544 {
3545         return 0;
3546 }
3547
3548 static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
3549 {
3550         return 1;
3551 }
3552
3553 static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
3554 {
3555         return target_put_sess_cmd(se_cmd);
3556 }
3557
3558 static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3559 {
3560         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3561                                                  se_cmd);
3562         struct scsi_info *vscsi = cmd->adapter;
3563
3564         spin_lock_bh(&vscsi->intr_lock);
3565         /* Remove from active_q */
3566         list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3567         ibmvscsis_send_messages(vscsi);
3568         spin_unlock_bh(&vscsi->intr_lock);
3569 }
3570
3571 static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
3572 {
3573         return 0;
3574 }
3575
3576 static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3577 {
3578         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3579                                                  se_cmd);
3580         struct iu_entry *iue = cmd->iue;
3581         int rc;
3582
3583         rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3584                                1, 1);
3585         if (rc) {
3586                 pr_err("srp_transfer_data() failed: %d\n", rc);
3587                 return -EAGAIN;
3588         }
3589         /*
3590          * We now tell TCM to add this WRITE CDB directly into the TCM storage
3591          * object execution queue.
3592          */
3593         target_execute_cmd(se_cmd);
3594         return 0;
3595 }
3596
3597 static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
3598 {
3599         return 0;
3600 }
3601
3602 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
3603 {
3604 }
3605
3606 static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
3607 {
3608         return 0;
3609 }
3610
3611 static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3612 {
3613         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3614                                                  se_cmd);
3615         struct iu_entry *iue = cmd->iue;
3616         struct scsi_info *vscsi = cmd->adapter;
3617         char *sd;
3618         uint len = 0;
3619         int rc;
3620
3621         rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3622                                1);
3623         if (rc) {
3624                 pr_err("srp_transfer_data failed: %d\n", rc);
3625                 sd = se_cmd->sense_buffer;
3626                 se_cmd->scsi_sense_length = 18;
3627                 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
3628                 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
3629                 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
3630                                         0x08, 0x01);
3631         }
3632
3633         srp_build_response(vscsi, cmd, &len);
3634         cmd->rsp.format = SRP_FORMAT;
3635         cmd->rsp.len = len;
3636
3637         return 0;
3638 }
3639
3640 static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
3641 {
3642         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3643                                                  se_cmd);
3644         struct scsi_info *vscsi = cmd->adapter;
3645         uint len;
3646
3647         pr_debug("queue_status %p\n", se_cmd);
3648
3649         srp_build_response(vscsi, cmd, &len);
3650         cmd->rsp.format = SRP_FORMAT;
3651         cmd->rsp.len = len;
3652
3653         return 0;
3654 }
3655
3656 static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
3657 {
3658         struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
3659                                                  se_cmd);
3660         struct scsi_info *vscsi = cmd->adapter;
3661         uint len;
3662
3663         pr_debug("queue_tm_rsp %p, status %d\n",
3664                  se_cmd, (int)se_cmd->se_tmr_req->response);
3665
3666         srp_build_response(vscsi, cmd, &len);
3667         cmd->rsp.format = SRP_FORMAT;
3668         cmd->rsp.len = len;
3669 }
3670
3671 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
3672 {
3673         /* TBD: What (if anything) should we do here? */
3674         pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
3675 }
3676
3677 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3678                                            struct config_group *group,
3679                                            const char *name)
3680 {
3681         struct ibmvscsis_tport *tport;
3682
3683         tport = ibmvscsis_lookup_port(name);
3684         if (tport) {
3685                 tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3686                 pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
3687                          name, tport, tport->tport_proto_id);
3688                 return &tport->tport_wwn;
3689         }
3690
3691         return ERR_PTR(-EINVAL);
3692 }
3693
3694 static void ibmvscsis_drop_tport(struct se_wwn *wwn)
3695 {
3696         struct ibmvscsis_tport *tport = container_of(wwn,
3697                                                      struct ibmvscsis_tport,
3698                                                      tport_wwn);
3699
3700         pr_debug("drop_tport(%s)\n",
3701                  config_item_name(&tport->tport_wwn.wwn_group.cg_item));
3702 }
3703
3704 static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3705                                                   struct config_group *group,
3706                                                   const char *name)
3707 {
3708         struct ibmvscsis_tport *tport =
3709                 container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3710         int rc;
3711
3712         tport->releasing = false;
3713
3714         rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3715                                tport->tport_proto_id);
3716         if (rc)
3717                 return ERR_PTR(rc);
3718
3719         return &tport->se_tpg;
3720 }
3721
3722 static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
3723 {
3724         struct ibmvscsis_tport *tport = container_of(se_tpg,
3725                                                      struct ibmvscsis_tport,
3726                                                      se_tpg);
3727
3728         tport->releasing = true;
3729         tport->enabled = false;
3730
3731         /*
3732          * Release the virtual I_T Nexus for this ibmvscsis TPG
3733          */
3734         ibmvscsis_drop_nexus(tport);
3735         /*
3736          * Deregister the se_tpg from TCM..
3737          */
3738         core_tpg_deregister(se_tpg);
3739 }
3740
3741 static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
3742                                           char *page)
3743 {
3744         return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
3745 }
3746 CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3747
3748 static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
3749         &ibmvscsis_wwn_attr_version,
3750         NULL,
3751 };
3752
3753 static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3754                                          char *page)
3755 {
3756         struct se_portal_group *se_tpg = to_tpg(item);
3757         struct ibmvscsis_tport *tport = container_of(se_tpg,
3758                                                      struct ibmvscsis_tport,
3759                                                      se_tpg);
3760
3761         return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3762 }
3763
3764 static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3765                                           const char *page, size_t count)
3766 {
3767         struct se_portal_group *se_tpg = to_tpg(item);
3768         struct ibmvscsis_tport *tport = container_of(se_tpg,
3769                                                      struct ibmvscsis_tport,
3770                                                      se_tpg);
3771         struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3772         unsigned long tmp;
3773         int rc;
3774         long lrc;
3775
3776         rc = kstrtoul(page, 0, &tmp);
3777         if (rc < 0) {
3778                 pr_err("Unable to extract srpt_tpg_store_enable\n");
3779                 return -EINVAL;
3780         }
3781
3782         if ((tmp != 0) && (tmp != 1)) {
3783                 pr_err("Illegal value for srpt_tpg_store_enable\n");
3784                 return -EINVAL;
3785         }
3786
3787         if (tmp) {
3788                 spin_lock_bh(&vscsi->intr_lock);
3789                 tport->enabled = true;
3790                 lrc = ibmvscsis_enable_change_state(vscsi);
3791                 if (lrc)
3792                         pr_err("enable_change_state failed, rc %ld state %d\n",
3793                                lrc, vscsi->state);
3794                 spin_unlock_bh(&vscsi->intr_lock);
3795         } else {
3796                 spin_lock_bh(&vscsi->intr_lock);
3797                 tport->enabled = false;
3798                 /* This simulates the server going down */
3799                 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
3800                 spin_unlock_bh(&vscsi->intr_lock);
3801         }
3802
3803         pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
3804
3805         return count;
3806 }
3807 CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
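/*
 * With the standard TCM configfs layout, user space would typically enable
 * the port with something like (path components are illustrative):
 *
 *   echo 1 > /sys/kernel/config/target/ibmvscsis/<tport_name>/tpgt_1/enable
 */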
3808
3809 static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
3810         &ibmvscsis_tpg_attr_enable,
3811         NULL,
3812 };
3813
3814 static const struct target_core_fabric_ops ibmvscsis_ops = {
3815         .module                         = THIS_MODULE,
3816         .name                           = "ibmvscsis",
3817         .get_fabric_name                = ibmvscsis_get_fabric_name,
3818         .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
3819         .tpg_get_tag                    = ibmvscsis_get_tag,
3820         .tpg_get_default_depth          = ibmvscsis_get_default_depth,
3821         .tpg_check_demo_mode            = ibmvscsis_check_true,
3822         .tpg_check_demo_mode_cache      = ibmvscsis_check_true,
3823         .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
3824         .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
3825         .tpg_get_inst_index             = ibmvscsis_tpg_get_inst_index,
3826         .check_stop_free                = ibmvscsis_check_stop_free,
3827         .release_cmd                    = ibmvscsis_release_cmd,
3828         .sess_get_index                 = ibmvscsis_sess_get_index,
3829         .write_pending                  = ibmvscsis_write_pending,
3830         .write_pending_status           = ibmvscsis_write_pending_status,
3831         .set_default_node_attributes    = ibmvscsis_set_default_node_attrs,
3832         .get_cmd_state                  = ibmvscsis_get_cmd_state,
3833         .queue_data_in                  = ibmvscsis_queue_data_in,
3834         .queue_status                   = ibmvscsis_queue_status,
3835         .queue_tm_rsp                   = ibmvscsis_queue_tm_rsp,
3836         .aborted_task                   = ibmvscsis_aborted_task,
3837         /*
3838          * Setup function pointers for logic in target_core_fabric_configfs.c
3839          */
3840         .fabric_make_wwn                = ibmvscsis_make_tport,
3841         .fabric_drop_wwn                = ibmvscsis_drop_tport,
3842         .fabric_make_tpg                = ibmvscsis_make_tpg,
3843         .fabric_drop_tpg                = ibmvscsis_drop_tpg,
3844
3845         .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
3846         .tfc_tpg_base_attrs             = ibmvscsis_tpg_attrs,
3847 };
3848
3849 static void ibmvscsis_dev_release(struct device *dev) {}
3850
3851 static struct class_attribute ibmvscsis_class_attrs[] = {
3852         __ATTR_NULL,
3853 };
3854
3855 static struct device_attribute dev_attr_system_id =
3856         __ATTR(system_id, S_IRUGO, system_id_show, NULL);
3857
3858 static struct device_attribute dev_attr_partition_number =
3859         __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
3860
3861 static struct device_attribute dev_attr_unit_address =
3862         __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
3863
3864 static struct attribute *ibmvscsis_dev_attrs[] = {
3865         &dev_attr_system_id.attr,
3866         &dev_attr_partition_number.attr,
3867         &dev_attr_unit_address.attr,
             NULL,
3868 };
3869 ATTRIBUTE_GROUPS(ibmvscsis_dev);
3870
3871 static struct class ibmvscsis_class = {
3872         .name           = "ibmvscsis",
3873         .dev_release    = ibmvscsis_dev_release,
3874         .class_attrs    = ibmvscsis_class_attrs,
3875         .dev_groups     = ibmvscsis_dev_groups,
3876 };
3877
3878 static struct vio_device_id ibmvscsis_device_table[] = {
3879         { "v-scsi-host", "IBM,v-scsi-host" },
3880         { "", "" }
3881 };
3882 MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
3883
3884 static struct vio_driver ibmvscsis_driver = {
3885         .name = "ibmvscsis",
3886         .id_table = ibmvscsis_device_table,
3887         .probe = ibmvscsis_probe,
3888         .remove = ibmvscsis_remove,
3889 };
3890
3891 /*
3892  * ibmvscsis_init() - Kernel Module initialization
3893  *
3894  * Note: vio_register_driver() registers callback functions, and at least one
3895  * of those callback functions calls TCM - Linux IO Target Subsystem, thus
3896  * the SCSI Target template must be registered before vio_register_driver()
3897  * is called.
3898  */
3899 static int __init ibmvscsis_init(void)
3900 {
3901         int rc = 0;
3902
3903         rc = ibmvscsis_get_system_info();
3904         if (rc) {
3905                 pr_err("rc %d from get_system_info\n", rc);
3906                 goto out;
3907         }
3908
3909         rc = class_register(&ibmvscsis_class);
3910         if (rc) {
3911                 pr_err("failed class register\n");
3912                 goto out;
3913         }
3914
3915         rc = target_register_template(&ibmvscsis_ops);
3916         if (rc) {
3917                 pr_err("rc %d from target_register_template\n", rc);
3918                 goto unregister_class;
3919         }
3920
3921         rc = vio_register_driver(&ibmvscsis_driver);
3922         if (rc) {
3923                 pr_err("rc %d from vio_register_driver\n", rc);
3924                 goto unregister_target;
3925         }
3926
3927         return 0;
3928
3929 unregister_target:
3930         target_unregister_template(&ibmvscsis_ops);
3931 unregister_class:
3932         class_unregister(&ibmvscsis_class);
3933 out:
3934         return rc;
3935 }
3936
3937 static void __exit ibmvscsis_exit(void)
3938 {
3939         pr_info("Unregister IBM virtual SCSI host driver\n");
3940         vio_unregister_driver(&ibmvscsis_driver);
3941         target_unregister_template(&ibmvscsis_ops);
3942         class_unregister(&ibmvscsis_class);
3943 }
3944
3945 MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
3946 MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
3947 MODULE_LICENSE("GPL");
3948 MODULE_VERSION(IBMVSCSIS_VERSION);
3949 module_init(ibmvscsis_init);
3950 module_exit(ibmvscsis_exit);