[S390] sclp: invalid handling of temporary 'not operational' status
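As a rough orientation before the hunks, here is a minimal stand-alone sketch of the
retry handling this patch introduces. It is plain user-space C with made-up names
(request_timeout, queue_read_req, may_abort), not the driver code: a forced timeout
queues a read-event request to bring the interface back to a defined state instead of
silently dropping the running request, and a request that was already submitted once
is never aborted, since it could still be active at the SCLP.

/* Illustrative sketch only; hypothetical names, not the sclp driver. */
#include <stdio.h>

enum running_state { IDLE, RUNNING, RESET_PENDING };

struct req {
	int start_count;	/* how often the request was started */
};

static enum running_state running = RUNNING;
static int read_req_queued;

/* Queue a NOP read-event request to reach a defined interface state. */
static void queue_read_req(void)
{
	read_req_queued = 1;
}

/* Timeout handler: 'force' means the currently running request timed out. */
static void request_timeout(int force)
{
	if (force && running == RUNNING) {
		queue_read_req();
		running = IDLE;
	}
	/* non-forced case: only the busy-interval timer would be re-armed */
}

/* Retry policy from queue processing: abort only requests that have not
 * been submitted to the SCLP before. */
static int may_abort(const struct req *r)
{
	return r->start_count <= 1;
}

int main(void)
{
	struct req r = { .start_count = 2 };

	request_timeout(1);
	printf("running=%d read_queued=%d may_abort=%d\n",
	       running, read_req_queued, may_abort(&r));
	return 0;
}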
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 985d1613baaa59235d5c6a6edcb76e91f5fa9b0c..3457a9a31571eb135513980a79716720331dbd20 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
 /* Internal state: is a request active at the sclp? */
 static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
-       sclp_running_state_running
+       sclp_running_state_running,
+       sclp_running_state_reset_pending
 } sclp_running_state = sclp_running_state_idle;
 
 /* Internal state: is a read request pending? */
@@ -88,7 +89,7 @@ static volatile enum sclp_mask_state_t {
 
 /* Timeout intervals in seconds.*/
 #define SCLP_BUSY_INTERVAL     10
-#define SCLP_RETRY_INTERVAL    15
+#define SCLP_RETRY_INTERVAL    30
 
 static void sclp_process_queue(void);
 static int sclp_init_mask(int calculate);
@@ -100,13 +101,12 @@ service_call(sclp_cmdw_t command, void *sccb)
 {
        int cc;
 
-       __asm__ __volatile__(
-               "   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
-               "   ipm   %0\n"
-               "   srl   %0,28"
-               : "=&d" (cc)
-               : "d" (command), "a" (__pa(sccb))
-               : "cc", "memory" );
+       asm volatile(
+               "       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
+               "       ipm     %0\n"
+               "       srl     %0,28"
+               : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+               : "cc", "memory");
        if (cc == 3)
                return -EIO;
        if (cc == 2)
@@ -114,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
        return 0;
 }
 
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
- * force restart of running request. */
+static inline void __sclp_make_read_req(void);
+
 static void
-sclp_request_timeout(unsigned long data)
+__sclp_queue_read_req(void)
 {
-       unsigned long flags;
-
-       if (data) {
-               spin_lock_irqsave(&sclp_lock, flags);
-               sclp_running_state = sclp_running_state_idle;
-               spin_unlock_irqrestore(&sclp_lock, flags);
+       if (sclp_reading_state == sclp_reading_state_idle) {
+               sclp_reading_state = sclp_reading_state_reading;
+               __sclp_make_read_req();
+               /* Add request to head of queue */
+               list_add(&sclp_read_req.list, &sclp_req_queue);
        }
-       sclp_process_queue();
 }
 
 /* Set up request retry timer. Called while sclp_lock is locked. */
@@ -141,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
        add_timer(&sclp_request_timer);
 }
 
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sclp_lock, flags);
+       if (data) {
+               if (sclp_running_state == sclp_running_state_running) {
+                       /* Break running state and queue NOP read event request
+                        * to get a defined interface state. */
+                       __sclp_queue_read_req();
+                       sclp_running_state = sclp_running_state_idle;
+               }
+       } else {
+               __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+                                        sclp_request_timeout, 0);
+       }
+       spin_unlock_irqrestore(&sclp_lock, flags);
+       sclp_process_queue();
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
@@ -192,7 +213,15 @@ sclp_process_queue(void)
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
-               /* Request failed. */
+               /* Request failed */
+               if (req->start_count > 1) {
+                       /* Cannot abort already submitted request - could still
+                        * be active at the SCLP */
+                       __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+                                                sclp_request_timeout, 0);
+                       break;
+               }
+               /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
@@ -222,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
-       if (req->list.prev == &sclp_req_queue) {
+       if (sclp_running_state == sclp_running_state_idle &&
+           req->list.prev == &sclp_req_queue) {
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
@@ -325,7 +355,7 @@ __sclp_find_req(u32 sccb)
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
 static void
-sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
+sclp_interrupt_handler(__u16 code)
 {
        struct sclp_req *req;
        u32 finished_sccb;
@@ -335,6 +365,8 @@ sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
        finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
        evbuf_pending = S390_lowcore.ext_params & 0x3;
        if (finished_sccb) {
+               del_timer(&sclp_request_timer);
+               sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
@@ -349,27 +381,12 @@ sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending && sclp_receive_mask != 0 &&
-           sclp_reading_state == sclp_reading_state_idle &&
-           sclp_activation_state == sclp_activation_state_active ) {
-               sclp_reading_state = sclp_reading_state_reading;
-               __sclp_make_read_req();
-               /* Add request to head of queue */
-               list_add(&sclp_read_req.list, &sclp_req_queue);
-       }
+           sclp_activation_state == sclp_activation_state_active)
+               __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
 }
 
-/* Return current Time-Of-Day clock. */
-static inline u64
-sclp_get_clock(void)
-{
-       u64 result;
-
-       asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
-       return result;
-}
-
 /* Convert interval in jiffies to TOD ticks. */
 static inline u64
 sclp_tod_from_jiffies(unsigned long jiffies)
@@ -382,7 +399,6 @@ sclp_tod_from_jiffies(unsigned long jiffies)
 void
 sclp_sync_wait(void)
 {
 {
-       unsigned long psw_mask;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
@@ -392,7 +408,7 @@ sclp_sync_wait(void)
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
-               timeout = sclp_get_clock() +
+               timeout = get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
@@ -406,13 +422,12 @@ sclp_sync_wait(void)
        cr0_sync |= 0x00000200;
        cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);
-       asm volatile ("STOSM 0(%1),0x01"
-                     : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
+       __raw_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
-                   sclp_get_clock() > timeout &&
+                   get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                barrier();
@@ -756,7 +771,7 @@ EXPORT_SYMBOL(sclp_reactivate);
 /* Handler for external interruption used during initialization. Modify
  * request state to done. */
 static void
-sclp_check_handler(struct pt_regs *regs, __u16 code)
+sclp_check_handler(__u16 code)
 {
        u32 finished_sccb;