/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;

	return lpm;
}

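/*
 * Example (illustrative only): with a path mask of 0x80 (path 0) and an
 * available-path mask of 0x08 (path 4), the loop shifts 0x80 right until
 * the bits overlap:
 *
 *	lpm_adjust(0x80, 0x08) returns 0x08
 *	lpm_adjust(0x80, 0x00) returns 0    (no path available)
 */
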
/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);

	return req->mask;
}

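/*
 * Illustration (not part of the driver): starting from req->mask == 0x20
 * with req->lpm == 0x2a, ccwreq_next_path() shifts the mask to 0x10 and
 * lpm_adjust() then skips ahead to the next usable path, 0x08. A return
 * value of 0 means all paths are exhausted.
 */
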
/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	sch->lpm = sch->schib.pmcw.pam;
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	ccwreq_stop(cdev, rc);
}

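/*
 * Retry accounting (informational): req->retries is decremented once per
 * start attempt, so with req->maxretries == 2 a path is started at most
 * twice before ccwreq_next_path() moves the mask on. A start that fails
 * with -EACCES skips the remaining retries for that path immediately.
 */
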
/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Try all paths twice to counter link flapping. */
	req->mask = 0x8080;
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask, req->lpm);
	req->drc = 0;
	req->done = 0;
	req->cancel = 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}

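/*
 * Caller sketch (hypothetical values, for illustration only): an internal
 * user fills cdev->private->req before starting the request.
 *
 *	struct ccw_request *req = &cdev->private->req;
 *
 *	req->cp         = cp;             (channel program to execute)
 *	req->timeout    = 10 * HZ;        (per-start timeout)
 *	req->maxretries = 2;              (start attempts per path)
 *	req->lpm        = sch->schib.pmcw.pam;
 *	req->callback   = mydrv_done;     (hypothetical completion hook)
 *	ccw_request_start(cdev);
 */
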
/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}

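/*
 * Usage note (illustrative): the return value tells the caller whether a
 * final callback is still outstanding.
 *
 *	if (ccw_request_cancel(cdev) == 0)
 *		... request still pending; it completes through the regular
 *		    interrupt handler, which reports -EIO via the callback
 *	else
 *		... request had already finished
 */
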
/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;

	return IO_DONE;
}

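/*
 * Summary of the checks above (informational):
 *
 *	halt/clear function indicated         -> IO_KILLED
 *	cc == 3 or path-not-operational       -> IO_PATH_ERROR
 *	sense data with command reject        -> IO_REJECTED
 *	other sense, channel or device errors -> IO_STATUS_ERROR
 *	device end not yet received           -> IO_RUNNING
 *	clean device end                      -> IO_DONE
 */
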
/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;

	data.dev_id = cdev->private->dev_id;
	data.retries = req->retries;
	data.lpm = (u8) req->mask;
	data.status = (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}

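/*
 * The record ends up in the CIO trace area under the "reqstat" event as a
 * packed hex dump, e.g. (hypothetical values) dev_id 0.0.4711, retries 1,
 * lpm 0x40, status IO_PATH_ERROR.
 */
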
/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct irb *irb = (struct irb *) __LC_IRB;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto err_path;
	case IO_STATUS_ERROR:
		goto err;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto err_path;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto err_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

err_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
	ccwreq_do(cdev);
	return;

err:
	ccwreq_stop(cdev, rc);
}

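/*
 * Flow sketch (informational): successful completions are confirmed with
 * the initiator's check() hook; path errors and uncancelled kills rotate
 * to the next path and restart via ccwreq_do(). Everything else stops the
 * request:
 *
 *	IO_REJECTED / IO_STATUS_ERROR  -> ccwreq_stop(cdev, -EOPNOTSUPP)
 *	cancelled IO_KILLED            -> ccwreq_stop(cdev, -EIO)
 *	paths exhausted                -> ccwreq_stop(cdev, -EACCES)
 */
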
/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (!ccwreq_next_path(cdev)) {
		/* Set the final return code for this request. */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper (device not operational) event during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}