// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

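/*
 * Compact trace entry: a four-character event tag plus two payload
 * values whose meaning depends on the tag (documented at each trace
 * call site below).
 */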
struct sclp_trace_entry {
	char id[4] __nonstring;
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
bool sclp_console_drop = true;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;

static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strtomem(e.id, id);
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area usage by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}

static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}

static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	if (sccb) {
		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
			       err ? sccb->length : 0);
	}
}

static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}

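/*
 * Trace a request by packing four 16-bit summary fields (status,
 * response code, queue timeout and start count) into the single 64-bit
 * payload value of a trace entry.
 */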
static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, __pa(sccb), summary.b, err);
}

static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	return kstrtobool(str, &sclp_console_drop) == 0;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

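/*
 * Driver state is tracked by the four small state machines below:
 * whether a request is active at the SCLP, whether a read request is
 * pending, whether the interface as a whole is being (de)activated,
 * and whether an init mask request is in flight. Transitions happen
 * under sclp_lock.
 */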
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

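/* Queue a read event data request at the head of the request queue so
 * that pending event buffers are fetched before other queued requests.
 * Called while sclp_lock is locked. */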
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force_restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

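/*
 * Issue a service call and trace both the attempt and its result. The
 * SRV1/SRV2 trace entries of one call share a sequence number, and on
 * success the command word is remembered in active_cmd so that the
 * interrupt handler can check the completion against it.
 */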
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

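/* Check whether a request may be queued: the init request is always
 * accepted, all others only while the driver is initialized and the
 * interface is active. */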
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

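/* Note: a request with a non-zero queue_timeout (in seconds) is also
 * armed on sclp_queue_timer below, so that it can be failed with
 * SCLP_REQ_QUEUED_TIMEOUT if it waits in the queue for too long. */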
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

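/* Note: the evbuf walk below keeps its position as a byte offset into
 * the SCCB because sclp_lock is dropped while a receiver callback runs. */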
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == __pa(req->sccb))
			return req;
	}
	return NULL;
}

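/* Check whether a finished SCCB carries a sane response: the low byte
 * of the response code must indicate success (0x10 or 0x20), and an
 * outgoing event must have its event-processed flag set. */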
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}

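/* The service-signal interrupt parameter encodes the address of the
 * finished SCCB in its upper bits (mask 0xfffffff8) and event-buffer-
 * pending indications in its low bits. */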
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)__va(finished_sccb),
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

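/* One second corresponds to roughly 2^32 TOD clock units, so whole
 * seconds can be converted with a simple shift. */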
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

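/*
 * Implementation note: this busy-waits with the timer tick disabled but
 * service-signal interrupts enabled; an expired request timer is
 * detected by comparing TOD timestamps and its callback is then invoked
 * by hand.
 */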
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	struct ctlreg cr0, cr0_sync;
	unsigned long flags;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	local_ctl_store(0, &cr0);
	cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync.val |= 1UL << (63 - 54);
	local_ctl_load(0, &cr0_sync);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	local_ctl_load(0, &cr0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);

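/* Note: the loop below restarts the list walk after every callback,
 * since sclp_lock is dropped while the callback runs and the list may
 * change underneath. */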
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

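/* The mask length written into the init SCCB depends on whether the
 * interface required the older, shorter mask format; see the 0x74f0
 * response handling in sclp_check_interface(). */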
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != __pa(sclp_init_sccb))
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

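/* Driver-level sysfs attributes for the console page pool (typically
 * exposed under /sys/bus/platform/drivers/sclp/). */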
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
{
	int rc;

	rc = kstrtobool(buf, &sclp_console_drop);
	return rc ?: count;
}

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RW(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};

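/* The SCCB pages below are allocated with GFP_DMA: the completion
 * interrupt reports a finished SCCB as a 32-bit address, so the
 * buffers must reside in low memory. */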
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

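/* arch_initcall runs before the regular device initcalls, making the
 * SCLP core available early for the console drivers that build on it. */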
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);