/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
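
/* How the state variables above interact (summary of the code below):
 * sclp_running_state tracks the single request that may be active at the
 * SCLP at any one time, sclp_reading_state ensures at most one read event
 * request is queued, sclp_activation_state gates sclp_add_request() and
 * event dispatching, and sclp_mask_state serializes write-mask (init mask)
 * requests. All transitions happen under sclp_lock. */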

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}

static inline void __sclp_make_read_req(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
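
/* Typical caller pattern (illustrative sketch only, not part of the
 * original file): fill a request with a command word and a page-aligned
 * SCCB, queue it, and wait for the callback. Field and constant names
 * follow the ones used in this file:
 *
 *	static void my_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	req->command = SCLP_CMDW_READDATA;	(any valid command word)
 *	req->sccb = sccb;
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_cb;
 *	req->callback_data = &done;
 *	if (sclp_add_request(req) == 0)
 *		wait_for_completion(&done);
 */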

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
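			/* Event type n corresponds to mask bit n counted
			 * from the MSB: type 1 maps to 0x80000000. */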
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
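	/* ext_params carries the address of the finished SCCB in its upper
	 * bits (SCCBs are 8-byte aligned, hence the ~0x7 mask) and an
	 * "event buffers pending" indication in the low bits. */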
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
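/* The TOD clock ticks so that bit 51 increments every microsecond, hence
 * 2^32 TOD units are 2^20 microseconds (about 1.048576 s). Shifting whole
 * seconds left by 32 therefore overestimates the interval by roughly 5%,
 * which is harmless for a timeout bound. */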
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	trace_hardirqs_on();
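	/* Assumed CR0 layout behind the constants below: 0x00000200 is the
	 * service-signal external subclass mask, while the bits cleared by
	 * 0xFFFFF3AC cover the clock-comparator, CPU-timer and other
	 * external subclasses, so only the service signal can get through
	 * while we spin. */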
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		barrier();
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static inline void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

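	/* The scan restarts from the list head after each callback:
	 * sclp_lock is dropped while calling out, so the list may change
	 * underneath us. */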
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

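/* SCCB layout for the write-mask command: receive_mask and send_mask carry
 * the masks requested by the kernel, while sclp_send_mask and
 * sclp_receive_mask are filled in by the SCLP with the masks actually in
 * effect (read back in sclp_init_mask()). */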
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
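	/* Probe the interface by issuing a write-mask command with empty
	 * masks directly via service_call() (the request machinery is not
	 * initialized yet) and wait for either the service-signal interrupt
	 * or the check timeout. */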
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}