/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 * XPC provides a message passing capability that crosses partition
 * boundaries. This module is made up of two parts:
 *
 *     partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *     channel		This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 * There are a couple of additional functions residing in XP, which
 * provide an interface to XPC for its users.
 *
 *
 * Caveats:
 *
 *   . Currently on sn2, we have no way to determine which nasid an IRQ
 *     came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *     followed by an IPI. The amo indicates where data is to be pulled
 *     from, so after the IPI arrives, the remote partition checks the amo
 *     word. The IPI can actually arrive before the amo however, so other
 *     code must periodically check for this case. Also, remote amo
 *     operations do not reliably time out. Thus we do a remote PIO read
 *     solely to know whether the remote partition is down and whether we
 *     should stop sending IPIs to it. This remote PIO read operation is
 *     set up in a special nofault region so SAL knows to ignore (and
 *     cleanup) any errors due to the remote amo write, PIO read, and/or
 *     PIO write operations.
 *
 *     If/when new hardware solves this IPI problem, we should abandon
 *     the current approach.
 *
 */
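/*
 * For orientation: XPC's users reach this module through the interface that
 * xpc_init() registers via xpc_set_interface() at the bottom of this file.
 * A sketch, with illustrative (not authoritative) argument names, of how a
 * user connects a channel through XP's xpc_connect() wrapper:
 *
 *	ret = xpc_connect(ch_number, my_channel_func, my_key, payload_size,
 *			  nentries, assigned_limit, idle_limit);
 */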

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;
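/*
 * The tables above are registered in xpc_init() via
 * register_sysctl_table(xpc_sys_dir) and appear as:
 *
 *	/proc/sys/xpc/hb/hb_interval
 *	/proc/sys/xpc/hb/hb_check_interval
 *	/proc/sys/xpc/disengage_timelimit
 */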

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
	struct xpc_partition *part = from_timer(part, t, disengage_timer);

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat. The timer structure's function is
 * already set when this is initially called. A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(struct timer_list *unused)
{
	xpc_arch_ops.increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}

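/*
 * Start the local heartbeat: initialize the arch-specific heartbeat state
 * and arm the timer; xpc_hb_beater() then re-arms itself every
 * xpc_hb_interval seconds.
 */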
static void
xpc_start_hb_beater(void)
{
	xpc_arch_ops.heartbeat_init();
	timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
	xpc_hb_beater(0);
}

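/*
 * Stop the local heartbeat: delete the timer (synchronously, so no handler
 * is still running) and let the arch-specific code tear down its state.
 */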
static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_arch_ops.get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB, through which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, which, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will be
		 * servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

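/*
 * Note the calling convention used below for part->remote_openclose_args:
 * the caller keeps *base solely so it can later kfree(*base), while the
 * returned (possibly offset) cacheline-aligned pointer is the one it uses.
 */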
/*
 * Set up the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_arch_ops.setup_ch_structures(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_arch_ops.allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_arch_ops.disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_arch_ops.request_partition_reactivation(part);
	}

	return 0;
}

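/*
 * Request activation of a partition: mark it XPC_P_AS_ACTIVATION_REQ and
 * spawn the "xpc%02d" kthread that will run xpc_activating() on its behalf.
 */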
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

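/*
 * Ensure 'needed' kthreads are available to deliver a channel's messages:
 * wake up to 'needed' idle kthreads first, then create new ones for whatever
 * remains, subject to the channel's kthreads_assigned_limit.
 */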
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

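/*
 * Entry point of the kthreads created by xpc_create_kthreads(). Makes the
 * connected callout to the channel's registerer if it hasn't been made yet,
 * delivers payloads until the channel begins disconnecting, then makes the
 * disconnecting callout and drops the references taken on its behalf.
 */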
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

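/*
 * For each partition, wait until all callouts on the given channel number
 * have ceased (XPC_C_WDISCONNECT cleared), re-queue any channel control
 * flags that arrived in the interim and, if any did, wake the partition's
 * channel manager.
 */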
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

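/*
 * Allocate the xpc_partitions[] array and initialize the few fields of each
 * entry that must be valid before its partition is activated, then hand off
 * to the arch-specific per-partition setup.
 */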
static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		timer_setup(&part->disengage_timer,
			    xpc_timeout_partition_disengage, 0);

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
	xpc_arch_ops.teardown_partitions();
	kfree(xpc_partitions);
}

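/*
 * Common exit path for module unload, reboot, halt and poweroff: stop the
 * discovery and heartbeat-checker threads, deactivate all partitions
 * (honoring the disengage timelimit) and unwind everything xpc_init() set up.
 */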
static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/* Used to only allow one cpu to complete disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
		return;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_arch_ops.partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_arch_ops.request_partition_deactivation(part);
			xpc_arch_ops.indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_arch_ops.any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_arch_ops.partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_arch_ops.offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_arch_ops.online_heartbeat();
		break;
	}
#else
	struct die_args *die_args = _die_args;

	switch (event) {
	case DIE_TRAP:
		if (die_args->trapnr == X86_TRAP_DF)
			xpc_die_deactivate();

		if (((die_args->trapnr == X86_TRAP_MF) ||
		     (die_args->trapnr == X86_TRAP_XF)) &&
		    !user_mode(die_args->regs))
			xpc_die_deactivate();

		break;
	case DIE_INT3:
	case DIE_DEBUG:
		break;
	case DIE_OOPS:
	case DIE_GPF:
	default:
		xpc_die_deactivate();
	}
#endif

	return NOTIFY_DONE;
}

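/*
 * Module entry point: pick and initialize the arch-specific support (sn2 or
 * uv), set up the partition structures, reserved page and notifiers, start
 * the heartbeat-checker and discovery kthreads, and finally point XP's
 * interface at XPC's functions.
 */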
int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc. This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");