/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *      XPC provides a message passing capability that crosses partition
 *      boundaries. This module is made up of two parts:
 *
 *          partition   This part detects the presence/absence of other
 *                      partitions. It provides a heartbeat and monitors
 *                      the heartbeats of other partitions.
 *
 *          channel     This part manages the channels and sends/receives
 *                      messages across them to/from other partitions.
 *
 *      There are a couple of additional functions residing in XP, which
 *      provide an interface to XPC for its users.
 *
 *
 *      Caveats:
 *
 *        . Currently on sn2, we have no way to determine which nasid an IRQ
 *          came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *          followed by an IPI. The amo indicates where data is to be pulled
 *          from, so after the IPI arrives, the remote partition checks the amo
 *          word. The IPI can actually arrive before the amo however, so other
 *          code must periodically check for this case. Also, remote amo
 *          operations do not reliably time out. Thus we do a remote PIO read
 *          solely to know whether the remote partition is down and whether we
 *          should stop sending IPIs to it. This remote PIO read operation is
 *          set up in a special nofault region so SAL knows to ignore (and
 *          cleanup) any errors due to the remote amo write, PIO read, and/or
 *          PIO write operations.
 *
 *          If/when new hardware solves this IPI problem, we should abandon
 *          the current approach.
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

static struct device_driver xpc_dbg_name = {
        .name = "xpc"
};

static struct device xpc_part_dbg_subname = {
        .init_name = "",        /* set to "part" at xpc_init() time */
        .driver = &xpc_dbg_name
};

static struct device xpc_chan_dbg_subname = {
        .init_name = "",        /* set to "chan" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb[] = {
        {
         .procname = "hb_interval",
         .data = &xpc_hb_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = proc_dointvec_minmax,
         .extra1 = &xpc_hb_min_interval,
         .extra2 = &xpc_hb_max_interval},
        {
         .procname = "hb_check_interval",
         .data = &xpc_hb_check_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = proc_dointvec_minmax,
         .extra1 = &xpc_hb_check_min_interval,
         .extra2 = &xpc_hb_check_max_interval},
        {}
};
static struct ctl_table xpc_sys_xpc[] = {
        {
         .procname = "disengage_timelimit",
         .data = &xpc_disengage_timelimit,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = proc_dointvec_minmax,
         .extra1 = &xpc_disengage_min_timelimit,
         .extra2 = &xpc_disengage_max_timelimit},
        {}
};

static struct ctl_table_header *xpc_sysctl;
static struct ctl_table_header *xpc_sysctl_hb;
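
/*
 * For reference, a sketch of how the tables above surface at runtime
 * (assuming the register_sysctl() calls made later in xpc_init() succeed):
 *
 *      /proc/sys/xpc/hb/hb_interval            seconds, clamped to 1..10
 *      /proc/sys/xpc/hb/hb_check_interval      seconds, clamped to 10..120
 *      /proc/sys/xpc/disengage_timelimit       seconds, clamped to 0..120
 *
 * proc_dointvec_minmax() enforces the min/max bounds declared above, so
 * writes outside those ranges are rejected.
 */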

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
        .notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
        struct xpc_partition *part = from_timer(part, t, disengage_timer);

        DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

        xpc_partition_disengaged_from_timer(part);

        DBUG_ON(part->disengage_timeout != 0);
        DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat. The timer structure's function is
 * already set when this is initially called. A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(struct timer_list *unused)
{
        xpc_arch_ops.increment_heartbeat();

        if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
                wake_up_interruptible(&xpc_activate_IRQ_wq);

        xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
        add_timer(&xpc_hb_timer);
}
static void
xpc_start_hb_beater(void)
{
        xpc_arch_ops.heartbeat_init();
        timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
        xpc_hb_beater(NULL);
}

static void
xpc_stop_hb_beater(void)
{
        del_timer_sync(&xpc_hb_timer);
        xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
        struct xpc_partition *part;
        short partid;
        enum xp_retval ret;

        for (partid = 0; partid < xp_max_npartitions; partid++) {

                if (xpc_exiting)
                        break;

                if (partid == xp_partition_id)
                        continue;

                part = &xpc_partitions[partid];

                if (part->act_state == XPC_P_AS_INACTIVE ||
                    part->act_state == XPC_P_AS_DEACTIVATING) {
                        continue;
                }

                ret = xpc_arch_ops.get_remote_heartbeat(part);
                if (ret != xpSuccess)
                        XPC_DEACTIVATE_PARTITION(part, ret);
        }
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
        int force_IRQ = 0;

        /* this thread was marked active by xpc_hb_init() */

        set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
        xpc_start_hb_beater();

        while (!xpc_exiting) {

                dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                        "been received\n",
                        (int)(xpc_hb_check_timeout - jiffies),
                        xpc_activate_IRQ_rcvd);

                /* checking of remote heartbeats is skewed by IRQ handling */
                if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
                        xpc_hb_check_timeout = jiffies +
                            (xpc_hb_check_interval * HZ);

                        dev_dbg(xpc_part, "checking remote heartbeats\n");
                        xpc_check_remote_hb();
                }

                /* check for outstanding IRQs */
                if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
                        force_IRQ = 0;
                        dev_dbg(xpc_part, "processing activate IRQs "
                                "received\n");
                        xpc_arch_ops.process_activate_IRQ_rcvd();
                }

                /* wait for IRQ or timeout */
                (void)wait_event_interruptible(xpc_activate_IRQ_wq,
                                               (time_is_before_eq_jiffies(
                                                xpc_hb_check_timeout) ||
                                                xpc_activate_IRQ_rcvd > 0 ||
                                                xpc_exiting));
        }

        xpc_stop_hb_beater();

        dev_dbg(xpc_part, "heartbeat checker is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_hb_checker_exited);
        return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
        xpc_discovery();

        dev_dbg(xpc_part, "discovery thread is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_discovery_exited);
        return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
        while (part->act_state != XPC_P_AS_DEACTIVATING ||
               atomic_read(&part->nchannels_active) > 0 ||
               !xpc_partition_disengaged(part)) {

                xpc_process_sent_chctl_flags(part);

                /*
                 * Wait until we've been requested to activate kthreads or
                 * all of the channel's message queues have been torn down or
                 * a signal is pending.
                 *
                 * The channel_mgr_requests is set to 1 after being awakened.
                 * This is done to prevent the channel mgr from making one pass
                 * through the loop for each request, since he will
                 * be servicing all the requests in one pass. The reason it's
                 * set to 1 instead of 0 is so that other kthreads will know
                 * that the channel mgr is running and won't bother trying to
                 * wake him up.
                 */
                atomic_dec(&part->channel_mgr_requests);
                (void)wait_event_interruptible(part->channel_mgr_wq,
                        (atomic_read(&part->channel_mgr_requests) > 0 ||
                         part->chctl.all_flags != 0 ||
                         (part->act_state == XPC_P_AS_DEACTIVATING &&
                          atomic_read(&part->nchannels_active) == 0 &&
                          xpc_partition_disengaged(part))));
                atomic_set(&part->channel_mgr_requests, 1);
        }
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
        /* see if kzalloc will give us cacheline aligned memory by default */
        *base = kzalloc(size, flags);
        if (*base == NULL)
                return NULL;

        if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
                return *base;

        kfree(*base);

        /* nope, we'll have to do it ourselves */
        *base = kzalloc(size + L1_CACHE_BYTES, flags);
        if (*base == NULL)
                return NULL;

        return (void *)L1_CACHE_ALIGN((u64)*base);
}
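
/*
 * Usage sketch (illustrative only, mirroring the calls made in
 * xpc_setup_ch_structures() and xpc_teardown_ch_structures() below):
 * the returned pointer is the cacheline-aligned address to use, while
 * the *base cookie records what kzalloc() actually returned and is the
 * pointer that must eventually be passed to kfree().
 *
 *      void *args_base;
 *      void *args = xpc_kzalloc_cacheline_aligned(size, GFP_KERNEL,
 *                                                 &args_base);
 *      if (args == NULL)
 *              return xpNoMemory;
 *      ...
 *      kfree(args_base);
 */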

/*
 * Setup the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
        enum xp_retval ret;
        int ch_number;
        struct xpc_channel *ch;
        short partid = XPC_PARTID(part);

        /*
         * Allocate all of the channel structures as a contiguous chunk of
         * memory.
         */
        DBUG_ON(part->channels != NULL);
        part->channels = kcalloc(XPC_MAX_NCHANNELS,
                                 sizeof(struct xpc_channel),
                                 GFP_KERNEL);
        if (part->channels == NULL) {
                dev_err(xpc_chan, "can't get memory for channels\n");
                return xpNoMemory;
        }

        /* allocate the remote open and close args */

        part->remote_openclose_args =
            xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
                                          GFP_KERNEL, &part->
                                          remote_openclose_args_base);
        if (part->remote_openclose_args == NULL) {
                dev_err(xpc_chan, "can't get memory for remote connect args\n");
                ret = xpNoMemory;
                goto out_1;
        }

        part->chctl.all_flags = 0;
        spin_lock_init(&part->chctl_lock);

        atomic_set(&part->channel_mgr_requests, 1);
        init_waitqueue_head(&part->channel_mgr_wq);

        part->nchannels = XPC_MAX_NCHANNELS;

        atomic_set(&part->nchannels_active, 0);
        atomic_set(&part->nchannels_engaged, 0);

        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                ch->partid = partid;
                ch->number = ch_number;
                ch->flags = XPC_C_DISCONNECTED;

                atomic_set(&ch->kthreads_assigned, 0);
                atomic_set(&ch->kthreads_idle, 0);
                atomic_set(&ch->kthreads_active, 0);

                atomic_set(&ch->references, 0);
                atomic_set(&ch->n_to_notify, 0);

                spin_lock_init(&ch->lock);
                init_completion(&ch->wdisconnect_wait);

                atomic_set(&ch->n_on_msg_allocate_wq, 0);
                init_waitqueue_head(&ch->msg_allocate_wq);
                init_waitqueue_head(&ch->idle_wq);
        }

        ret = xpc_arch_ops.setup_ch_structures(part);
        if (ret != xpSuccess)
                goto out_2;

        /*
         * With the setting of the partition setup_state to XPC_P_SS_SETUP,
         * we're declaring that this partition is ready to go.
         */
        part->setup_state = XPC_P_SS_SETUP;

        return xpSuccess;

        /* setup of ch structures failed */
out_2:
        kfree(part->remote_openclose_args_base);
        part->remote_openclose_args = NULL;
out_1:
        kfree(part->channels);
        part->channels = NULL;
        return ret;
}

/*
 * Teardown the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
        DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
        DBUG_ON(atomic_read(&part->nchannels_active) != 0);

        /*
         * Make this partition inaccessible to local processes by marking it
         * as no longer setup. Then wait before proceeding with the teardown
         * until all existing references cease.
         */
        DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
        part->setup_state = XPC_P_SS_WTEARDOWN;

        wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

        /* now we can begin tearing down the infrastructure */

        xpc_arch_ops.teardown_ch_structures(part);

        kfree(part->remote_openclose_args_base);
        part->remote_openclose_args = NULL;
        kfree(part->channels);
        part->channels = NULL;

        part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
        short partid = (u64)__partid;
        struct xpc_partition *part = &xpc_partitions[partid];
        unsigned long irq_flags;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

        spin_lock_irqsave(&part->act_lock, irq_flags);

        if (part->act_state == XPC_P_AS_DEACTIVATING) {
                part->act_state = XPC_P_AS_INACTIVE;
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                part->remote_rp_pa = 0;
                return 0;
        }

        /* indicate the thread is activating */
        DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
        part->act_state = XPC_P_AS_ACTIVATING;

        XPC_SET_REASON(part, 0, 0);
        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        dev_dbg(xpc_part, "activating partition %d\n", partid);

        xpc_arch_ops.allow_hb(partid);

        if (xpc_setup_ch_structures(part) == xpSuccess) {
                (void)xpc_part_ref(part);       /* this will always succeed */

                if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
                        xpc_mark_partition_active(part);
                        xpc_channel_mgr(part);
                        /* won't return until partition is deactivating */
                }

                xpc_part_deref(part);
                xpc_teardown_ch_structures(part);
        }

        xpc_arch_ops.disallow_hb(partid);
        xpc_mark_partition_inactive(part);

        if (part->reason == xpReactivating) {
                /* interrupting ourselves results in activating partition */
                xpc_arch_ops.request_partition_reactivation(part);
        }

        return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
        short partid = XPC_PARTID(part);
        unsigned long irq_flags;
        struct task_struct *kthread;

        spin_lock_irqsave(&part->act_lock, irq_flags);

        DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

        part->act_state = XPC_P_AS_ACTIVATION_REQ;
        XPC_SET_REASON(part, xpCloneKThread, __LINE__);

        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
                              partid);
        if (IS_ERR(kthread)) {
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_AS_INACTIVE;
                XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
        }
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
        int idle = atomic_read(&ch->kthreads_idle);
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;

        DBUG_ON(needed <= 0);

        if (idle > 0) {
                wakeup = (needed > idle) ? idle : needed;
                needed -= wakeup;

                dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
                        "channel=%d\n", wakeup, ch->partid, ch->number);

                /* only wakeup the requested number of kthreads */
                wake_up_nr(&ch->idle_wq, wakeup);
        }

        if (needed <= 0)
                return;

        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
                if (needed <= 0)
                        return;
        }

        dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                needed, ch->partid, ch->number);

        xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
        int (*n_of_deliverable_payloads) (struct xpc_channel *) =
                xpc_arch_ops.n_of_deliverable_payloads;

        do {
                /* deliver messages to their intended recipients */

                while (n_of_deliverable_payloads(ch) > 0 &&
                       !(ch->flags & XPC_C_DISCONNECTING)) {
                        xpc_deliver_payload(ch);
                }

                if (atomic_inc_return(&ch->kthreads_idle) >
                    ch->kthreads_idle_limit) {
                        /* too many idle kthreads on this channel */
                        atomic_dec(&ch->kthreads_idle);
                        break;
                }

                dev_dbg(xpc_chan, "idle kthread calling "
                        "wait_event_interruptible_exclusive()\n");

                (void)wait_event_interruptible_exclusive(ch->idle_wq,
                        (n_of_deliverable_payloads(ch) > 0 ||
                         (ch->flags & XPC_C_DISCONNECTING)));

                atomic_dec(&ch->kthreads_idle);

        } while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;
        int (*n_of_deliverable_payloads) (struct xpc_channel *) =
                xpc_arch_ops.n_of_deliverable_payloads;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that while the callout was being
                         * made that the remote partition sent some messages.
                         * If that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than total #of messages to deliver.
                         */
                        n_needed = n_of_deliverable_payloads(ch) - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
            atomic_dec_return(&part->nchannels_engaged) == 0) {
                xpc_arch_ops.indicate_partition_disengaged(part);
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}

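/*
 * Note on the kthread argument: xpc_create_kthreads() below packs the
 * partition id and channel number into the single pointer-sized value
 * handed to kthread_run() via XPC_PACK_ARGS(), and xpc_kthread_start()
 * above recovers them with XPC_UNPACK_ARG1()/XPC_UNPACK_ARG2(). A minimal
 * sketch of the round trip (the exact bit layout lives in xpc.h and is
 * assumed here to be the low/high halves of a u64):
 *
 *      u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 *      ...
 *      short partid = XPC_UNPACK_ARG1(args);
 *      u16 ch_number = XPC_UNPACK_ARG2(args);
 */
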
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;
        void (*indicate_partition_disengaged) (struct xpc_partition *) =
                xpc_arch_ops.indicate_partition_disengaged;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for doing the
                 * counterpart to the following before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
                           atomic_inc_return(&part->nchannels_engaged) == 1) {
                        xpc_arch_ops.indicate_partition_engaged(part);
                }
                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                indicate_partition_disengaged(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}

void
xpc_disconnect_wait(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;
        int wakeup_channel_mgr;

        /* now wait for all callouts to the caller's function to cease */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (!xpc_part_ref(part))
                        continue;

                ch = &part->channels[ch_number];

                if (!(ch->flags & XPC_C_WDISCONNECT)) {
                        xpc_part_deref(part);
                        continue;
                }

                wait_for_completion(&ch->wdisconnect_wait);

                spin_lock_irqsave(&ch->lock, irq_flags);
                DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
                wakeup_channel_mgr = 0;

                if (ch->delayed_chctl_flags) {
                        if (part->act_state != XPC_P_AS_DEACTIVATING) {
                                spin_lock(&part->chctl_lock);
                                part->chctl.flags[ch->number] |=
                                    ch->delayed_chctl_flags;
                                spin_unlock(&part->chctl_lock);
                                wakeup_channel_mgr = 1;
                        }
                        ch->delayed_chctl_flags = 0;
                }

                ch->flags &= ~XPC_C_WDISCONNECT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                if (wakeup_channel_mgr)
                        xpc_wakeup_channel_mgr(part);

                xpc_part_deref(part);
        }
}

static int
xpc_setup_partitions(void)
{
        short partid;
        struct xpc_partition *part;

        xpc_partitions = kcalloc(xp_max_npartitions,
                                 sizeof(struct xpc_partition),
                                 GFP_KERNEL);
        if (xpc_partitions == NULL) {
                dev_err(xpc_part, "can't get memory for partition structure\n");
                return -ENOMEM;
        }

        /*
         * The first few fields of each entry of xpc_partitions[] need to
         * be initialized now so that calls to xpc_connect() and
         * xpc_disconnect() can be made prior to the activation of any remote
         * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
         * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
         * PARTITION HAS BEEN ACTIVATED.
         */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

                part->activate_IRQ_rcvd = 0;
                spin_lock_init(&part->act_lock);
                part->act_state = XPC_P_AS_INACTIVE;
                XPC_SET_REASON(part, 0, 0);

                timer_setup(&part->disengage_timer,
                            xpc_timeout_partition_disengage, 0);

                part->setup_state = XPC_P_SS_UNSET;
                init_waitqueue_head(&part->teardown_wq);
                atomic_set(&part->references, 0);
        }

        return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
        xpc_arch_ops.teardown_partitions();
        kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
        short partid;
        int active_part_count, printed_waiting_msg = 0;
        struct xpc_partition *part;
        unsigned long printmsg_time, disengage_timeout = 0;

        /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
        DBUG_ON(xpc_exiting == 1);

        /*
         * Let the heartbeat checker thread and the discovery thread
         * (if one is running) know that they should exit. Also wake up
         * the heartbeat checker thread in case it's sleeping.
         */
        xpc_exiting = 1;
        wake_up_interruptible(&xpc_activate_IRQ_wq);

        /* wait for the discovery thread to exit */
        wait_for_completion(&xpc_discovery_exited);

        /* wait for the heartbeat checker thread to exit */
        wait_for_completion(&xpc_hb_checker_exited);

        /* sleep for a 1/3 of a second or so */
        (void)msleep_interruptible(300);

        /* wait for all partitions to become inactive */

        printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
        xpc_disengage_timedout = 0;

        do {
                active_part_count = 0;

                for (partid = 0; partid < xp_max_npartitions; partid++) {
                        part = &xpc_partitions[partid];

                        if (xpc_partition_disengaged(part) &&
                            part->act_state == XPC_P_AS_INACTIVE) {
                                continue;
                        }

                        active_part_count++;

                        XPC_DEACTIVATE_PARTITION(part, reason);

                        if (part->disengage_timeout > disengage_timeout)
                                disengage_timeout = part->disengage_timeout;
                }

                if (xpc_arch_ops.any_partition_engaged()) {
                        if (time_is_before_jiffies(printmsg_time)) {
                                dev_info(xpc_part, "waiting for remote "
                                         "partitions to deactivate, timeout in "
                                         "%ld seconds\n", (disengage_timeout -
                                         jiffies) / HZ);
                                printmsg_time = jiffies +
                                    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
                                printed_waiting_msg = 1;
                        }

                } else if (active_part_count > 0) {
                        if (printed_waiting_msg) {
                                dev_info(xpc_part, "waiting for local partition"
                                         " to deactivate\n");
                                printed_waiting_msg = 0;
                        }

                } else {
                        if (!xpc_disengage_timedout) {
                                dev_info(xpc_part, "all partitions have "
                                         "deactivated\n");
                        }
                        break;
                }

                /* sleep for a 1/3 of a second or so */
                (void)msleep_interruptible(300);

        } while (1);

        DBUG_ON(xpc_arch_ops.any_partition_engaged());

        xpc_teardown_rsvd_page();

        if (reason == xpUnloading) {
                (void)unregister_die_notifier(&xpc_die_notifier);
                (void)unregister_reboot_notifier(&xpc_reboot_notifier);
        }

        /* clear the interface to XPC's functions */
        xpc_clear_interface();

        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);
        if (xpc_sysctl_hb)
                unregister_sysctl_table(xpc_sysctl_hb);

        xpc_teardown_partitions();

        if (is_uv_system())
                xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
        enum xp_retval reason;

        switch (event) {
        case SYS_RESTART:
                reason = xpSystemReboot;
                break;
        case SYS_HALT:
                reason = xpSystemHalt;
                break;
        case SYS_POWER_OFF:
                reason = xpSystemPoweroff;
                break;
        default:
                reason = xpSystemGoingDown;
        }

        xpc_do_exit(reason);
        return NOTIFY_DONE;
}

/* Used to only allow one cpu to complete disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
        struct xpc_partition *part;
        short partid;
        int any_engaged;
        long keep_waiting;
        long wait_to_print;

        if (cmpxchg(&xpc_die_disconnecting, 0, 1))
                return;

        /* keep xpc_hb_checker thread from doing anything (just in case) */
        xpc_exiting = 1;

        xpc_arch_ops.disallow_all_hbs();        /* indicate we're deactivated */

        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (xpc_arch_ops.partition_engaged(partid) ||
                    part->act_state != XPC_P_AS_INACTIVE) {
                        xpc_arch_ops.request_partition_deactivation(part);
                        xpc_arch_ops.indicate_partition_disengaged(part);
                }
        }

        /*
         * Though we requested that all other partitions deactivate from us,
         * we only wait until they've all disengaged or we've reached the
         * defined timelimit.
         *
         * Given that one iteration through the following while-loop takes
         * approximately 200 microseconds, calculate the #of loops to take
         * before bailing and the #of loops before printing a waiting message.
         */
        keep_waiting = xpc_disengage_timelimit * 1000 * 5;
        wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

        while (1) {
                any_engaged = xpc_arch_ops.any_partition_engaged();
                if (!any_engaged) {
                        dev_info(xpc_part, "all partitions have deactivated\n");
                        break;
                }

                if (!keep_waiting--) {
                        for (partid = 0; partid < xp_max_npartitions;
                             partid++) {
                                if (xpc_arch_ops.partition_engaged(partid)) {
                                        dev_info(xpc_part, "deactivate from "
                                                 "remote partition %d timed "
                                                 "out\n", partid);
                                }
                        }
                        break;
                }

                if (!wait_to_print--) {
                        dev_info(xpc_part, "waiting for remote partitions to "
                                 "deactivate, timeout in %ld seconds\n",
                                 keep_waiting / (1000 * 5));
                        wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
                            1000 * 5;
                }

                udelay(200);
        }
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64              /* !!! temporary kludge */
        switch (event) {
        case DIE_MACHINE_RESTART:
        case DIE_MACHINE_HALT:
                xpc_die_deactivate();
                break;

        case DIE_KDEBUG_ENTER:
                /* Should lack of heartbeat be ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                fallthrough;
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
                xpc_arch_ops.offline_heartbeat();
                break;

        case DIE_KDEBUG_LEAVE:
                /* Is lack of heartbeat being ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                fallthrough;
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
                xpc_arch_ops.online_heartbeat();
                break;
        }
#else
        struct die_args *die_args = _die_args;

        switch (event) {
        case DIE_TRAP:
                if (die_args->trapnr == X86_TRAP_DF)
                        xpc_die_deactivate();

                if (((die_args->trapnr == X86_TRAP_MF) ||
                     (die_args->trapnr == X86_TRAP_XF)) &&
                    !user_mode(die_args->regs))
                        xpc_die_deactivate();

                break;
        case DIE_INT3:
        case DIE_DEBUG:
                break;
        case DIE_OOPS:
        case DIE_GPF:
        default:
                xpc_die_deactivate();
        }
#endif

        return NOTIFY_DONE;
}

static int __init
xpc_init(void)
{
        int ret;
        struct task_struct *kthread;

        dev_set_name(xpc_part, "part");
        dev_set_name(xpc_chan, "chan");

        if (is_uv_system()) {
                ret = xpc_init_uv();

        } else {
                ret = -ENODEV;
        }

        if (ret != 0)
                return ret;

        ret = xpc_setup_partitions();
        if (ret != 0) {
                dev_err(xpc_part, "can't get memory for partition structure\n");
                goto out_1;
        }

        xpc_sysctl = register_sysctl("xpc", xpc_sys_xpc);
        xpc_sysctl_hb = register_sysctl("xpc/hb", xpc_sys_xpc_hb);

        /*
         * Fill the partition reserved page with the information needed by
         * other partitions to discover we are alive and establish initial
         * communications.
         */
        ret = xpc_setup_rsvd_page();
        if (ret != 0) {
                dev_err(xpc_part, "can't setup our reserved page\n");
                goto out_2;
        }

        /* add ourselves to the reboot_notifier_list */
        ret = register_reboot_notifier(&xpc_reboot_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register reboot notifier\n");

        /* add ourselves to the die_notifier list */
        ret = register_die_notifier(&xpc_die_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register die notifier\n");

        /*
         * The real work-horse behind xpc. This processes incoming
         * interrupts and monitors remote heartbeats.
         */
        kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking hb check thread\n");
                ret = -EBUSY;
                goto out_3;
        }

        /*
         * Startup a thread that will attempt to discover other partitions to
         * activate based on info provided by SAL. This new thread is short
         * lived and will exit once discovery is complete.
         */
        kthread = kthread_run(xpc_initiate_discovery, NULL,
                              XPC_DISCOVERY_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking discovery thread\n");

                /* mark this new thread as a non-starter */
                complete(&xpc_discovery_exited);

                xpc_do_exit(xpUnloading);
                return -EBUSY;
        }

        /* set the interface to point at XPC's functions */
        xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                          xpc_initiate_send, xpc_initiate_send_notify,
                          xpc_initiate_received, xpc_initiate_partid_to_nasids);

        return 0;

        /* initialization was not successful */
out_3:
        xpc_teardown_rsvd_page();

        (void)unregister_die_notifier(&xpc_die_notifier);
        (void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
        if (xpc_sysctl_hb)
                unregister_sysctl_table(xpc_sysctl_hb);
        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);

        xpc_teardown_partitions();
out_1:
        if (is_uv_system())
                xpc_exit_uv();
        return ret;
}

module_init(xpc_init);

static void __exit
xpc_exit(void)
{
        xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
                 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
                 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
                 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
                 "other partitions when dropping into kdebug.");
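
/*
 * Loading example (illustrative only; the parameter names match the
 * module_param() declarations above, and the module is assumed to be
 * built as xpc.ko). The values shown fall within the min/max bounds
 * enforced by the sysctl tables near the top of this file:
 *
 *      modprobe xpc xpc_hb_interval=2 xpc_hb_check_interval=20 \
 *              xpc_disengage_timelimit=90 xpc_kdebug_ignore=1
 */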