livepatch: Skip task_call_func() for current task
linux-2.6-block.git: kernel/livepatch/transition.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

#define MAX_STACK_ENTRIES 100
DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);

#define STACK_ERR_BUF_SIZE 128

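/*
 * Fake signals are sent every SIGNALS_TIMEOUT retries of the transition
 * work, which reschedules itself roughly once per second
 * (round_jiffies_relative(HZ) below), i.e. about every 15 seconds while
 * straggler tasks remain.
 */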
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow the patching of functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead, hard-force the sched synchronization.
 *
 * This approach allows RCU functions to be used for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
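
/*
 * Mechanically, schedule_on_each_cpu() queues the no-op klp_sync() on every
 * CPU and waits for it to complete.  Running a work item forces each CPU
 * through the scheduler, so any preempt-disabled section that was executing
 * when this was called (e.g. klp_ftrace_handler() or
 * klp_update_patch_state()) is guaranteed to have finished, even where RCU
 * was not watching.
 */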

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
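
/*
 * Typical callers pass 'current' from safe transition points: the kernel-exit
 * path and the idle loop (see the comments in klp_start_transition() below).
 * Inactive tasks are instead switched via klp_check_and_switch_task().
 */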

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * Check for the to-be-unpatched function
		 * (the func itself).
		 */
		func_addr = (unsigned long)func->new_func;
		func_size = func->new_size;
	} else {
		/*
		 * Check for the to-be-patched function
		 * (the previous func).
		 */
		ops = klp_find_ops(func->old_func);

		if (list_is_singular(&ops->func_stack)) {
			/* original function */
			func_addr = (unsigned long)func->old_func;
			func_size = func->old_size;
		} else {
			/* previously patched function */
			struct klp_func *prev;

			prev = list_next_entry(func, stack_node);
			func_addr = (unsigned long)prev->new_func;
			func_size = prev->new_size;
		}
	}

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

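/*
 * Worked example with made-up addresses: if the func being checked occupies
 * [0xffffffffc0002000, 0xffffffffc0002100) and one of the stack entries is
 * 0xffffffffc0002042, the task is sleeping inside that function, so
 * klp_check_stack_func() returns -EAGAIN and the switch is refused.
 */
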
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	unsigned long *entries = this_cpu_ptr(klp_stack_entries);
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	/* Protect 'klp_stack_entries' */
	lockdep_assert_preemption_disabled();

	ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

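/*
 * klp_check_and_switch_task() above runs either directly for 'current' or
 * under task_call_func(), which keeps the target task off the CPU while its
 * stack is examined; see klp_try_switch_task() below.
 */
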
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
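	/*
	 * 'current' cannot be running on another CPU, so its stack can be
	 * checked directly.  The task_call_func() detour, with its locking
	 * overhead, is only needed to pin other, possibly running tasks.
	 */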
	if (task == current)
		ret = klp_check_and_switch_task(current, &old_name);
	else
		ret = task_call_func(task, klp_check_and_switch_task, &old_name);

	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

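/*
 * The "fake" signal delivers nothing to user space: set_notify_signal() sets
 * TIF_NOTIFY_SIGNAL, which merely interrupts an interruptible sleep so the
 * task heads back toward user space and crosses the kernel-exit switch point
 * described in klp_start_transition().
 */
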
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	/*
	 * The parent process may have gone through a KLP transition since
	 * the thread flag was copied in setup_thread_stack earlier.  Bring
	 * the task flag up to date with the parent here.
	 *
	 * The operation is serialized against all klp_*_transition()
	 * operations by the tasklist_lock.  The only exception is
	 * klp_update_patch_state(current), but we cannot race with
	 * that because we are current.
	 */
	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
	else
		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);

	child->patch_state = current->patch_state;
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}
c99a2be7 675}