// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static inline void padata_get_pd(struct parallel_data *pd)
{
	refcount_inc(&pd->refcnt);
}

static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
{
	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

static inline void padata_put_pd(struct parallel_data *pd)
{
	padata_put_pd_cnt(pd, 1);
}

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence number to a CPU by taking
	 * seq_nr modulo the number of CPUs in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
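
/*
 * Worked example (illustrative only): with pd->cpumask.pcpu = {2, 5, 7}
 * and seq_nr = 4, cpumask_weight() is 3, so cpu_index = 4 % 3 = 1 and
 * padata_index_to_cpu() walks one step past the first set bit, yielding
 * CPU 5.  Consecutive sequence numbers therefore round-robin across the
 * parallel cpumask.
 */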

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because this function may be optimized in such
 * a way that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with clang
 * LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
 * values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	padata_get_pd(pd);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
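
/*
 * Usage sketch (illustrative, not from this file): a caller embeds
 * struct padata_priv in its own request, submits it with
 * padata_do_parallel() and hands it back with padata_do_serial() once
 * the parallel part is done.  my_request, my_parallel(), my_serial()
 * and do_the_parallel_work() are made-up names; error handling is
 * elided.  See Documentation/core-api/padata.rst for the API contract.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		do_the_parallel_work(req);
 *		padata_do_serial(padata);	// hand back for reordering
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		...	// runs in submission order, BHs off
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */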

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
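
/*
 * Example of the seq_nr check above (illustrative): with two parallel
 * CPUs, objects 0 and 2 both hash to CPU A and object 1 to CPU B.  If
 * object 2 finishes first, it sits at the head of CPU A's reorder list
 * while pd->processed is still 0, so padata_find_next() returns NULL
 * until object 0 arrives and is dequeued in its turn.
 */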

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
		/*
		 * Other contexts (e.g. padata_serial_worker) can finish the
		 * request while reorder_work is pending.  Take a pd reference
		 * here and drop it once reorder_work has run, to avoid a
		 * use-after-free.
		 */
		padata_get_pd(pd);
		if (!queue_work(pinst->serial_wq, &pd->reorder_work))
			padata_put_pd(pd);
	}
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
	/* Pairs with putting the reorder_work in the serial_wq */
	padata_put_pd(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	padata_put_pd_cnt(pd, cnt);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to handle integer wraparound. */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
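
/*
 * The smp_mb() pairing above can be read as a two-CPU timeline
 * (informal sketch, not a formal litmus test):
 *
 *	CPU0 (padata_do_serial)		CPU1 (padata_reorder)
 *	list_add(&padata->list, ...)	spin_unlock_bh(&pd->lock)
 *	smp_mb()			smp_mb()
 *	spin_trylock_bh(&pd->lock)	!list_empty(&reorder->list)?
 *
 * Either CPU0's trylock fails because CPU1 still holds pd->lock, in
 * which case CPU1's later list check observes the new entry, or the
 * lock was already dropped and CPU0 takes it and processes the object
 * itself.  Either way no object is left stranded on a reorder list.
 */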

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}
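
/*
 * Chunking example (illustrative): with job->start = 10, job->size = 100
 * and ps->chunk_size = 32, successive loop iterations (possibly spread
 * over several helpers) claim [10, 32), [32, 64), [64, 96) and
 * [96, 110).  The first chunk is short because "end" is rounded up to a
 * chunk_size boundary, which keeps all later chunks aligned.
 */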

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
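
	/*
	 * Numeric example (illustrative): size = 1 << 20, nworks = 8,
	 * min_chunk = 1024, align = 64.  1M / (8 * 4) = 32768, which is
	 * already >= min_chunk and 64-aligned, so each helper claims 32K
	 * units per iteration and the 8 threads self-balance across the
	 * resulting 32 chunks.
	 */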

	list_for_each_entry(pw, &works, pw_list) {
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}
	}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
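
/*
 * Usage sketch (illustrative, not from this file): a boot-time caller
 * describes its work as a padata_mt_job and lets padata fan it out.
 * init_range() and the variable names are made up; the field names are
 * those used by this file.
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		...	// initialize units [start, end), may run in parallel
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = init_range,
 *		.fn_arg      = zone,
 *		.start       = first_unit,
 *		.size        = nr_units,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = max_workers,
 *		.numa_aware  = false,
 *	};
 *
 *	padata_do_multithreaded(&job);
 */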

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		padata_put_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If the cpumask contains no online CPU, mark the instance invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to
 *                      the value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting
 *                the serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
	       cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
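
/*
 * Example (illustrative): for an instance whose kobject is added under
 * /sys/kernel by its creator, the masks can be inspected and changed
 * from userspace, e.g.:
 *
 *	cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The exact path depends on where the creator registers the kobject;
 * pcrypt's "pencrypt" instance is used here only as a familiar example.
 */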

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	/*
	 * Wait for all _do_serial calls to finish to avoid touching
	 * freed pd's and ps's.
	 */
	synchronize_rcu();

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	padata_put_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
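
/*
 * Lifecycle sketch (illustrative): a typical user creates one instance,
 * one or more shells on top of it, and tears them down in reverse order.
 * "my_inst" is a made-up name and error handling is elided.
 *
 *	struct padata_instance *pinst = padata_alloc("my_inst");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	...	// submit jobs via padata_do_parallel(ps, ...)
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 *
 * All shells must be freed before the instance, as the WARN_ON() in
 * __padata_free() enforces.
 */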

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}