Commit | Line | Data |
---|---|---|
1a59d1b8 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 LT |
2 | /* |
3 | * pSeries_lpar.c | |
4 | * Copyright (C) 2001 Todd Inglett, IBM Corporation | |
5 | * | |
6 | * pSeries LPAR support. | |
1da177e4 LT |
7 | */ |
8 | ||
f7ebf352 ME |
9 | /* Enables debugging of low-level hash table routines - careful! */ |
10 | #undef DEBUG | |
65471d76 | 11 | #define pr_fmt(fmt) "lpar: " fmt |
1da177e4 | 12 | |
1da177e4 LT |
13 | #include <linux/kernel.h> |
14 | #include <linux/dma-mapping.h> | |
463ce0e1 | 15 | #include <linux/console.h> |
66b15db6 | 16 | #include <linux/export.h> |
58995a9a | 17 | #include <linux/jump_label.h> |
dbcf929c DG |
18 | #include <linux/delay.h> |
19 | #include <linux/stop_machine.h> | |
d62c8dee NR |
20 | #include <linux/spinlock.h> |
21 | #include <linux/cpuhotplug.h> | |
22 | #include <linux/workqueue.h> | |
23 | #include <linux/proc_fs.h> | |
1da177e4 LT |
24 | #include <asm/processor.h> |
25 | #include <asm/mmu.h> | |
26 | #include <asm/page.h> | |
27 | #include <asm/pgtable.h> | |
28 | #include <asm/machdep.h> | |
1da177e4 | 29 | #include <asm/mmu_context.h> |
1da177e4 | 30 | #include <asm/iommu.h> |
1da177e4 LT |
31 | #include <asm/tlb.h> |
32 | #include <asm/prom.h> | |
1da177e4 | 33 | #include <asm/cputable.h> |
dcad47fc | 34 | #include <asm/udbg.h> |
2249ca9d | 35 | #include <asm/smp.h> |
c8cd093a | 36 | #include <asm/trace.h> |
f5339277 | 37 | #include <asm/firmware.h> |
212bebb4 | 38 | #include <asm/plpar_wrappers.h> |
c1caae3d | 39 | #include <asm/kexec.h> |
408cddd9 | 40 | #include <asm/fadump.h> |
42f5b4ca | 41 | #include <asm/asm-prototypes.h> |
c6c26fb5 | 42 | #include <asm/debugfs.h> |
a1218720 | 43 | |
21cf9133 | 44 | #include "pseries.h" |
1da177e4 | 45 | |
1a527286 AK |
46 | /* Flag bits for H_BULK_REMOVE */ |
47 | #define HBR_REQUEST 0x4000000000000000UL | |
48 | #define HBR_RESPONSE 0x8000000000000000UL | |
49 | #define HBR_END 0xc000000000000000UL | |
50 | #define HBR_AVPN 0x0200000000000000UL | |
51 | #define HBR_ANDCOND 0x0100000000000000UL | |
52 | ||
1da177e4 | 53 | |
b9377ffc | 54 | /* in hvCall.S */ |
1da177e4 | 55 | EXPORT_SYMBOL(plpar_hcall); |
b9377ffc | 56 | EXPORT_SYMBOL(plpar_hcall9); |
1da177e4 | 57 | EXPORT_SYMBOL(plpar_hcall_norets); |
b9377ffc | 58 | |
1211ee61 LD |
59 | /* |
60 | * H_BLOCK_REMOVE supported block size for this page size in a segment whose | |
61 | * base page size is that page size. | |
62 | * | |
63 | * The first index is the segment base page size, the second one is the actual | |
64 | * page size. | |
65 | */ | |
66 | static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init; | |
67 | ||
59545ebe LD |
68 | /* |
69 | * Due to the involved complexity, and because the current hypervisor only | |
70 | * returns this value or 0, we limit H_BLOCK_REMOVE support to a block size | |
71 | * of 8. | |
72 | */ | |
73 | #define HBLKRM_SUPPORTED_BLOCK_SIZE 8 | |
74 | ||
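As an aside, the two-dimensional lookup above can be pictured with a small standalone sketch. The MMU_PAGE_* indices and the block size of 8 mirror the comments above, but the enum and table below are simplified stand-ins for the kernel's MMU page-size definitions, not the real headers:

```c
/* Minimal sketch only: a standalone model of the hblkrm_size lookup.
 * The real kernel indexes the table with the MMU_PAGE_* constants from the
 * powerpc MMU headers; the enum below is a simplified stand-in. */
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT];

int main(void)
{
	/* Pretend the hypervisor reported a block size of 8 for 16M/16M. */
	hblkrm_size[MMU_PAGE_16M][MMU_PAGE_16M] = 8;

	/* H_BLOCK_REMOVE is used only when the supported block size is 8. */
	printf("16M/16M uses H_BLOCK_REMOVE: %s\n",
	       hblkrm_size[MMU_PAGE_16M][MMU_PAGE_16M] == 8 ? "yes" : "no");
	printf("4K/4K   uses H_BLOCK_REMOVE: %s\n",
	       hblkrm_size[MMU_PAGE_4K][MMU_PAGE_4K] == 8 ? "yes" : "no");
	return 0;
}
```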
d62c8dee NR |
75 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
76 | static u8 dtl_mask = DTL_LOG_PREEMPT; | |
77 | #else | |
78 | static u8 dtl_mask; | |
79 | #endif | |
80 | ||
18a593c8 | 81 | void alloc_dtl_buffers(unsigned long *time_limit) |
1c85a2a1 NR |
82 | { |
83 | int cpu; | |
84 | struct paca_struct *pp; | |
85 | struct dtl_entry *dtl; | |
86 | ||
87 | for_each_possible_cpu(cpu) { | |
88 | pp = paca_ptrs[cpu]; | |
d62c8dee NR |
89 | if (pp->dispatch_log) |
90 | continue; | |
1c85a2a1 NR |
91 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); |
92 | if (!dtl) { | |
93 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | |
94 | cpu); | |
d62c8dee | 95 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
1c85a2a1 | 96 | pr_warn("Stolen time statistics will be unreliable\n"); |
d62c8dee | 97 | #endif |
1c85a2a1 NR |
98 | break; |
99 | } | |
100 | ||
101 | pp->dtl_ridx = 0; | |
102 | pp->dispatch_log = dtl; | |
103 | pp->dispatch_log_end = dtl + N_DISPATCH_LOG; | |
104 | pp->dtl_curr = dtl; | |
18a593c8 NR |
105 | |
106 | if (time_limit && time_after(jiffies, *time_limit)) { | |
107 | cond_resched(); | |
108 | *time_limit = jiffies + HZ; | |
109 | } | |
1c85a2a1 NR |
110 | } |
111 | } | |
112 | ||
113 | void register_dtl_buffer(int cpu) | |
114 | { | |
115 | long ret; | |
116 | struct paca_struct *pp; | |
117 | struct dtl_entry *dtl; | |
118 | int hwcpu = get_hard_smp_processor_id(cpu); | |
119 | ||
120 | pp = paca_ptrs[cpu]; | |
121 | dtl = pp->dispatch_log; | |
d62c8dee | 122 | if (dtl && dtl_mask) { |
1c85a2a1 NR |
123 | pp->dtl_ridx = 0; |
124 | pp->dtl_curr = dtl; | |
125 | lppaca_of(cpu).dtl_idx = 0; | |
126 | ||
127 | /* hypervisor reads buffer length from this field */ | |
128 | dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES); | |
129 | ret = register_dtl(hwcpu, __pa(dtl)); | |
130 | if (ret) | |
131 | pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n", | |
132 | cpu, hwcpu, ret); | |
133 | ||
d62c8dee | 134 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; |
1c85a2a1 NR |
135 | } |
136 | } | |
137 | ||
06220d78 | 138 | #ifdef CONFIG_PPC_SPLPAR |
d62c8dee NR |
139 | struct dtl_worker { |
140 | struct delayed_work work; | |
141 | int cpu; | |
142 | }; | |
143 | ||
144 | struct vcpu_dispatch_data { | |
145 | int last_disp_cpu; | |
146 | ||
147 | int total_disp; | |
148 | ||
149 | int same_cpu_disp; | |
150 | int same_chip_disp; | |
151 | int diff_chip_disp; | |
152 | int far_chip_disp; | |
153 | ||
154 | int numa_home_disp; | |
155 | int numa_remote_disp; | |
156 | int numa_far_disp; | |
157 | }; | |
158 | ||
159 | /* | |
160 | * This represents the number of cpus in the hypervisor. Since there is no | |
161 | * architected way to discover the number of processors in the host, we | |
162 | * provision for dealing with NR_CPUS. This is currently 2048 by default, and | |
163 | * is sufficient for our purposes. This will need to be tweaked if | |
164 | * CONFIG_NR_CPUS is changed. | |
165 | */ | |
166 | #define NR_CPUS_H NR_CPUS | |
167 | ||
06220d78 | 168 | DEFINE_RWLOCK(dtl_access_lock); |
d62c8dee NR |
169 | static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); |
170 | static DEFINE_PER_CPU(u64, dtl_entry_ridx); | |
171 | static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); | |
172 | static enum cpuhp_state dtl_worker_state; | |
173 | static DEFINE_MUTEX(dtl_enable_mutex); | |
174 | static int vcpudispatch_stats_on __read_mostly; | |
175 | static int vcpudispatch_stats_freq = 50; | |
176 | static __be32 *vcpu_associativity, *pcpu_associativity; | |
177 | ||
178 | ||
18a593c8 | 179 | static void free_dtl_buffers(unsigned long *time_limit) |
d62c8dee NR |
180 | { |
181 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
182 | int cpu; | |
183 | struct paca_struct *pp; | |
184 | ||
185 | for_each_possible_cpu(cpu) { | |
186 | pp = paca_ptrs[cpu]; | |
187 | if (!pp->dispatch_log) | |
188 | continue; | |
189 | kmem_cache_free(dtl_cache, pp->dispatch_log); | |
190 | pp->dtl_ridx = 0; | |
191 | pp->dispatch_log = 0; | |
192 | pp->dispatch_log_end = 0; | |
193 | pp->dtl_curr = 0; | |
18a593c8 NR |
194 | |
195 | if (time_limit && time_after(jiffies, *time_limit)) { | |
196 | cond_resched(); | |
197 | *time_limit = jiffies + HZ; | |
198 | } | |
d62c8dee NR |
199 | } |
200 | #endif | |
201 | } | |
202 | ||
203 | static int init_cpu_associativity(void) | |
204 | { | |
205 | vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core, | |
206 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
207 | pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core, | |
208 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
209 | ||
210 | if (!vcpu_associativity || !pcpu_associativity) { | |
211 | pr_err("error allocating memory for associativity information\n"); | |
212 | return -ENOMEM; | |
213 | } | |
214 | ||
215 | return 0; | |
216 | } | |
217 | ||
218 | static void destroy_cpu_associativity(void) | |
219 | { | |
220 | kfree(vcpu_associativity); | |
221 | kfree(pcpu_associativity); | |
222 | vcpu_associativity = pcpu_associativity = 0; | |
223 | } | |
224 | ||
225 | static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag) | |
226 | { | |
227 | __be32 *assoc; | |
228 | int rc = 0; | |
229 | ||
230 | assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE]; | |
231 | if (!assoc[0]) { | |
232 | rc = hcall_vphn(cpu, flag, &assoc[0]); | |
233 | if (rc) | |
234 | return NULL; | |
235 | } | |
236 | ||
237 | return assoc; | |
238 | } | |
239 | ||
240 | static __be32 *get_pcpu_associativity(int cpu) | |
241 | { | |
242 | return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU); | |
243 | } | |
244 | ||
245 | static __be32 *get_vcpu_associativity(int cpu) | |
246 | { | |
247 | return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU); | |
248 | } | |
249 | ||
250 | static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu) | |
251 | { | |
252 | __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc; | |
253 | ||
254 | if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H) | |
255 | return -EINVAL; | |
256 | ||
257 | last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu); | |
258 | cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu); | |
259 | ||
260 | if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc) | |
261 | return -EIO; | |
262 | ||
263 | return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc); | |
264 | } | |
265 | ||
266 | static int cpu_home_node_dispatch_distance(int disp_cpu) | |
267 | { | |
268 | __be32 *disp_cpu_assoc, *vcpu_assoc; | |
269 | int vcpu_id = smp_processor_id(); | |
270 | ||
271 | if (disp_cpu >= NR_CPUS_H) { | |
272 | pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n", | |
273 | disp_cpu, NR_CPUS_H); | |
274 | return -EINVAL; | |
275 | } | |
276 | ||
277 | disp_cpu_assoc = get_pcpu_associativity(disp_cpu); | |
278 | vcpu_assoc = get_vcpu_associativity(vcpu_id); | |
279 | ||
280 | if (!disp_cpu_assoc || !vcpu_assoc) | |
281 | return -EIO; | |
282 | ||
283 | return cpu_distance(disp_cpu_assoc, vcpu_assoc); | |
284 | } | |
285 | ||
286 | static void update_vcpu_disp_stat(int disp_cpu) | |
287 | { | |
288 | struct vcpu_dispatch_data *disp; | |
289 | int distance; | |
290 | ||
291 | disp = this_cpu_ptr(&vcpu_disp_data); | |
292 | if (disp->last_disp_cpu == -1) { | |
293 | disp->last_disp_cpu = disp_cpu; | |
294 | return; | |
295 | } | |
296 | ||
297 | disp->total_disp++; | |
298 | ||
299 | if (disp->last_disp_cpu == disp_cpu || | |
300 | (cpu_first_thread_sibling(disp->last_disp_cpu) == | |
301 | cpu_first_thread_sibling(disp_cpu))) | |
302 | disp->same_cpu_disp++; | |
303 | else { | |
304 | distance = cpu_relative_dispatch_distance(disp->last_disp_cpu, | |
305 | disp_cpu); | |
306 | if (distance < 0) | |
307 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
308 | smp_processor_id()); | |
309 | else { | |
310 | switch (distance) { | |
311 | case 0: | |
312 | disp->same_chip_disp++; | |
313 | break; | |
314 | case 1: | |
315 | disp->diff_chip_disp++; | |
316 | break; | |
317 | case 2: | |
318 | disp->far_chip_disp++; | |
319 | break; | |
320 | default: | |
321 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n", | |
322 | smp_processor_id(), | |
323 | disp->last_disp_cpu, | |
324 | disp_cpu, | |
325 | distance); | |
326 | } | |
327 | } | |
328 | } | |
329 | ||
330 | distance = cpu_home_node_dispatch_distance(disp_cpu); | |
331 | if (distance < 0) | |
332 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
333 | smp_processor_id()); | |
334 | else { | |
335 | switch (distance) { | |
336 | case 0: | |
337 | disp->numa_home_disp++; | |
338 | break; | |
339 | case 1: | |
340 | disp->numa_remote_disp++; | |
341 | break; | |
342 | case 2: | |
343 | disp->numa_far_disp++; | |
344 | break; | |
345 | default: | |
346 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n", | |
347 | smp_processor_id(), | |
348 | disp_cpu, | |
349 | distance); | |
350 | } | |
351 | } | |
352 | ||
353 | disp->last_disp_cpu = disp_cpu; | |
354 | } | |
355 | ||
356 | static void process_dtl_buffer(struct work_struct *work) | |
357 | { | |
358 | struct dtl_entry dtle; | |
359 | u64 i = __this_cpu_read(dtl_entry_ridx); | |
360 | struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
361 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; | |
362 | struct lppaca *vpa = local_paca->lppaca_ptr; | |
363 | struct dtl_worker *d = container_of(work, struct dtl_worker, work.work); | |
364 | ||
365 | if (!local_paca->dispatch_log) | |
366 | return; | |
367 | ||
368 | /* if we have been migrated away, we cancel ourselves */ | |
369 | if (d->cpu != smp_processor_id()) { | |
370 | pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n", | |
371 | smp_processor_id()); | |
372 | return; | |
373 | } | |
374 | ||
375 | if (i == be64_to_cpu(vpa->dtl_idx)) | |
376 | goto out; | |
377 | ||
378 | while (i < be64_to_cpu(vpa->dtl_idx)) { | |
379 | dtle = *dtl; | |
380 | barrier(); | |
381 | if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { | |
382 | /* buffer has overflowed */ | |
383 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n", | |
384 | d->cpu, | |
385 | be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i); | |
386 | i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; | |
387 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
388 | continue; | |
389 | } | |
390 | update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id)); | |
391 | ++i; | |
392 | ++dtl; | |
393 | if (dtl == dtl_end) | |
394 | dtl = local_paca->dispatch_log; | |
395 | } | |
396 | ||
397 | __this_cpu_write(dtl_entry_ridx, i); | |
398 | ||
399 | out: | |
400 | schedule_delayed_work_on(d->cpu, to_delayed_work(work), | |
401 | HZ / vcpudispatch_stats_freq); | |
402 | } | |
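The index handling in process_dtl_buffer() is easier to see in isolation: the hypervisor's write index (vpa->dtl_idx) grows without bound, the reader keeps its own index, maps it into the ring modulo N_DISPATCH_LOG, and jumps forward when more than a full buffer of entries has been missed. A minimal standalone sketch of just that arithmetic (plain counters, no DTL structures, and an illustrative ring size rather than the kernel's derived constant) is:

```c
/* Minimal sketch only: the reader-index logic of process_dtl_buffer(),
 * modelled with plain counters. 128 is an illustrative ring size; the
 * kernel derives N_DISPATCH_LOG from DISPATCH_LOG_BYTES. */
#include <stdio.h>

#define N_DISPATCH_LOG 128

int main(void)
{
	unsigned long long ridx = 0;	/* reader index (dtl_entry_ridx)    */
	unsigned long long widx = 300;	/* hypervisor write index (dtl_idx) */
	unsigned long long lost = 0, read = 0;

	while (ridx < widx) {
		if (ridx + N_DISPATCH_LOG < widx) {
			/* Buffer overflowed: skip to the oldest entry still present. */
			lost += widx - N_DISPATCH_LOG - ridx;
			ridx = widx - N_DISPATCH_LOG;
			continue;
		}
		/* The entry lives at slot (ridx % N_DISPATCH_LOG) in the ring. */
		read++;
		ridx++;
	}
	printf("read %llu entries, lost %llu\n", read, lost);	/* read 128, lost 172 */
	return 0;
}
```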
403 | ||
404 | static int dtl_worker_online(unsigned int cpu) | |
405 | { | |
406 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
407 | ||
408 | memset(d, 0, sizeof(*d)); | |
409 | INIT_DELAYED_WORK(&d->work, process_dtl_buffer); | |
410 | d->cpu = cpu; | |
411 | ||
412 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
413 | per_cpu(dtl_entry_ridx, cpu) = 0; | |
414 | register_dtl_buffer(cpu); | |
415 | #else | |
416 | per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx); | |
417 | #endif | |
418 | ||
419 | schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq); | |
420 | return 0; | |
421 | } | |
422 | ||
423 | static int dtl_worker_offline(unsigned int cpu) | |
424 | { | |
425 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
426 | ||
427 | cancel_delayed_work_sync(&d->work); | |
428 | ||
429 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
430 | unregister_dtl(get_hard_smp_processor_id(cpu)); | |
431 | #endif | |
432 | ||
433 | return 0; | |
434 | } | |
435 | ||
436 | static void set_global_dtl_mask(u8 mask) | |
437 | { | |
438 | int cpu; | |
439 | ||
440 | dtl_mask = mask; | |
441 | for_each_present_cpu(cpu) | |
442 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
443 | } | |
444 | ||
445 | static void reset_global_dtl_mask(void) | |
446 | { | |
447 | int cpu; | |
448 | ||
449 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
450 | dtl_mask = DTL_LOG_PREEMPT; | |
451 | #else | |
452 | dtl_mask = 0; | |
453 | #endif | |
454 | for_each_present_cpu(cpu) | |
455 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
456 | } | |
457 | ||
18a593c8 | 458 | static int dtl_worker_enable(unsigned long *time_limit) |
d62c8dee NR |
459 | { |
460 | int rc = 0, state; | |
461 | ||
462 | if (!write_trylock(&dtl_access_lock)) { | |
463 | rc = -EBUSY; | |
464 | goto out; | |
465 | } | |
466 | ||
467 | set_global_dtl_mask(DTL_LOG_ALL); | |
468 | ||
469 | /* Set up DTL buffers and register them */ | |
18a593c8 | 470 | alloc_dtl_buffers(time_limit); |
d62c8dee NR |
471 | |
472 | state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online", | |
473 | dtl_worker_online, dtl_worker_offline); | |
474 | if (state < 0) { | |
475 | pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); | |
18a593c8 | 476 | free_dtl_buffers(time_limit); |
d62c8dee NR |
477 | reset_global_dtl_mask(); |
478 | write_unlock(&dtl_access_lock); | |
479 | rc = -EINVAL; | |
480 | goto out; | |
481 | } | |
482 | dtl_worker_state = state; | |
483 | ||
484 | out: | |
485 | return rc; | |
486 | } | |
487 | ||
18a593c8 | 488 | static void dtl_worker_disable(unsigned long *time_limit) |
d62c8dee NR |
489 | { |
490 | cpuhp_remove_state(dtl_worker_state); | |
18a593c8 | 491 | free_dtl_buffers(time_limit); |
d62c8dee NR |
492 | reset_global_dtl_mask(); |
493 | write_unlock(&dtl_access_lock); | |
494 | } | |
495 | ||
496 | static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, | |
497 | size_t count, loff_t *ppos) | |
498 | { | |
18a593c8 | 499 | unsigned long time_limit = jiffies + HZ; |
d62c8dee NR |
500 | struct vcpu_dispatch_data *disp; |
501 | int rc, cmd, cpu; | |
502 | char buf[16]; | |
503 | ||
504 | if (count > 15) | |
505 | return -EINVAL; | |
506 | ||
507 | if (copy_from_user(buf, p, count)) | |
508 | return -EFAULT; | |
509 | ||
510 | buf[count] = 0; | |
511 | rc = kstrtoint(buf, 0, &cmd); | |
512 | if (rc || cmd < 0 || cmd > 1) { | |
513 | pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n"); | |
514 | return rc ? rc : -EINVAL; | |
515 | } | |
516 | ||
517 | mutex_lock(&dtl_enable_mutex); | |
518 | ||
519 | if ((cmd == 0 && !vcpudispatch_stats_on) || | |
520 | (cmd == 1 && vcpudispatch_stats_on)) | |
521 | goto out; | |
522 | ||
523 | if (cmd) { | |
524 | rc = init_cpu_associativity(); | |
525 | if (rc) | |
526 | goto out; | |
527 | ||
528 | for_each_possible_cpu(cpu) { | |
529 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
530 | memset(disp, 0, sizeof(*disp)); | |
531 | disp->last_disp_cpu = -1; | |
532 | } | |
533 | ||
18a593c8 | 534 | rc = dtl_worker_enable(&time_limit); |
d62c8dee NR |
535 | if (rc) { |
536 | destroy_cpu_associativity(); | |
537 | goto out; | |
538 | } | |
539 | } else { | |
18a593c8 | 540 | dtl_worker_disable(&time_limit); |
d62c8dee NR |
541 | destroy_cpu_associativity(); |
542 | } | |
543 | ||
544 | vcpudispatch_stats_on = cmd; | |
545 | ||
546 | out: | |
547 | mutex_unlock(&dtl_enable_mutex); | |
548 | if (rc) | |
549 | return rc; | |
550 | return count; | |
551 | } | |
552 | ||
553 | static int vcpudispatch_stats_display(struct seq_file *p, void *v) | |
554 | { | |
555 | int cpu; | |
556 | struct vcpu_dispatch_data *disp; | |
557 | ||
558 | if (!vcpudispatch_stats_on) { | |
559 | seq_puts(p, "off\n"); | |
560 | return 0; | |
561 | } | |
562 | ||
563 | for_each_online_cpu(cpu) { | |
564 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
565 | seq_printf(p, "cpu%d", cpu); | |
566 | seq_put_decimal_ull(p, " ", disp->total_disp); | |
567 | seq_put_decimal_ull(p, " ", disp->same_cpu_disp); | |
568 | seq_put_decimal_ull(p, " ", disp->same_chip_disp); | |
569 | seq_put_decimal_ull(p, " ", disp->diff_chip_disp); | |
570 | seq_put_decimal_ull(p, " ", disp->far_chip_disp); | |
571 | seq_put_decimal_ull(p, " ", disp->numa_home_disp); | |
572 | seq_put_decimal_ull(p, " ", disp->numa_remote_disp); | |
573 | seq_put_decimal_ull(p, " ", disp->numa_far_disp); | |
574 | seq_puts(p, "\n"); | |
575 | } | |
576 | ||
577 | return 0; | |
578 | } | |
579 | ||
580 | static int vcpudispatch_stats_open(struct inode *inode, struct file *file) | |
581 | { | |
582 | return single_open(file, vcpudispatch_stats_display, NULL); | |
583 | } | |
584 | ||
585 | static const struct file_operations vcpudispatch_stats_proc_ops = { | |
586 | .open = vcpudispatch_stats_open, | |
587 | .read = seq_read, | |
588 | .write = vcpudispatch_stats_write, | |
589 | .llseek = seq_lseek, | |
590 | .release = single_release, | |
591 | }; | |
592 | ||
593 | static ssize_t vcpudispatch_stats_freq_write(struct file *file, | |
594 | const char __user *p, size_t count, loff_t *ppos) | |
595 | { | |
596 | int rc, freq; | |
597 | char buf[16]; | |
598 | ||
599 | if (count > 15) | |
600 | return -EINVAL; | |
601 | ||
602 | if (copy_from_user(buf, p, count)) | |
603 | return -EFAULT; | |
604 | ||
605 | buf[count] = 0; | |
606 | rc = kstrtoint(buf, 0, &freq); | |
607 | if (rc || freq < 1 || freq > HZ) { | |
608 | pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n", | |
609 | HZ); | |
610 | return rc ? rc : -EINVAL; | |
611 | } | |
612 | ||
613 | vcpudispatch_stats_freq = freq; | |
614 | ||
615 | return count; | |
616 | } | |
617 | ||
618 | static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v) | |
619 | { | |
620 | seq_printf(p, "%d\n", vcpudispatch_stats_freq); | |
621 | return 0; | |
622 | } | |
623 | ||
624 | static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file) | |
625 | { | |
626 | return single_open(file, vcpudispatch_stats_freq_display, NULL); | |
627 | } | |
628 | ||
629 | static const struct file_operations vcpudispatch_stats_freq_proc_ops = { | |
630 | .open = vcpudispatch_stats_freq_open, | |
631 | .read = seq_read, | |
632 | .write = vcpudispatch_stats_freq_write, | |
633 | .llseek = seq_lseek, | |
634 | .release = single_release, | |
635 | }; | |
636 | ||
637 | static int __init vcpudispatch_stats_procfs_init(void) | |
638 | { | |
639 | if (!lppaca_shared_proc(get_lppaca())) | |
640 | return 0; | |
641 | ||
642 | if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, | |
643 | &vcpudispatch_stats_proc_ops)) | |
644 | pr_err("vcpudispatch_stats: error creating procfs file\n"); | |
645 | else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL, | |
646 | &vcpudispatch_stats_freq_proc_ops)) | |
647 | pr_err("vcpudispatch_stats_freq: error creating procfs file\n"); | |
648 | ||
649 | return 0; | |
650 | } | |
651 | ||
652 | machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); | |
06220d78 NR |
653 | #endif /* CONFIG_PPC_SPLPAR */ |
654 | ||
1da177e4 LT |
655 | void vpa_init(int cpu) |
656 | { | |
657 | int hwcpu = get_hard_smp_processor_id(cpu); | |
2f6093c8 | 658 | unsigned long addr; |
1da177e4 | 659 | long ret; |
233ccd0d | 660 | |
b89bdfb8 ME |
661 | /* |
662 | * The spec says it "may be problematic" if CPU x registers the VPA of | |
663 | * CPU y. We should never do that, but wail if we ever do. | |
664 | */ | |
665 | WARN_ON(cpu != smp_processor_id()); | |
666 | ||
233ccd0d | 667 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
8154c5d2 | 668 | lppaca_of(cpu).vmxregs_in_use = 1; |
233ccd0d | 669 | |
6e0b8bc9 ME |
670 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
671 | lppaca_of(cpu).ebb_regs_in_use = 1; | |
672 | ||
8154c5d2 | 673 | addr = __pa(&lppaca_of(cpu)); |
2f6093c8 | 674 | ret = register_vpa(hwcpu, addr); |
1da177e4 | 675 | |
2f6093c8 | 676 | if (ret) { |
711ef84e AB |
677 | pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " |
678 | "%lx failed with %ld\n", cpu, hwcpu, addr, ret); | |
2f6093c8 MN |
679 | return; |
680 | } | |
d8c476ee | 681 | |
4e003747 | 682 | #ifdef CONFIG_PPC_BOOK3S_64 |
2f6093c8 MN |
683 | /* |
684 | * PAPR says this feature is SLB-Buffer but firmware never | |
685 | * reports that. All SPLPARs support the SLB shadow buffer. | |
686 | */ | |
d8c476ee | 687 | if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) { |
d2e60075 | 688 | addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr); |
2f6093c8 MN |
689 | ret = register_slb_shadow(hwcpu, addr); |
690 | if (ret) | |
711ef84e AB |
691 | pr_err("WARNING: SLB shadow buffer registration for " |
692 | "cpu %d (hw %d) of area %lx failed with %ld\n", | |
693 | cpu, hwcpu, addr, ret); | |
2f6093c8 | 694 | } |
4e003747 | 695 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
cf9efce0 PM |
696 | |
697 | /* | |
698 | * Register dispatch trace log, if one has been allocated. | |
699 | */ | |
1c85a2a1 | 700 | register_dtl_buffer(cpu); |
1da177e4 LT |
701 | } |
702 | ||
4e003747 | 703 | #ifdef CONFIG_PPC_BOOK3S_64 |
d8c476ee | 704 | |
035223fb | 705 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
5524a27d AK |
706 | unsigned long vpn, unsigned long pa, |
707 | unsigned long rflags, unsigned long vflags, | |
b1022fbd | 708 | int psize, int apsize, int ssize) |
1da177e4 | 709 | { |
1da177e4 LT |
710 | unsigned long lpar_rc; |
711 | unsigned long flags; | |
712 | unsigned long slot; | |
96e28449 | 713 | unsigned long hpte_v, hpte_r; |
1da177e4 | 714 | |
3c726f8d | 715 | if (!(vflags & HPTE_V_BOLTED)) |
5524a27d AK |
716 | pr_devel("hpte_insert(group=%lx, vpn=%016lx, " |
717 | "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", | |
718 | hpte_group, vpn, pa, rflags, vflags, psize); | |
3c726f8d | 719 | |
b1022fbd | 720 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
6b243fcf | 721 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; |
3c726f8d BH |
722 | |
723 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 724 | pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); |
3c726f8d | 725 | |
1da177e4 LT |
726 | /* Now fill in the actual HPTE */ |
727 | /* Set CEC cookie to 0 */ | |
728 | /* Zero page = 0 */ | |
729 | /* I-cache Invalidate = 0 */ | |
730 | /* I-cache synchronize = 0 */ | |
731 | /* Exact = 0 */ | |
732 | flags = 0; | |
733 | ||
9ee820fa BK |
734 | if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) |
735 | flags |= H_COALESCE_CAND; | |
1da177e4 | 736 | |
b9377ffc | 737 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
706c8c93 | 738 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
ca42d8d2 | 739 | pr_devel("Hash table group is full\n"); |
1da177e4 | 740 | return -1; |
3c726f8d | 741 | } |
1da177e4 LT |
742 | |
743 | /* | |
744 | * Since we try and ioremap PHBs we don't own, the pte insert | |
745 | * will fail. However we must catch the failure in hash_page | |
746 | * or we will loop forever, so return -2 in this case. | |
747 | */ | |
706c8c93 | 748 | if (unlikely(lpar_rc != H_SUCCESS)) { |
ca42d8d2 | 749 | pr_err("Failed hash pte insert with error %ld\n", lpar_rc); |
1da177e4 | 750 | return -2; |
3c726f8d BH |
751 | } |
752 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 753 | pr_devel(" -> slot: %lu\n", slot & 7); |
1da177e4 LT |
754 | |
755 | /* Because of iSeries, we have to pass down the secondary | |
756 | * bucket bit here as well | |
757 | */ | |
96e28449 | 758 | return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
759 | } |
760 | ||
761 | static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); | |
762 | ||
763 | static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | |
764 | { | |
765 | unsigned long slot_offset; | |
766 | unsigned long lpar_rc; | |
767 | int i; | |
768 | unsigned long dummy1, dummy2; | |
769 | ||
770 | /* pick a random slot to start at */ | |
771 | slot_offset = mftb() & 0x7; | |
772 | ||
773 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
774 | ||
775 | /* don't remove a bolted entry */ | |
776 | lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, | |
777 | (0x1UL << 4), &dummy1, &dummy2); | |
706c8c93 | 778 | if (lpar_rc == H_SUCCESS) |
1da177e4 | 779 | return i; |
9fb26401 MW |
780 | |
781 | /* | |
782 | * The test for adjunct partition is performed before the | |
783 | * ANDCOND test. H_RESOURCE may be returned, so we need to | |
784 | * check for that as well. | |
785 | */ | |
786 | BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); | |
1da177e4 LT |
787 | |
788 | slot_offset++; | |
789 | slot_offset &= 0x7; | |
790 | } | |
791 | ||
792 | return -1; | |
793 | } | |
794 | ||
5246adec | 795 | static void manual_hpte_clear_all(void) |
1da177e4 LT |
796 | { |
797 | unsigned long size_bytes = 1UL << ppc64_pft_size; | |
798 | unsigned long hpte_count = size_bytes >> 4; | |
d504bed6 MN |
799 | struct { |
800 | unsigned long pteh; | |
801 | unsigned long ptel; | |
802 | } ptes[4]; | |
b7abc5c5 | 803 | long lpar_rc; |
bed9a315 | 804 | unsigned long i, j; |
d504bed6 MN |
805 | |
806 | /* Read in batches of 4 and |
807 | * invalidate only valid entries not in the VRMA. | |
808 | * hpte_count will be a multiple of 4. | |
809 | */ | |
810 | for (i = 0; i < hpte_count; i += 4) { | |
811 | lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); | |
ca42d8d2 AK |
812 | if (lpar_rc != H_SUCCESS) { |
813 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
814 | i, lpar_rc); | |
d504bed6 | 815 | continue; |
ca42d8d2 | 816 | } |
d504bed6 MN |
817 | for (j = 0; j < 4; j++){ |
818 | if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == | |
819 | HPTE_V_VRMA_MASK) | |
820 | continue; | |
821 | if (ptes[j].pteh & HPTE_V_VALID) | |
822 | plpar_pte_remove_raw(0, i + j, 0, | |
823 | &(ptes[j].pteh), &(ptes[j].ptel)); | |
b7abc5c5 SS |
824 | } |
825 | } | |
5246adec AB |
826 | } |
827 | ||
828 | static int hcall_hpte_clear_all(void) | |
829 | { | |
830 | int rc; | |
831 | ||
832 | do { | |
833 | rc = plpar_hcall_norets(H_CLEAR_HPT); | |
834 | } while (rc == H_CONTINUE); | |
835 | ||
836 | return rc; | |
837 | } | |
838 | ||
839 | static void pseries_hpte_clear_all(void) | |
840 | { | |
841 | int rc; | |
842 | ||
843 | rc = hcall_hpte_clear_all(); | |
844 | if (rc != H_SUCCESS) | |
845 | manual_hpte_clear_all(); | |
e844b1ee AB |
846 | |
847 | #ifdef __LITTLE_ENDIAN__ | |
408cddd9 HB |
848 | /* |
849 | * Reset exceptions to big endian. | |
850 | * | |
851 | * FIXME this is a hack for kexec, we need to reset the exception | |
852 | * endian before starting the new kernel and this is a convenient place | |
853 | * to do it. | |
854 | * | |
855 | * This is also called on boot when a fadump happens. In that case we | |
856 | * must not change the exception endian mode. | |
857 | */ | |
d3cbff1b BH |
858 | if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) |
859 | pseries_big_endian_exceptions(); | |
e844b1ee | 860 | #endif |
1da177e4 LT |
861 | } |
862 | ||
863 | /* | |
864 | * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and | |
865 | * the low 3 bits of flags happen to line up. So no transform is needed. | |
866 | * We can probably optimize here and assume the high bits of newpp are | |
867 | * already zero. For now I am paranoid. | |
868 | */ | |
3c726f8d BH |
869 | static long pSeries_lpar_hpte_updatepp(unsigned long slot, |
870 | unsigned long newpp, | |
5524a27d | 871 | unsigned long vpn, |
db3d8534 | 872 | int psize, int apsize, |
aefa5688 | 873 | int ssize, unsigned long inv_flags) |
1da177e4 LT |
874 | { |
875 | unsigned long lpar_rc; | |
e71ff982 | 876 | unsigned long flags; |
3c726f8d | 877 | unsigned long want_v; |
1da177e4 | 878 | |
5524a27d | 879 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 880 | |
e71ff982 BS |
881 | flags = (newpp & 7) | H_AVPN; |
882 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) | |
883 | /* Move pp0 into bit 8 (IBM 55) */ | |
884 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
885 | ||
a8c0bf3c AK |
886 | pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", |
887 | want_v, slot, flags, psize); | |
888 | ||
1189be65 | 889 | lpar_rc = plpar_pte_protect(flags, slot, want_v); |
3c726f8d | 890 | |
706c8c93 | 891 | if (lpar_rc == H_NOT_FOUND) { |
551a232c | 892 | pr_devel("not found !\n"); |
1da177e4 | 893 | return -1; |
3c726f8d BH |
894 | } |
895 | ||
551a232c | 896 | pr_devel("ok\n"); |
1da177e4 | 897 | |
706c8c93 | 898 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
899 | |
900 | return 0; | |
901 | } | |
902 | ||
4ad90c86 | 903 | static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group) |
1da177e4 | 904 | { |
4ad90c86 AK |
905 | long lpar_rc; |
906 | unsigned long i, j; | |
907 | struct { | |
908 | unsigned long pteh; | |
909 | unsigned long ptel; | |
910 | } ptes[4]; | |
1da177e4 | 911 | |
4ad90c86 | 912 | for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { |
1da177e4 | 913 | |
4ad90c86 | 914 | lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); |
ca42d8d2 AK |
915 | if (lpar_rc != H_SUCCESS) { |
916 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
917 | hpte_group, lpar_rc); | |
4ad90c86 | 918 | continue; |
ca42d8d2 | 919 | } |
1da177e4 | 920 | |
4ad90c86 AK |
921 | for (j = 0; j < 4; j++) { |
922 | if (HPTE_V_COMPARE(ptes[j].pteh, want_v) && | |
923 | (ptes[j].pteh & HPTE_V_VALID)) | |
924 | return i + j; | |
925 | } | |
926 | } | |
1da177e4 | 927 | |
4ad90c86 | 928 | return -1; |
1da177e4 LT |
929 | } |
930 | ||
5524a27d | 931 | static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 932 | { |
1da177e4 | 933 | long slot; |
4ad90c86 AK |
934 | unsigned long hash; |
935 | unsigned long want_v; | |
936 | unsigned long hpte_group; | |
1da177e4 | 937 | |
5524a27d AK |
938 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
939 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
1189be65 PM |
940 | |
941 | /* Bolted entries are always in the primary group */ | |
4ad90c86 AK |
942 | hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
943 | slot = __pSeries_lpar_hpte_find(want_v, hpte_group); | |
944 | if (slot < 0) | |
945 | return -1; | |
946 | return hpte_group + slot; | |
947 | } | |
1da177e4 LT |
948 | |
949 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, | |
3c726f8d | 950 | unsigned long ea, |
1189be65 | 951 | int psize, int ssize) |
1da177e4 | 952 | { |
5524a27d AK |
953 | unsigned long vpn; |
954 | unsigned long lpar_rc, slot, vsid, flags; | |
1da177e4 | 955 | |
1189be65 | 956 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 957 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 958 | |
5524a27d | 959 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
960 | BUG_ON(slot == -1); |
961 | ||
962 | flags = newpp & 7; | |
e71ff982 BS |
963 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) |
964 | /* Move pp0 into bit 8 (IBM 55) */ | |
965 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
966 | ||
1da177e4 LT |
967 | lpar_rc = plpar_pte_protect(flags, slot, 0); |
968 | ||
706c8c93 | 969 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
970 | } |
971 | ||
5524a27d | 972 | static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 AK |
973 | int psize, int apsize, |
974 | int ssize, int local) | |
1da177e4 | 975 | { |
3c726f8d | 976 | unsigned long want_v; |
1da177e4 LT |
977 | unsigned long lpar_rc; |
978 | unsigned long dummy1, dummy2; | |
979 | ||
5524a27d AK |
980 | pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", |
981 | slot, vpn, psize, local); | |
1da177e4 | 982 | |
5524a27d | 983 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1189be65 | 984 | lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); |
706c8c93 | 985 | if (lpar_rc == H_NOT_FOUND) |
1da177e4 LT |
986 | return; |
987 | ||
706c8c93 | 988 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
989 | } |
990 | ||
ba2dd8a2 LD |
991 | |
992 | /* | |
993 | * As defined in the PAPR's section 14.5.4.1.8 | |
994 | * The control mask doesn't include the returned reference and change bit from | |
995 | * the processed PTE. | |
996 | */ | |
997 | #define HBLKR_AVPN 0x0100000000000000UL | |
998 | #define HBLKR_CTRL_MASK 0xf800000000000000UL | |
999 | #define HBLKR_CTRL_SUCCESS 0x8000000000000000UL | |
1000 | #define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL | |
1001 | #define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL | |
1002 | ||
59545ebe LD |
1003 | /* |
1004 | * Returns true if this block size is supported for the specified segment | |
1005 | * base page size and actual page size. | |
1006 | * | |
1007 | * Currently, only a block size of 8 is supported. | |
1008 | */ | |
1009 | static inline bool is_supported_hlbkrm(int bpsize, int psize) | |
1010 | { | |
1011 | return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE); | |
1012 | } | |
1013 | ||
ba2dd8a2 LD |
1014 | /** |
1015 | * H_BLOCK_REMOVE caller. | |
1016 | * @idx should point to the latest @param entry set with a PTEX. | |
1017 | * If a PTE cannot be processed because another CPU has already locked that | |
1018 | * group, those entries are put back in @param starting at index 1. | |
1019 | * If entries have to be retried and @retry_busy is set to true, these entries | |
1020 | * are retried until they succeed. If @retry_busy is set to false, the return | |
1021 | * value is the number of entries yet to be processed. | |
1022 | */ | |
1023 | static unsigned long call_block_remove(unsigned long idx, unsigned long *param, | |
1024 | bool retry_busy) | |
1025 | { | |
1026 | unsigned long i, rc, new_idx; | |
1027 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1028 | ||
1029 | if (idx < 2) { | |
1030 | pr_warn("Unexpected empty call to H_BLOCK_REMOVE"); | |
1031 | return 0; | |
1032 | } | |
1033 | again: | |
1034 | new_idx = 0; | |
1035 | if (idx > PLPAR_HCALL9_BUFSIZE) { | |
1036 | pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx); | |
1037 | idx = PLPAR_HCALL9_BUFSIZE; | |
1038 | } else if (idx < PLPAR_HCALL9_BUFSIZE) | |
1039 | param[idx] = HBR_END; | |
1040 | ||
1041 | rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf, | |
1042 | param[0], /* AVA */ | |
1043 | param[1], param[2], param[3], param[4], /* TS0-7 */ | |
1044 | param[5], param[6], param[7], param[8]); | |
1045 | if (rc == H_SUCCESS) | |
1046 | return 0; | |
1047 | ||
1048 | BUG_ON(rc != H_PARTIAL); | |
1049 | ||
1050 | /* Check that the unprocessed entries were 'not found' or 'busy' */ | |
1051 | for (i = 0; i < idx-1; i++) { | |
1052 | unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK; | |
1053 | ||
1054 | if (ctrl == HBLKR_CTRL_ERRBUSY) { | |
1055 | param[++new_idx] = param[i+1]; | |
1056 | continue; | |
1057 | } | |
1058 | ||
1059 | BUG_ON(ctrl != HBLKR_CTRL_SUCCESS | |
1060 | && ctrl != HBLKR_CTRL_ERRNOTFOUND); | |
1061 | } | |
1062 | ||
1063 | /* | |
1064 | * If some entries were found busy, retry them if requested, | |
1065 | * or if all the entries have to be retried. | |
1066 | */ | |
1067 | if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) { | |
1068 | idx = new_idx + 1; | |
1069 | goto again; | |
1070 | } | |
1071 | ||
1072 | return new_idx; | |
1073 | } | |
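To make the parameter layout concrete, here is a hedged, standalone sketch of how callers further down (such as do_block_remove()) fill the buffer handed to call_block_remove(): param[0] carries the AVPN of the naturally aligned 8-page block, the following slots each carry one HBR_REQUEST | HBLKR_AVPN | PTEX entry, and a remaining unused slot is terminated with HBR_END. The build_avpn() helper is a made-up stand-in for hpte_encode_avpn(), and no hcall is actually made:

```c
/* Minimal sketch only: assembling a H_BLOCK_REMOVE parameter buffer.
 * The HBR_ and HBLKR_ values copy the defines above; build_avpn() is a
 * hypothetical stand-in for hpte_encode_avpn(). */
#include <stdio.h>

#define HBR_REQUEST		0x4000000000000000UL
#define HBR_END			0xc000000000000000UL
#define HBLKR_AVPN		0x0100000000000000UL
#define PLPAR_HCALL9_BUFSIZE	9

static unsigned long build_avpn(unsigned long vpn)
{
	return vpn << 23;	/* placeholder encoding, not the real AVPN format */
}

int main(void)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long slots[] = { 0x1230, 0x1231, 0x1232 };
	unsigned long idx = 0, i;

	param[idx++] = build_avpn(0xabcd);		/* param[0]: AVPN of the 8-page block */
	for (i = 0; i < 3; i++)				/* param[1..]: one PTEX per entry */
		param[idx++] = HBR_REQUEST | HBLKR_AVPN | slots[i];
	if (idx < PLPAR_HCALL9_BUFSIZE)			/* mark the end of the request list */
		param[idx] = HBR_END;

	for (i = 0; i <= idx; i++)
		printf("param[%lu] = 0x%016lx\n", i, param[i]);
	return 0;
}
```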
1074 | ||
e34aa03c | 1075 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1a527286 AK |
1076 | /* |
1077 | * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need | |
1078 | * to make sure that we avoid bouncing the hypervisor tlbie lock. | |
1079 | */ | |
1080 | #define PPC64_HUGE_HPTE_BATCH 12 | |
1081 | ||
ba2dd8a2 LD |
1082 | static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn, |
1083 | int count, int psize, int ssize) | |
1a527286 | 1084 | { |
05af40e8 | 1085 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
ba2dd8a2 LD |
1086 | unsigned long shift, current_vpgb, vpgb; |
1087 | int i, pix = 0; | |
1a527286 | 1088 | |
ba2dd8a2 LD |
1089 | shift = mmu_psize_defs[psize].shift; |
1090 | ||
1091 | for (i = 0; i < count; i++) { | |
1092 | /* | |
1093 | * Shift 3 more bits to the right to get an | |
1094 | * 8-page aligned virtual address. | |
1095 | */ | |
1096 | vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3)); | |
1097 | if (!pix || vpgb != current_vpgb) { | |
1098 | /* | |
1099 | * Need to start a new 8 pages block, flush | |
1100 | * the current one if needed. | |
1101 | */ | |
1102 | if (pix) | |
1103 | (void)call_block_remove(pix, param, true); | |
1104 | current_vpgb = vpgb; | |
1105 | param[0] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1106 | pix = 1; | |
1107 | } | |
1108 | ||
1109 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i]; | |
1110 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1111 | pix = call_block_remove(pix, param, false); | |
1112 | /* | |
1113 | * pix = 0 means that all the entries were | |
1114 | * removed, so we can start a new block. | |
1115 | * Otherwise, this means that there are entries | |
1116 | * to retry, and pix points to the latest one, so | |
1117 | * we should increment it and try to continue | |
1118 | * the same block. | |
1119 | */ | |
1120 | if (pix) | |
1121 | pix++; | |
1122 | } | |
1123 | } | |
1124 | if (pix) | |
1125 | (void)call_block_remove(pix, param, true); | |
1126 | } | |
1127 | ||
1128 | static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn, | |
1129 | int count, int psize, int ssize) | |
1130 | { | |
1131 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; | |
1132 | int i = 0, pix = 0, rc; | |
1a527286 AK |
1133 | |
1134 | for (i = 0; i < count; i++) { | |
1135 | ||
1136 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
1137 | pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0, | |
1138 | ssize, 0); | |
1139 | } else { | |
1140 | param[pix] = HBR_REQUEST | HBR_AVPN | slot[i]; | |
1141 | param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1142 | pix += 2; | |
1143 | if (pix == 8) { | |
1144 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
1145 | param[0], param[1], param[2], | |
1146 | param[3], param[4], param[5], | |
1147 | param[6], param[7]); | |
1148 | BUG_ON(rc != H_SUCCESS); | |
1149 | pix = 0; | |
1150 | } | |
1151 | } | |
1152 | } | |
1153 | if (pix) { | |
1154 | param[pix] = HBR_END; | |
1155 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1156 | param[2], param[3], param[4], param[5], | |
1157 | param[6], param[7]); | |
1158 | BUG_ON(rc != H_SUCCESS); | |
1159 | } | |
ba2dd8a2 LD |
1160 | } |
1161 | ||
1162 | static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, | |
1163 | unsigned long *vpn, | |
1164 | int count, int psize, | |
1165 | int ssize) | |
1166 | { | |
1167 | unsigned long flags = 0; | |
1168 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | |
1169 | ||
1170 | if (lock_tlbie) | |
1171 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1172 | ||
59545ebe LD |
1173 | /* Assuming THP size is 16M */ |
1174 | if (is_supported_hlbkrm(psize, MMU_PAGE_16M)) | |
ba2dd8a2 LD |
1175 | hugepage_block_invalidate(slot, vpn, count, psize, ssize); |
1176 | else | |
1177 | hugepage_bulk_invalidate(slot, vpn, count, psize, ssize); | |
1a527286 AK |
1178 | |
1179 | if (lock_tlbie) | |
1180 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1181 | } | |
1182 | ||
fa1f8ae8 AK |
1183 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, |
1184 | unsigned long addr, | |
1185 | unsigned char *hpte_slot_array, | |
d557b098 | 1186 | int psize, int ssize, int local) |
1a527286 | 1187 | { |
fa1f8ae8 | 1188 | int i, index = 0; |
1a527286 AK |
1189 | unsigned long s_addr = addr; |
1190 | unsigned int max_hpte_count, valid; | |
1191 | unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; | |
1192 | unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; | |
fa1f8ae8 | 1193 | unsigned long shift, hidx, vpn = 0, hash, slot; |
1a527286 AK |
1194 | |
1195 | shift = mmu_psize_defs[psize].shift; | |
1196 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
1197 | ||
1198 | for (i = 0; i < max_hpte_count; i++) { | |
1199 | valid = hpte_valid(hpte_slot_array, i); | |
1200 | if (!valid) | |
1201 | continue; | |
1202 | hidx = hpte_hash_index(hpte_slot_array, i); | |
1203 | ||
1204 | /* get the vpn */ | |
1205 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
1206 | vpn = hpt_vpn(addr, vsid, ssize); |
1207 | hash = hpt_hash(vpn, shift, ssize); | |
1208 | if (hidx & _PTEIDX_SECONDARY) | |
1209 | hash = ~hash; | |
1210 | ||
1211 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1212 | slot += hidx & _PTEIDX_GROUP_IX; | |
1213 | ||
1214 | slot_array[index] = slot; | |
1215 | vpn_array[index] = vpn; | |
1216 | if (index == PPC64_HUGE_HPTE_BATCH - 1) { | |
1217 | /* | |
1218 | * Now do a bulk invalidate | |
1219 | */ | |
1220 | __pSeries_lpar_hugepage_invalidate(slot_array, | |
1221 | vpn_array, | |
1222 | PPC64_HUGE_HPTE_BATCH, | |
1223 | psize, ssize); | |
1224 | index = 0; | |
1225 | } else | |
1226 | index++; | |
1227 | } | |
1228 | if (index) | |
1229 | __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, | |
1230 | index, psize, ssize); | |
1231 | } | |
e34aa03c AK |
1232 | #else |
1233 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, | |
1234 | unsigned long addr, | |
1235 | unsigned char *hpte_slot_array, | |
1236 | int psize, int ssize, int local) | |
1237 | { | |
1238 | WARN(1, "%s called without THP support\n", __func__); | |
1239 | } | |
1240 | #endif | |
1a527286 | 1241 | |
27828f98 DG |
1242 | static int pSeries_lpar_hpte_removebolted(unsigned long ea, |
1243 | int psize, int ssize) | |
f8c8803b | 1244 | { |
5524a27d AK |
1245 | unsigned long vpn; |
1246 | unsigned long slot, vsid; | |
f8c8803b BP |
1247 | |
1248 | vsid = get_kernel_vsid(ea, ssize); | |
5524a27d | 1249 | vpn = hpt_vpn(ea, vsid, ssize); |
f8c8803b | 1250 | |
5524a27d | 1251 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
27828f98 DG |
1252 | if (slot == -1) |
1253 | return -ENOENT; | |
1254 | ||
db3d8534 AK |
1255 | /* |
1256 | * lpar doesn't use the passed actual page size | |
1257 | */ | |
1258 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0); | |
27828f98 | 1259 | return 0; |
f8c8803b BP |
1260 | } |
1261 | ||
0effa488 LD |
1262 | |
1263 | static inline unsigned long compute_slot(real_pte_t pte, | |
1264 | unsigned long vpn, | |
1265 | unsigned long index, | |
1266 | unsigned long shift, | |
1267 | int ssize) | |
1268 | { | |
1269 | unsigned long slot, hash, hidx; | |
1270 | ||
1271 | hash = hpt_hash(vpn, shift, ssize); | |
1272 | hidx = __rpte_to_hidx(pte, index); | |
1273 | if (hidx & _PTEIDX_SECONDARY) | |
1274 | hash = ~hash; | |
1275 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1276 | slot += hidx & _PTEIDX_GROUP_IX; | |
1277 | return slot; | |
1278 | } | |
1279 | ||
ba2dd8a2 LD |
1280 | /** |
1281 | * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are | |
1282 | * "all within the same naturally aligned 8 page virtual address block". | |
1283 | */ | |
1284 | static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch, | |
1285 | unsigned long *param) | |
1286 | { | |
1287 | unsigned long vpn; | |
1288 | unsigned long i, pix = 0; | |
1289 | unsigned long index, shift, slot, current_vpgb, vpgb; | |
1290 | real_pte_t pte; | |
1291 | int psize, ssize; | |
1292 | ||
1293 | psize = batch->psize; | |
1294 | ssize = batch->ssize; | |
1295 | ||
1296 | for (i = 0; i < number; i++) { | |
1297 | vpn = batch->vpn[i]; | |
1298 | pte = batch->pte[i]; | |
1299 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { | |
1300 | /* | |
1301 | * Shift 3 more bits to the right to get an | |
1302 | * 8-page aligned virtual address. | |
1303 | */ | |
1304 | vpgb = (vpn >> (shift - VPN_SHIFT + 3)); | |
1305 | if (!pix || vpgb != current_vpgb) { | |
1306 | /* | |
1307 | * Need to start a new 8 pages block, flush | |
1308 | * the current one if needed. | |
1309 | */ | |
1310 | if (pix) | |
1311 | (void)call_block_remove(pix, param, | |
1312 | true); | |
1313 | current_vpgb = vpgb; | |
1314 | param[0] = hpte_encode_avpn(vpn, psize, | |
1315 | ssize); | |
1316 | pix = 1; | |
1317 | } | |
1318 | ||
1319 | slot = compute_slot(pte, vpn, index, shift, ssize); | |
1320 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot; | |
1321 | ||
1322 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1323 | pix = call_block_remove(pix, param, false); | |
1324 | /* | |
1325 | * pix = 0 means that all the entries were | |
1326 | * removed, so we can start a new block. | |
1327 | * Otherwise, this means that there are entries | |
1328 | * to retry, and pix points to the latest one, so | |
1329 | * we should increment it and try to continue | |
1330 | * the same block. | |
1331 | */ | |
1332 | if (pix) | |
1333 | pix++; | |
1334 | } | |
1335 | } pte_iterate_hashed_end(); | |
1336 | } | |
1337 | ||
1338 | if (pix) | |
1339 | (void)call_block_remove(pix, param, true); | |
1340 | } | |
1341 | ||
1211ee61 LD |
1342 | /* |
1343 | * TLB Block Invalidate Characteristics | |
1344 | * | |
1345 | * These characteristics define the size of the block the hcall H_BLOCK_REMOVE | |
1346 | * is able to process for each pair of segment base page size and actual page size. | |
1347 | * | |
1348 | * The ibm,get-system-parameter RTAS call returns a buffer with the | |
1349 | * following layout: | |
1350 | * | |
1351 | * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ] | |
1352 | * ----------------- | |
1353 | * TLB Block Invalidate Specifiers: | |
1354 | * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ] | |
1355 | * [ 1 byte Number of page sizes (N) that are supported for the specified | |
1356 | * TLB invalidate block size ] | |
1357 | * [ 1 byte Encoded segment base page size and actual page size | |
1358 | * MSB=0 means 4k segment base page size and actual page size | |
1359 | * MSB=1 the penc value in mmu_psize_def ] | |
1360 | * ... | |
1361 | * ----------------- | |
1362 | * Next TLB Block Invalidate Specifiers... | |
1363 | * ----------------- | |
1364 | * [ 0 ] | |
1365 | */ | |
1366 | static inline void set_hblkrm_bloc_size(int bpsize, int psize, | |
1367 | unsigned int block_size) | |
1368 | { | |
1369 | if (block_size > hblkrm_size[bpsize][psize]) | |
1370 | hblkrm_size[bpsize][psize] = block_size; | |
1371 | } | |
1372 | ||
1373 | /* | |
1374 | * Decode the Encoded segment base page size and actual page size. | |
1375 | * PAPR specifies: | |
1376 | * - bit 7 is the L bit | |
1377 | * - bits 0-5 are the penc value | |
1378 | * If the L bit is 0, this means 4K segment base page size and actual page size | |
1379 | * otherwise the penc value should be read. | |
1380 | */ | |
1381 | #define HBLKRM_L_MASK 0x80 | |
1382 | #define HBLKRM_PENC_MASK 0x3f | |
1383 | static inline void __init check_lp_set_hblkrm(unsigned int lp, | |
1384 | unsigned int block_size) | |
1385 | { | |
1386 | unsigned int bpsize, psize; | |
1387 | ||
1388 | /* First, check the L bit, if not set, this means 4K */ | |
1389 | if ((lp & HBLKRM_L_MASK) == 0) { | |
1390 | set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size); | |
1391 | return; | |
1392 | } | |
1393 | ||
1394 | lp &= HBLKRM_PENC_MASK; | |
1395 | for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) { | |
1396 | struct mmu_psize_def *def = &mmu_psize_defs[bpsize]; | |
1397 | ||
1398 | for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { | |
1399 | if (def->penc[psize] == lp) { | |
1400 | set_hblkrm_bloc_size(bpsize, psize, block_size); | |
1401 | return; | |
1402 | } | |
1403 | } | |
1404 | } | |
1405 | } | |
1406 | ||
1407 | #define SPLPAR_TLB_BIC_TOKEN 50 | |
1408 | ||
1409 | /* | |
1410 | * The size of the TLB Block Invalidate Characteristics is variable. But at the | |
1411 | * maximum it will be twice the number of possible page sizes plus 10 bytes. | |
1412 | * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size | |
1413 | * (128 bytes) for the buffer to get plenty of space. | |
1414 | */ | |
1415 | #define SPLPAR_TLB_BIC_MAXLENGTH 128 | |
1416 | ||
1417 | void __init pseries_lpar_read_hblkrm_characteristics(void) | |
1418 | { | |
1419 | unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH]; | |
1420 | int call_status, len, idx, bpsize; | |
1421 | ||
1422 | spin_lock(&rtas_data_buf_lock); | |
1423 | memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); | |
1424 | call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, | |
1425 | NULL, | |
1426 | SPLPAR_TLB_BIC_TOKEN, | |
1427 | __pa(rtas_data_buf), | |
1428 | RTAS_DATA_BUF_SIZE); | |
1429 | memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); | |
1430 | local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; | |
1431 | spin_unlock(&rtas_data_buf_lock); | |
1432 | ||
1433 | if (call_status != 0) { | |
1434 | pr_warn("%s %s Error calling get-system-parameter (0x%x)\n", | |
1435 | __FILE__, __func__, call_status); | |
1436 | return; | |
1437 | } | |
1438 | ||
1439 | /* | |
1440 | * The first two (2) bytes of the data in the buffer are the length of | |
1441 | * the returned data, not counting these first two (2) bytes. | |
1442 | */ | |
1443 | len = be16_to_cpu(*((u16 *)local_buffer)) + 2; | |
1444 | if (len > SPLPAR_TLB_BIC_MAXLENGTH) { | |
1445 | pr_warn("%s too large returned buffer %d", __func__, len); | |
1446 | return; | |
1447 | } | |
1448 | ||
1449 | idx = 2; | |
1450 | while (idx < len) { | |
1451 | u8 block_shift = local_buffer[idx++]; | |
1452 | u32 block_size; | |
1453 | unsigned int npsize; | |
1454 | ||
1455 | if (!block_shift) | |
1456 | break; | |
1457 | ||
1458 | block_size = 1 << block_shift; | |
1459 | ||
1460 | for (npsize = local_buffer[idx++]; | |
1461 | npsize > 0 && idx < len; npsize--) | |
1462 | check_lp_set_hblkrm((unsigned int) local_buffer[idx++], | |
1463 | block_size); | |
1464 | } | |
1465 | ||
1466 | for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) | |
1467 | for (idx = 0; idx < MMU_PAGE_COUNT; idx++) | |
1468 | if (hblkrm_size[bpsize][idx]) | |
1469 | pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d", | |
1470 | bpsize, idx, hblkrm_size[bpsize][idx]); | |
1471 | } | |
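For clarity, the walk over the returned buffer can also be shown against a hand-written example that follows the layout documented above (2-byte big-endian length, then per block: one byte with log2 of the block size, one byte with the count of page-size specifiers, the specifiers themselves, and a zero terminator). The byte values below are invented for illustration; real data comes back from the RTAS call:

```c
/* Minimal sketch only: decoding a made-up "TLB Block Invalidate
 * Characteristics" buffer with the same loop shape as the function above.
 * A real buffer is fetched via ibm,get-system-parameter (token 50). */
#include <stdio.h>

int main(void)
{
	/* length = 4 (big endian), log2(block) = 3 => block size 8,
	 * one specifier, LP byte 0x00 (L bit clear => 4K base / 4K actual),
	 * then the 0 terminator. */
	unsigned char buf[] = { 0x00, 0x04, 0x03, 0x01, 0x00, 0x00 };
	int len = ((buf[0] << 8) | buf[1]) + 2;
	int idx = 2;

	while (idx < len) {
		unsigned char block_shift = buf[idx++];
		unsigned int npsize;

		if (!block_shift)
			break;
		for (npsize = buf[idx++]; npsize > 0 && idx < len; npsize--)
			printf("LP 0x%02x -> block size %u\n",
			       (unsigned int)buf[idx++], 1u << block_shift);
	}
	return 0;
}
```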
1472 | ||
1da177e4 LT |
1473 | /* |
1474 | * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie | |
1475 | * lock. | |
1476 | */ | |
035223fb | 1477 | static void pSeries_lpar_flush_hash_range(unsigned long number, int local) |
1da177e4 | 1478 | { |
5524a27d | 1479 | unsigned long vpn; |
f03e64f2 | 1480 | unsigned long i, pix, rc; |
12e86f92 | 1481 | unsigned long flags = 0; |
69111bac | 1482 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
44ae3ab3 | 1483 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
05af40e8 | 1484 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
0effa488 | 1485 | unsigned long index, shift, slot; |
f03e64f2 | 1486 | real_pte_t pte; |
1189be65 | 1487 | int psize, ssize; |
1da177e4 LT |
1488 | |
1489 | if (lock_tlbie) | |
1490 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1491 | ||
59545ebe | 1492 | if (is_supported_hlbkrm(batch->psize, batch->psize)) { |
ba2dd8a2 LD |
1493 | do_block_remove(number, batch, param); |
1494 | goto out; | |
1495 | } | |
1496 | ||
f03e64f2 | 1497 | psize = batch->psize; |
1189be65 | 1498 | ssize = batch->ssize; |
f03e64f2 PM |
1499 | pix = 0; |
1500 | for (i = 0; i < number; i++) { | |
5524a27d | 1501 | vpn = batch->vpn[i]; |
f03e64f2 | 1502 | pte = batch->pte[i]; |
5524a27d | 1503 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
0effa488 | 1504 | slot = compute_slot(pte, vpn, index, shift, ssize); |
12e86f92 | 1505 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { |
db3d8534 AK |
1506 | /* |
1507 | * lpar doesn't use the passed actual page size | |
1508 | */ | |
5524a27d | 1509 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, |
db3d8534 | 1510 | 0, ssize, local); |
12e86f92 PM |
1511 | } else { |
1512 | param[pix] = HBR_REQUEST | HBR_AVPN | slot; | |
5524a27d | 1513 | param[pix+1] = hpte_encode_avpn(vpn, psize, |
1189be65 | 1514 | ssize); |
12e86f92 PM |
1515 | pix += 2; |
1516 | if (pix == 8) { | |
1517 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
f03e64f2 PM |
1518 | param[0], param[1], param[2], |
1519 | param[3], param[4], param[5], | |
1520 | param[6], param[7]); | |
12e86f92 PM |
1521 | BUG_ON(rc != H_SUCCESS); |
1522 | pix = 0; | |
1523 | } | |
f03e64f2 PM |
1524 | } |
1525 | } pte_iterate_hashed_end(); | |
1526 | } | |
1527 | if (pix) { | |
1528 | param[pix] = HBR_END; | |
1529 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1530 | param[2], param[3], param[4], param[5], | |
1531 | param[6], param[7]); | |
1532 | BUG_ON(rc != H_SUCCESS); | |
1533 | } | |
1da177e4 | 1534 | |
ba2dd8a2 | 1535 | out: |
1da177e4 LT |
1536 | if (lock_tlbie) |
1537 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1538 | } | |
1539 | ||
4e89a2d8 WS |
1540 | static int __init disable_bulk_remove(char *str) |
1541 | { | |
1542 | if (strcmp(str, "off") == 0 && | |
1543 | firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
65471d76 AK |
1544 | pr_info("Disabling BULK_REMOVE firmware feature"); |
1545 | powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; | |
4e89a2d8 WS |
1546 | } |
1547 | return 1; | |
1548 | } | |
1549 | ||
1550 | __setup("bulk_remove=", disable_bulk_remove); | |
1551 | ||
dbcf929c DG |
1552 | #define HPT_RESIZE_TIMEOUT 10000 /* ms */ |
1553 | ||
1554 | struct hpt_resize_state { | |
1555 | unsigned long shift; | |
1556 | int commit_rc; | |
1557 | }; | |
1558 | ||
1559 | static int pseries_lpar_resize_hpt_commit(void *data) | |
1560 | { | |
1561 | struct hpt_resize_state *state = data; | |
1562 | ||
1563 | state->commit_rc = plpar_resize_hpt_commit(0, state->shift); | |
1564 | if (state->commit_rc != H_SUCCESS) | |
1565 | return -EIO; | |
1566 | ||
1567 | /* Hypervisor has transitioned the HTAB, update our globals */ | |
1568 | ppc64_pft_size = state->shift; | |
1569 | htab_size_bytes = 1UL << ppc64_pft_size; | |
1570 | htab_hash_mask = (htab_size_bytes >> 7) - 1; | |
1571 | ||
1572 | return 0; | |
1573 | } | |
1574 | ||
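To make the globals updated above concrete: each hash PTE group (HPTEG) is 128 bytes, so the mask is simply the number of PTEGs minus one. A worked example for a 256 MiB hash table (shift = 28); hpt_mask_example() is illustrative only and not part of this file.

static unsigned long hpt_mask_example(void)
{
	unsigned long shift = 28;		/* ppc64_pft_size            */
	unsigned long size  = 1UL << shift;	/* htab_size_bytes = 256 MiB */
	unsigned long ptegs = size >> 7;	/* 128-byte HPTE groups      */

	return ptegs - 1;			/* htab_hash_mask = 0x1fffff */
}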
c784be43 GS |
1575 | /* |
1576 | * Must be called in process context. The caller must hold the | |
1577 | * cpus_lock. | |
1578 | */ | |
dbcf929c DG |
1579 | static int pseries_lpar_resize_hpt(unsigned long shift) |
1580 | { | |
1581 | struct hpt_resize_state state = { | |
1582 | .shift = shift, | |
1583 | .commit_rc = H_FUNCTION, | |
1584 | }; | |
1585 | unsigned int delay, total_delay = 0; | |
1586 | int rc; | |
1587 | ktime_t t0, t1, t2; | |
1588 | ||
1589 | might_sleep(); | |
1590 | ||
1591 | if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1592 | return -ENODEV; | |
1593 | ||
65471d76 | 1594 | pr_info("Attempting to resize HPT to shift %lu\n", shift); |
dbcf929c DG |
1595 | |
1596 | t0 = ktime_get(); | |
1597 | ||
1598 | rc = plpar_resize_hpt_prepare(0, shift); | |
1599 | while (H_IS_LONG_BUSY(rc)) { | |
1600 | delay = get_longbusy_msecs(rc); | |
1601 | total_delay += delay; | |
1602 | if (total_delay > HPT_RESIZE_TIMEOUT) { | |
1603 | /* prepare with shift==0 cancels an in-progress resize */ | |
1604 | rc = plpar_resize_hpt_prepare(0, 0); | |
1605 | if (rc != H_SUCCESS) | |
65471d76 | 1606 | pr_warn("Unexpected error %d cancelling timed out HPT resize\n", |
dbcf929c DG |
1607 | rc); |
1608 | return -ETIMEDOUT; | |
1609 | } | |
1610 | msleep(delay); | |
1611 | rc = plpar_resize_hpt_prepare(0, shift); | |
1612 | } |
1613 | ||
1614 | switch (rc) { | |
1615 | case H_SUCCESS: | |
1616 | /* Continue on */ | |
1617 | break; | |
1618 | ||
1619 | case H_PARAMETER: | |
f172acbf | 1620 | pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1621 | return -EINVAL; |
1622 | case H_RESOURCE: | |
f172acbf | 1623 | pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1624 | return -EPERM; |
1625 | default: | |
65471d76 | 1626 | pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc); |
dbcf929c DG |
1627 | return -EIO; |
1628 | } | |
1629 | ||
1630 | t1 = ktime_get(); | |
1631 | ||
c784be43 GS |
1632 | rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit, |
1633 | &state, NULL); | |
dbcf929c DG |
1634 | |
1635 | t2 = ktime_get(); | |
1636 | ||
1637 | if (rc != 0) { | |
1638 | switch (state.commit_rc) { | |
1639 | case H_PTEG_FULL: | |
dbcf929c DG |
1640 | return -ENOSPC; |
1641 | ||
1642 | default: | |
65471d76 AK |
1643 | pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n", |
1644 | state.commit_rc); | |
dbcf929c DG |
1645 | return -EIO; |
1646 | } |
1647 | } | |
1648 | ||
65471d76 AK |
1649 | pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n", |
1650 | shift, (long long) ktime_ms_delta(t1, t0), | |
1651 | (long long) ktime_ms_delta(t2, t1)); | |
dbcf929c DG |
1652 | |
1653 | return 0; | |
1654 | } | |
1655 | ||
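Both the resize path above and the process-table registration below follow the same PAPR convention: a long-busy return code carries a suggested wait before retrying. A minimal sketch of that idiom, assuming a sleepable context; retry_hcall_sketch() is a hypothetical helper, not part of this file.

static long retry_hcall_sketch(unsigned long opcode, unsigned long arg)
{
	long rc;

	for (;;) {
		rc = plpar_hcall_norets(opcode, arg);
		if (!H_IS_LONG_BUSY(rc))
			return rc;
		/* Sleep for the interval the hypervisor suggested. */
		msleep(get_longbusy_msecs(rc));
	}
}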
cc3d2940 PM |
1656 | static int pseries_lpar_register_process_table(unsigned long base, |
1657 | unsigned long page_size, unsigned long table_size) | |
1658 | { | |
1659 | long rc; | |
dbfcf3cb | 1660 | unsigned long flags = 0; |
cc3d2940 | 1661 | |
dbfcf3cb PM |
1662 | if (table_size) |
1663 | flags |= PROC_TABLE_NEW; | |
cc3d2940 PM |
1664 | if (radix_enabled()) |
1665 | flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; | |
dbfcf3cb PM |
1666 | else |
1667 | flags |= PROC_TABLE_HPT_SLB; | |
cc3d2940 PM |
1668 | for (;;) { |
1669 | rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, | |
1670 | page_size, table_size); | |
1671 | if (!H_IS_LONG_BUSY(rc)) | |
1672 | break; | |
1673 | mdelay(get_longbusy_msecs(rc)); | |
1674 | } | |
1675 | if (rc != H_SUCCESS) { | |
1676 | pr_err("Failed to register process table (rc=%ld)\n", rc); | |
1677 | BUG(); | |
1678 | } | |
1679 | return rc; | |
1680 | } | |
1681 | ||
6364e84e | 1682 | void __init hpte_init_pseries(void) |
1da177e4 | 1683 | { |
7025776e BH |
1684 | mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
1685 | mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp; | |
1686 | mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; | |
1687 | mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert; | |
1688 | mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove; | |
1689 | mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted; | |
1690 | mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; | |
5246adec | 1691 | mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; |
7025776e | 1692 | mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; |
8971e1c7 ME |
1693 | |
1694 | if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1695 | mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; | |
ed6546bd NP |
1696 | |
1697 | /* | |
1698 | * On POWER9, we need to do an H_REGISTER_PROC_TBL hcall |
1699 | * to inform the hypervisor that we wish to use the HPT. | |
1700 | */ | |
1701 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
1702 | pseries_lpar_register_process_table(0, 0, 0); | |
1da177e4 | 1703 | } |
14f966e7 | 1704 | |
cc3d2940 PM |
1705 | void radix_init_pseries(void) |
1706 | { | |
1707 | pr_info("Using radix MMU under hypervisor\n"); | |
ed6546bd NP |
1708 | |
1709 | pseries_lpar_register_process_table(__pa(process_tb), | |
1710 | 0, PRTB_SIZE_SHIFT - 12); | |
cc3d2940 PM |
1711 | } |
1712 | ||
14f966e7 RJ |
1713 | #ifdef CONFIG_PPC_SMLPAR |
1714 | #define CMO_FREE_HINT_DEFAULT 1 | |
1715 | static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; | |
1716 | ||
1717 | static int __init cmo_free_hint(char *str) | |
1718 | { | |
1719 | char *parm; | |
1720 | parm = strstrip(str); | |
1721 | ||
1722 | if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { | |
65471d76 | 1723 | pr_info("%s: CMO free page hinting is not active.\n", __func__); |
14f966e7 RJ |
1724 | cmo_free_hint_flag = 0; |
1725 | return 1; | |
1726 | } | |
1727 | ||
1728 | cmo_free_hint_flag = 1; | |
65471d76 | 1729 | pr_info("%s: CMO free page hinting is active.\n", __func__); |
14f966e7 RJ |
1730 | |
1731 | if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) | |
1732 | return 1; | |
1733 | ||
1734 | return 0; | |
1735 | } | |
1736 | ||
1737 | __setup("cmo_free_hint=", cmo_free_hint); | |
1738 | ||
1739 | static void pSeries_set_page_state(struct page *page, int order, | |
1740 | unsigned long state) | |
1741 | { | |
1742 | int i, j; | |
1743 | unsigned long cmo_page_sz, addr; | |
1744 | ||
1745 | cmo_page_sz = cmo_get_page_size(); | |
1746 | addr = __pa((unsigned long)page_address(page)); | |
1747 | ||
1748 | for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { | |
1749 | for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) | |
1750 | plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); | |
1751 | } | |
1752 | } | |
1753 | ||
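A worked example of the nested loop above: with an assumed 64 KiB kernel PAGE_SIZE and an assumed 4 KiB CMO page size, marking an order-2 allocation unused issues 64 H_PAGE_INIT hcalls. The real CMO page size comes from cmo_get_page_size() at run time; cmo_hint_calls_example() is illustrative only.

static unsigned long cmo_hint_calls_example(void)
{
	unsigned long cmo_page_sz = 4096;	/* assumed CMO page size    */
	unsigned long kernel_page = 65536;	/* assumed 64 KiB PAGE_SIZE */
	int order = 2;				/* an order-2 allocation    */

	/* one H_PAGE_INIT per CMO-sized chunk of every kernel page */
	return (1UL << order) * (kernel_page / cmo_page_sz);	/* 64 */
}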
1754 | void arch_free_page(struct page *page, int order) | |
1755 | { | |
d8c476ee AK |
1756 | if (radix_enabled()) |
1757 | return; | |
14f966e7 RJ |
1758 | if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) |
1759 | return; | |
1760 | ||
1761 | pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); | |
1762 | } | |
1763 | EXPORT_SYMBOL(arch_free_page); | |
1764 | ||
d8c476ee | 1765 | #endif /* CONFIG_PPC_SMLPAR */ |
4e003747 | 1766 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
c8cd093a AB |
1767 | |
1768 | #ifdef CONFIG_TRACEPOINTS | |
e9666d10 | 1769 | #ifdef CONFIG_JUMP_LABEL |
cc1adb5f AB |
1770 | struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; |
1771 | ||
8cf868af | 1772 | int hcall_tracepoint_regfunc(void) |
cc1adb5f AB |
1773 | { |
1774 | static_key_slow_inc(&hcall_tracepoint_key); | |
8cf868af | 1775 | return 0; |
cc1adb5f AB |
1776 | } |
1777 | ||
1778 | void hcall_tracepoint_unregfunc(void) | |
1779 | { | |
1780 | static_key_slow_dec(&hcall_tracepoint_key); | |
1781 | } | |
1782 | #else | |
c8cd093a AB |
1783 | /* |
1784 | * We optimise our hcall path by placing hcall_tracepoint_refcount | |
1785 | * directly in the TOC so we can check if the hcall tracepoints are | |
1786 | * enabled via a single load. | |
1787 | */ | |
1788 | ||
1789 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | |
1790 | extern long hcall_tracepoint_refcount; | |
1791 | ||
8cf868af | 1792 | int hcall_tracepoint_regfunc(void) |
c8cd093a AB |
1793 | { |
1794 | hcall_tracepoint_refcount++; | |
8cf868af | 1795 | return 0; |
c8cd093a AB |
1796 | } |
1797 | ||
1798 | void hcall_tracepoint_unregfunc(void) | |
1799 | { | |
1800 | hcall_tracepoint_refcount--; | |
1801 | } | |
cc1adb5f AB |
1802 | #endif |
1803 | ||
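Either variant above only runs when a probe is attached to the hcall tracepoints. As a sketch of how that happens, assuming the registration stubs generated for the hcall_entry tracepoint declared in asm/trace.h; my_hcall_probe() and my_probe_init() are hypothetical names, not part of this file.

static void my_hcall_probe(void *data, unsigned long opcode,
			   unsigned long *args)
{
	pr_debug("hcall opcode %#lx\n", opcode);
}

static int __init my_probe_init(void)
{
	/* Attaching the probe ends up calling hcall_tracepoint_regfunc(). */
	return register_trace_hcall_entry(my_hcall_probe, NULL);
}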
1804 | /* | |
1805 | * Since the tracing code might execute hcalls we need to guard against | |
1806 | * recursion. One example of this is a spinlock calling H_YIELD on |
1807 | * a shared processor partition. |
1808 | */ | |
1809 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | |
1810 | ||
c8cd093a | 1811 | |
6f26353c | 1812 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
c8cd093a | 1813 | { |
57cdfdf8 AB |
1814 | unsigned long flags; |
1815 | unsigned int *depth; | |
1816 | ||
a5ccfee0 AB |
1817 | /* |
1818 | * We cannot call tracepoints inside RCU idle regions which | |
1819 | * means we must not trace H_CEDE. | |
1820 | */ | |
1821 | if (opcode == H_CEDE) | |
1822 | return; | |
1823 | ||
57cdfdf8 AB |
1824 | local_irq_save(flags); |
1825 | ||
69111bac | 1826 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1827 | |
1828 | if (*depth) | |
1829 | goto out; | |
1830 | ||
1831 | (*depth)++; | |
e4f387d8 | 1832 | preempt_disable(); |
6f26353c | 1833 | trace_hcall_entry(opcode, args); |
57cdfdf8 AB |
1834 | (*depth)--; |
1835 | ||
1836 | out: | |
1837 | local_irq_restore(flags); | |
c8cd093a AB |
1838 | } |
1839 | ||
8f2133cc | 1840 | void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf) |
c8cd093a | 1841 | { |
57cdfdf8 AB |
1842 | unsigned long flags; |
1843 | unsigned int *depth; | |
1844 | ||
a5ccfee0 AB |
1845 | if (opcode == H_CEDE) |
1846 | return; | |
1847 | ||
57cdfdf8 AB |
1848 | local_irq_save(flags); |
1849 | ||
69111bac | 1850 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1851 | |
1852 | if (*depth) | |
1853 | goto out; | |
1854 | ||
1855 | (*depth)++; | |
6f26353c | 1856 | trace_hcall_exit(opcode, retval, retbuf); |
e4f387d8 | 1857 | preempt_enable(); |
57cdfdf8 AB |
1858 | (*depth)--; |
1859 | ||
1860 | out: | |
1861 | local_irq_restore(flags); | |
c8cd093a AB |
1862 | } |
1863 | #endif | |
9ee820fa BK |
1864 | |
1865 | /** | |
1866 | * h_get_mpp | |
1867 | * H_GET_MPP hcall returns info in 7 parameters |
1868 | */ | |
1869 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | |
1870 | { | |
1871 | int rc; | |
1872 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1873 | ||
1874 | rc = plpar_hcall9(H_GET_MPP, retbuf); | |
1875 | ||
1876 | mpp_data->entitled_mem = retbuf[0]; | |
1877 | mpp_data->mapped_mem = retbuf[1]; | |
1878 | ||
1879 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | |
1880 | mpp_data->pool_num = retbuf[2] & 0xffff; | |
1881 | ||
1882 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | |
1883 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | |
b0d436c7 | 1884 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL; |
9ee820fa BK |
1885 | |
1886 | mpp_data->pool_size = retbuf[4]; | |
1887 | mpp_data->loan_request = retbuf[5]; | |
1888 | mpp_data->backing_mem = retbuf[6]; | |
1889 | ||
1890 | return rc; | |
1891 | } | |
1892 | EXPORT_SYMBOL(h_get_mpp); | |
1893 | ||
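The shift-and-mask decoding in h_get_mpp() above implies the byte layout shown below; this is purely an illustration of the existing code, and mpp_word3_layout_example() is a hypothetical helper, not part of this file.

/*
 * Illustration only: the layout of retbuf[3] implied by the decoding
 * in h_get_mpp() above (retbuf[2] similarly holds group_num in bits
 * 31-16 and pool_num in bits 15-0).
 */
static void mpp_word3_layout_example(unsigned long w3)
{
	unsigned long mem_weight              = (w3 >> 7 * 8) & 0xff;	/* byte 7    */
	unsigned long unallocated_mem_weight  = (w3 >> 6 * 8) & 0xff;	/* byte 6    */
	unsigned long unallocated_entitlement = w3 & 0xffffffffffffUL;	/* bytes 0-5 */

	pr_debug("weight %lu unalloc weight %lu unalloc entitlement %lu\n",
		 mem_weight, unallocated_mem_weight, unallocated_entitlement);
}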
1894 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) | |
1895 | { | |
1896 | int rc; | |
1897 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; | |
1898 | ||
1899 | rc = plpar_hcall9(H_GET_MPP_X, retbuf); | |
1900 | ||
1901 | mpp_x_data->coalesced_bytes = retbuf[0]; | |
1902 | mpp_x_data->pool_coalesced_bytes = retbuf[1]; | |
1903 | mpp_x_data->pool_purr_cycles = retbuf[2]; | |
1904 | mpp_x_data->pool_spurr_cycles = retbuf[3]; | |
1905 | ||
1906 | return rc; | |
1907 | } | |
82228e36 AK |
1908 | |
1909 | static unsigned long vsid_unscramble(unsigned long vsid, int ssize) | |
1910 | { | |
1911 | unsigned long protovsid; | |
1912 | unsigned long va_bits = VA_BITS; | |
1913 | unsigned long modinv, vsid_modulus; | |
1914 | unsigned long max_mod_inv, tmp_modinv; | |
1915 | ||
1916 | if (!mmu_has_feature(MMU_FTR_68_BIT_VA)) | |
1917 | va_bits = 65; | |
1918 | ||
1919 | if (ssize == MMU_SEGSIZE_256M) { | |
1920 | modinv = VSID_MULINV_256M; | |
1921 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1); | |
1922 | } else { | |
1923 | modinv = VSID_MULINV_1T; | |
1924 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1); | |
1925 | } | |
1926 | ||
1927 | /* | |
1928 | * vsid outside our range. | |
1929 | */ | |
1930 | if (vsid >= vsid_modulus) | |
1931 | return 0; | |
1932 | ||
1933 | /* | |
1934 | * If modinv is the modular multiplicative inverse of (x % vsid_modulus) |
1935 | * and vsid = (protovsid * x) % vsid_modulus, then we say: | |
1936 | * protovsid = (vsid * modinv) % vsid_modulus | |
1937 | */ | |
1938 | ||
1939 | /* Check if (vsid * modinv) overflow (63 bits) */ | |
1940 | max_mod_inv = 0x7fffffffffffffffull / vsid; | |
1941 | if (modinv < max_mod_inv) | |
1942 | return (vsid * modinv) % vsid_modulus; | |
1943 | ||
1944 | tmp_modinv = modinv/max_mod_inv; | |
1945 | modinv %= max_mod_inv; | |
1946 | ||
1947 | protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus; | |
1948 | protovsid = (protovsid + vsid * modinv) % vsid_modulus; | |
1949 | ||
1950 | return protovsid; | |
1951 | } | |
1952 | ||
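The split multiplication above is designed so that no 64-bit intermediate product overflows. A throwaway check of that identity against 128-bit arithmetic; this is a sketch only (it assumes a compiler providing unsigned __int128) and check_split_mulmod() is not part of this file.

/*
 * Sketch: verify that (vsid * modinv) % modulus computed with the
 * split used in vsid_unscramble() matches a straightforward 128-bit
 * reference result.
 */
static int check_split_mulmod(unsigned long vsid, unsigned long modinv,
			      unsigned long modulus)
{
	unsigned long max_mod_inv, tmp_modinv, protovsid;
	unsigned long ref;

	/* Reference result using 128-bit arithmetic. */
	ref = (unsigned long)(((unsigned __int128)vsid * modinv) % modulus);

	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % modulus == ref;

	/* Split modinv so that no 64-bit multiplication overflows. */
	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % modulus) * tmp_modinv) % modulus;
	protovsid = (protovsid + vsid * modinv) % modulus;

	return protovsid == ref;
}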
1953 | static int __init reserve_vrma_context_id(void) | |
1954 | { | |
1955 | unsigned long protovsid; | |
1956 | ||
1957 | /* | |
1958 | * Reserve context ids which map to reserved virtual addresses. For now | |
1959 | * we only reserve the context id which maps to the VRMA VSID. We ignore | |
1960 | * the addresses in "ibm,adjunct-virtual-addresses" because we don't | |
1961 | * enable adjunct support via the "ibm,client-architecture-support" | |
1962 | * interface. | |
1963 | */ | |
1964 | protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T); | |
1965 | hash__reserve_context_id(protovsid >> ESID_BITS_1T); | |
1966 | return 0; | |
1967 | } | |
1968 | machine_device_initcall(pseries, reserve_vrma_context_id); | |
c6c26fb5 AP |
1969 | |
1970 | #ifdef CONFIG_DEBUG_FS | |
1971 | /* debugfs file interface for vpa data */ | |
1972 | static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, | |
1973 | loff_t *pos) | |
1974 | { | |
1975 | int cpu = (long)filp->private_data; | |
1976 | struct lppaca *lppaca = &lppaca_of(cpu); | |
1977 | ||
1978 | return simple_read_from_buffer(buf, len, pos, lppaca, | |
1979 | sizeof(struct lppaca)); | |
1980 | } | |
1981 | ||
1982 | static const struct file_operations vpa_fops = { | |
1983 | .open = simple_open, | |
1984 | .read = vpa_file_read, | |
1985 | .llseek = default_llseek, | |
1986 | }; | |
1987 | ||
1988 | static int __init vpa_debugfs_init(void) | |
1989 | { | |
1990 | char name[16]; | |
1991 | long i; | |
1992 | static struct dentry *vpa_dir; | |
1993 | ||
1994 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | |
1995 | return 0; | |
1996 | ||
1997 | vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); | |
1998 | if (!vpa_dir) { | |
1999 | pr_warn("%s: can't create vpa root dir\n", __func__); | |
2000 | return -ENOMEM; | |
2001 | } | |
2002 | ||
2003 | /* set up the per-cpu vpa files */ |
2004 | for_each_possible_cpu(i) { | |
2005 | struct dentry *d; | |
2006 | ||
2007 | sprintf(name, "cpu-%ld", i); | |
2008 | ||
2009 | d = debugfs_create_file(name, 0400, vpa_dir, (void *)i, | |
2010 | &vpa_fops); | |
2011 | if (!d) { | |
2012 | pr_warn("%s: can't create per-cpu vpa file\n", | |
2013 | __func__); | |
2014 | return -ENOMEM; | |
2015 | } | |
2016 | } | |
2017 | ||
2018 | return 0; | |
2019 | } | |
2020 | machine_arch_initcall(pseries, vpa_debugfs_init); | |
2021 | #endif /* CONFIG_DEBUG_FS */ |
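Each per-cpu file created above exposes the raw struct lppaca through simple_read_from_buffer(). With debugfs mounted at its usual /sys/kernel/debug location, a user-space reader can dump cpu 0's VPA as shown below; this is illustrative only, and the buffer size is merely an assumed upper bound for the structure.

/* Illustrative user-space reader for the per-cpu VPA debugfs file. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[1024];	/* large enough for struct lppaca */
	ssize_t n;
	int fd = open("/sys/kernel/debug/powerpc/vpa/cpu-0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes of VPA data\n", n);
	close(fd);
	return 0;
}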