Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * pSeries_lpar.c | |
3 | * Copyright (C) 2001 Todd Inglett, IBM Corporation | |
4 | * | |
5 | * pSeries LPAR support. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
20 | */ | |
21 | ||
f7ebf352 ME |
22 | /* Enables debugging of low-level hash table routines - careful! */ |
23 | #undef DEBUG | |
65471d76 | 24 | #define pr_fmt(fmt) "lpar: " fmt |
1da177e4 | 25 | |
1da177e4 LT |
26 | #include <linux/kernel.h> |
27 | #include <linux/dma-mapping.h> | |
463ce0e1 | 28 | #include <linux/console.h> |
66b15db6 | 29 | #include <linux/export.h> |
58995a9a | 30 | #include <linux/jump_label.h> |
dbcf929c DG |
31 | #include <linux/delay.h> |
32 | #include <linux/stop_machine.h> | |
d62c8dee NR |
33 | #include <linux/spinlock.h> |
34 | #include <linux/cpuhotplug.h> | |
35 | #include <linux/workqueue.h> | |
36 | #include <linux/proc_fs.h> | |
1da177e4 LT |
37 | #include <asm/processor.h> |
38 | #include <asm/mmu.h> | |
39 | #include <asm/page.h> | |
40 | #include <asm/pgtable.h> | |
41 | #include <asm/machdep.h> | |
1da177e4 | 42 | #include <asm/mmu_context.h> |
1da177e4 | 43 | #include <asm/iommu.h> |
1da177e4 LT |
44 | #include <asm/tlb.h> |
45 | #include <asm/prom.h> | |
1da177e4 | 46 | #include <asm/cputable.h> |
dcad47fc | 47 | #include <asm/udbg.h> |
2249ca9d | 48 | #include <asm/smp.h> |
c8cd093a | 49 | #include <asm/trace.h> |
f5339277 | 50 | #include <asm/firmware.h> |
212bebb4 | 51 | #include <asm/plpar_wrappers.h> |
c1caae3d | 52 | #include <asm/kexec.h> |
408cddd9 | 53 | #include <asm/fadump.h> |
42f5b4ca | 54 | #include <asm/asm-prototypes.h> |
c6c26fb5 | 55 | #include <asm/debugfs.h> |
a1218720 | 56 | |
21cf9133 | 57 | #include "pseries.h" |
1da177e4 | 58 | |
1a527286 AK |
59 | /* Flag bits for H_BULK_REMOVE */ |
60 | #define HBR_REQUEST 0x4000000000000000UL | |
61 | #define HBR_RESPONSE 0x8000000000000000UL | |
62 | #define HBR_END 0xc000000000000000UL | |
63 | #define HBR_AVPN 0x0200000000000000UL | |
64 | #define HBR_ANDCOND 0x0100000000000000UL | |
65 | ||
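/*
 * Summary of how these flag bits are used further down in this file (not
 * quoted from PAPR): each parameter word handed to H_BULK_REMOVE is built
 * as (HBR_REQUEST | HBR_AVPN | pte_index) followed by the encoded AVPN,
 * and a partially filled batch is terminated with an HBR_END entry -- see
 * pSeries_lpar_flush_hash_range() and hugepage_bulk_invalidate() below.
 */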
1da177e4 | 66 | |
b9377ffc | 67 | /* in hvCall.S */ |
1da177e4 | 68 | EXPORT_SYMBOL(plpar_hcall); |
b9377ffc | 69 | EXPORT_SYMBOL(plpar_hcall9); |
1da177e4 | 70 | EXPORT_SYMBOL(plpar_hcall_norets); |
b9377ffc | 71 | |
d62c8dee NR |
72 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
73 | static u8 dtl_mask = DTL_LOG_PREEMPT; | |
74 | #else | |
75 | static u8 dtl_mask; | |
76 | #endif | |
77 | ||
1c85a2a1 NR |
78 | void alloc_dtl_buffers(void) |
79 | { | |
80 | int cpu; | |
81 | struct paca_struct *pp; | |
82 | struct dtl_entry *dtl; | |
83 | ||
84 | for_each_possible_cpu(cpu) { | |
85 | pp = paca_ptrs[cpu]; | |
d62c8dee NR |
86 | if (pp->dispatch_log) |
87 | continue; | |
1c85a2a1 NR |
88 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); |
89 | if (!dtl) { | |
90 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | |
91 | cpu); | |
d62c8dee | 92 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
1c85a2a1 | 93 | pr_warn("Stolen time statistics will be unreliable\n"); |
d62c8dee | 94 | #endif |
1c85a2a1 NR |
95 | break; |
96 | } | |
97 | ||
98 | pp->dtl_ridx = 0; | |
99 | pp->dispatch_log = dtl; | |
100 | pp->dispatch_log_end = dtl + N_DISPATCH_LOG; | |
101 | pp->dtl_curr = dtl; | |
102 | } | |
103 | } | |
104 | ||
105 | void register_dtl_buffer(int cpu) | |
106 | { | |
107 | long ret; | |
108 | struct paca_struct *pp; | |
109 | struct dtl_entry *dtl; | |
110 | int hwcpu = get_hard_smp_processor_id(cpu); | |
111 | ||
112 | pp = paca_ptrs[cpu]; | |
113 | dtl = pp->dispatch_log; | |
d62c8dee | 114 | if (dtl && dtl_mask) { |
1c85a2a1 NR |
115 | pp->dtl_ridx = 0; |
116 | pp->dtl_curr = dtl; | |
117 | lppaca_of(cpu).dtl_idx = 0; | |
118 | ||
119 | /* hypervisor reads buffer length from this field */ | |
120 | dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES); | |
121 | ret = register_dtl(hwcpu, __pa(dtl)); | |
122 | if (ret) | |
123 | pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n", | |
124 | cpu, hwcpu, ret); | |
125 | ||
d62c8dee | 126 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; |
1c85a2a1 NR |
127 | } |
128 | } | |
129 | ||
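/*
 * Note: register_dtl_buffer() only arms a buffer when dtl_mask is non-zero.
 * With CONFIG_VIRT_CPU_ACCOUNTING_NATIVE the mask defaults to
 * DTL_LOG_PREEMPT (see above); otherwise it stays zero until the
 * vcpudispatch_stats code (CONFIG_PPC_SPLPAR, below) enables full logging
 * via set_global_dtl_mask(DTL_LOG_ALL).
 */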
06220d78 | 130 | #ifdef CONFIG_PPC_SPLPAR |
d62c8dee NR |
131 | struct dtl_worker { |
132 | struct delayed_work work; | |
133 | int cpu; | |
134 | }; | |
135 | ||
136 | struct vcpu_dispatch_data { | |
137 | int last_disp_cpu; | |
138 | ||
139 | int total_disp; | |
140 | ||
141 | int same_cpu_disp; | |
142 | int same_chip_disp; | |
143 | int diff_chip_disp; | |
144 | int far_chip_disp; | |
145 | ||
146 | int numa_home_disp; | |
147 | int numa_remote_disp; | |
148 | int numa_far_disp; | |
149 | }; | |
150 | ||
151 | /* | |
152 | * This represents the number of cpus in the hypervisor. Since there is no | |
153 | * architected way to discover the number of processors in the host, we | |
154 | * provision for dealing with NR_CPUS. This is currently 2048 by default, and | |
155 | * is sufficient for our purposes. This will need to be tweaked if | |
156 | * CONFIG_NR_CPUS is changed. | |
157 | */ | |
158 | #define NR_CPUS_H NR_CPUS | |
159 | ||
06220d78 | 160 | DEFINE_RWLOCK(dtl_access_lock); |
d62c8dee NR |
161 | static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); |
162 | static DEFINE_PER_CPU(u64, dtl_entry_ridx); | |
163 | static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); | |
164 | static enum cpuhp_state dtl_worker_state; | |
165 | static DEFINE_MUTEX(dtl_enable_mutex); | |
166 | static int vcpudispatch_stats_on __read_mostly; | |
167 | static int vcpudispatch_stats_freq = 50; | |
168 | static __be32 *vcpu_associativity, *pcpu_associativity; | |
169 | ||
170 | ||
171 | static void free_dtl_buffers(void) | |
172 | { | |
173 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
174 | int cpu; | |
175 | struct paca_struct *pp; | |
176 | ||
177 | for_each_possible_cpu(cpu) { | |
178 | pp = paca_ptrs[cpu]; | |
179 | if (!pp->dispatch_log) | |
180 | continue; | |
181 | kmem_cache_free(dtl_cache, pp->dispatch_log); | |
182 | pp->dtl_ridx = 0; | |
183 | pp->dispatch_log = 0; | |
184 | pp->dispatch_log_end = 0; | |
185 | pp->dtl_curr = 0; | |
186 | } | |
187 | #endif | |
188 | } | |
189 | ||
190 | static int init_cpu_associativity(void) | |
191 | { | |
192 | vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core, | |
193 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
194 | pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core, | |
195 | VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); | |
196 | ||
197 | if (!vcpu_associativity || !pcpu_associativity) { | |
198 | pr_err("error allocating memory for associativity information\n"); | |
199 | return -ENOMEM; | |
200 | } | |
201 | ||
202 | return 0; | |
203 | } | |
204 | ||
205 | static void destroy_cpu_associativity(void) | |
206 | { | |
207 | kfree(vcpu_associativity); | |
208 | kfree(pcpu_associativity); | |
209 | vcpu_associativity = pcpu_associativity = 0; | |
210 | } | |
211 | ||
212 | static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag) | |
213 | { | |
214 | __be32 *assoc; | |
215 | int rc = 0; | |
216 | ||
217 | assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE]; | |
218 | if (!assoc[0]) { | |
219 | rc = hcall_vphn(cpu, flag, &assoc[0]); | |
220 | if (rc) | |
221 | return NULL; | |
222 | } | |
223 | ||
224 | return assoc; | |
225 | } | |
226 | ||
227 | static __be32 *get_pcpu_associativity(int cpu) | |
228 | { | |
229 | return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU); | |
230 | } | |
231 | ||
232 | static __be32 *get_vcpu_associativity(int cpu) | |
233 | { | |
234 | return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU); | |
235 | } | |
236 | ||
237 | static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu) | |
238 | { | |
239 | __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc; | |
240 | ||
241 | if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H) | |
242 | return -EINVAL; | |
243 | ||
244 | last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu); | |
245 | cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu); | |
246 | ||
247 | if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc) | |
248 | return -EIO; | |
249 | ||
250 | return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc); | |
251 | } | |
252 | ||
253 | static int cpu_home_node_dispatch_distance(int disp_cpu) | |
254 | { | |
255 | __be32 *disp_cpu_assoc, *vcpu_assoc; | |
256 | int vcpu_id = smp_processor_id(); | |
257 | ||
258 | if (disp_cpu >= NR_CPUS_H) { | |
259 | pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n", | |
260 | disp_cpu, NR_CPUS_H); | |
261 | return -EINVAL; | |
262 | } | |
263 | ||
264 | disp_cpu_assoc = get_pcpu_associativity(disp_cpu); | |
265 | vcpu_assoc = get_vcpu_associativity(vcpu_id); | |
266 | ||
267 | if (!disp_cpu_assoc || !vcpu_assoc) | |
268 | return -EIO; | |
269 | ||
270 | return cpu_distance(disp_cpu_assoc, vcpu_assoc); | |
271 | } | |
272 | ||
273 | static void update_vcpu_disp_stat(int disp_cpu) | |
274 | { | |
275 | struct vcpu_dispatch_data *disp; | |
276 | int distance; | |
277 | ||
278 | disp = this_cpu_ptr(&vcpu_disp_data); | |
279 | if (disp->last_disp_cpu == -1) { | |
280 | disp->last_disp_cpu = disp_cpu; | |
281 | return; | |
282 | } | |
283 | ||
284 | disp->total_disp++; | |
285 | ||
286 | if (disp->last_disp_cpu == disp_cpu || | |
287 | (cpu_first_thread_sibling(disp->last_disp_cpu) == | |
288 | cpu_first_thread_sibling(disp_cpu))) | |
289 | disp->same_cpu_disp++; | |
290 | else { | |
291 | distance = cpu_relative_dispatch_distance(disp->last_disp_cpu, | |
292 | disp_cpu); | |
293 | if (distance < 0) | |
294 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
295 | smp_processor_id()); | |
296 | else { | |
297 | switch (distance) { | |
298 | case 0: | |
299 | disp->same_chip_disp++; | |
300 | break; | |
301 | case 1: | |
302 | disp->diff_chip_disp++; | |
303 | break; | |
304 | case 2: | |
305 | disp->far_chip_disp++; | |
306 | break; | |
307 | default: | |
308 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n", | |
309 | smp_processor_id(), | |
310 | disp->last_disp_cpu, | |
311 | disp_cpu, | |
312 | distance); | |
313 | } | |
314 | } | |
315 | } | |
316 | ||
317 | distance = cpu_home_node_dispatch_distance(disp_cpu); | |
318 | if (distance < 0) | |
319 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", | |
320 | smp_processor_id()); | |
321 | else { | |
322 | switch (distance) { | |
323 | case 0: | |
324 | disp->numa_home_disp++; | |
325 | break; | |
326 | case 1: | |
327 | disp->numa_remote_disp++; | |
328 | break; | |
329 | case 2: | |
330 | disp->numa_far_disp++; | |
331 | break; | |
332 | default: | |
333 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n", | |
334 | smp_processor_id(), | |
335 | disp_cpu, | |
336 | distance); | |
337 | } | |
338 | } | |
339 | ||
340 | disp->last_disp_cpu = disp_cpu; | |
341 | } | |
342 | ||
343 | static void process_dtl_buffer(struct work_struct *work) | |
344 | { | |
345 | struct dtl_entry dtle; | |
346 | u64 i = __this_cpu_read(dtl_entry_ridx); | |
347 | struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
348 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; | |
349 | struct lppaca *vpa = local_paca->lppaca_ptr; | |
350 | struct dtl_worker *d = container_of(work, struct dtl_worker, work.work); | |
351 | ||
352 | if (!local_paca->dispatch_log) | |
353 | return; | |
354 | ||
355 | /* if we have been migrated away, we cancel ourselves */ |
356 | if (d->cpu != smp_processor_id()) { | |
357 | pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n", | |
358 | smp_processor_id()); | |
359 | return; | |
360 | } | |
361 | ||
362 | if (i == be64_to_cpu(vpa->dtl_idx)) | |
363 | goto out; | |
364 | ||
365 | while (i < be64_to_cpu(vpa->dtl_idx)) { | |
366 | dtle = *dtl; | |
367 | barrier(); | |
368 | if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { | |
369 | /* buffer has overflowed */ | |
370 | pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n", | |
371 | d->cpu, | |
372 | be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i); | |
373 | i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; | |
374 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | |
375 | continue; | |
376 | } | |
377 | update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id)); | |
378 | ++i; | |
379 | ++dtl; | |
380 | if (dtl == dtl_end) | |
381 | dtl = local_paca->dispatch_log; | |
382 | } | |
383 | ||
384 | __this_cpu_write(dtl_entry_ridx, i); | |
385 | ||
386 | out: | |
387 | schedule_delayed_work_on(d->cpu, to_delayed_work(work), | |
388 | HZ / vcpudispatch_stats_freq); | |
389 | } | |
390 | ||
391 | static int dtl_worker_online(unsigned int cpu) | |
392 | { | |
393 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
394 | ||
395 | memset(d, 0, sizeof(*d)); | |
396 | INIT_DELAYED_WORK(&d->work, process_dtl_buffer); | |
397 | d->cpu = cpu; | |
398 | ||
399 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
400 | per_cpu(dtl_entry_ridx, cpu) = 0; | |
401 | register_dtl_buffer(cpu); | |
402 | #else | |
403 | per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx); | |
404 | #endif | |
405 | ||
406 | schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq); | |
407 | return 0; | |
408 | } | |
409 | ||
410 | static int dtl_worker_offline(unsigned int cpu) | |
411 | { | |
412 | struct dtl_worker *d = &per_cpu(dtl_workers, cpu); | |
413 | ||
414 | cancel_delayed_work_sync(&d->work); | |
415 | ||
416 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
417 | unregister_dtl(get_hard_smp_processor_id(cpu)); | |
418 | #endif | |
419 | ||
420 | return 0; | |
421 | } | |
422 | ||
423 | static void set_global_dtl_mask(u8 mask) | |
424 | { | |
425 | int cpu; | |
426 | ||
427 | dtl_mask = mask; | |
428 | for_each_present_cpu(cpu) | |
429 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
430 | } | |
431 | ||
432 | static void reset_global_dtl_mask(void) | |
433 | { | |
434 | int cpu; | |
435 | ||
436 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | |
437 | dtl_mask = DTL_LOG_PREEMPT; | |
438 | #else | |
439 | dtl_mask = 0; | |
440 | #endif | |
441 | for_each_present_cpu(cpu) | |
442 | lppaca_of(cpu).dtl_enable_mask = dtl_mask; | |
443 | } | |
444 | ||
445 | static int dtl_worker_enable(void) | |
446 | { | |
447 | int rc = 0, state; | |
448 | ||
449 | if (!write_trylock(&dtl_access_lock)) { | |
450 | rc = -EBUSY; | |
451 | goto out; | |
452 | } | |
453 | ||
454 | set_global_dtl_mask(DTL_LOG_ALL); | |
455 | ||
456 | /* Set up DTL buffers and register them */ |
457 | alloc_dtl_buffers(); | |
458 | ||
459 | state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online", | |
460 | dtl_worker_online, dtl_worker_offline); | |
461 | if (state < 0) { | |
462 | pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); | |
463 | free_dtl_buffers(); | |
464 | reset_global_dtl_mask(); | |
465 | write_unlock(&dtl_access_lock); | |
466 | rc = -EINVAL; | |
467 | goto out; | |
468 | } | |
469 | dtl_worker_state = state; | |
470 | ||
471 | out: | |
472 | return rc; | |
473 | } | |
474 | ||
475 | static void dtl_worker_disable(void) | |
476 | { | |
477 | cpuhp_remove_state(dtl_worker_state); | |
478 | free_dtl_buffers(); | |
479 | reset_global_dtl_mask(); | |
480 | write_unlock(&dtl_access_lock); | |
481 | } | |
482 | ||
483 | static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, | |
484 | size_t count, loff_t *ppos) | |
485 | { | |
486 | struct vcpu_dispatch_data *disp; | |
487 | int rc, cmd, cpu; | |
488 | char buf[16]; | |
489 | ||
490 | if (count > 15) | |
491 | return -EINVAL; | |
492 | ||
493 | if (copy_from_user(buf, p, count)) | |
494 | return -EFAULT; | |
495 | ||
496 | buf[count] = 0; | |
497 | rc = kstrtoint(buf, 0, &cmd); | |
498 | if (rc || cmd < 0 || cmd > 1) { | |
499 | pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n"); | |
500 | return rc ? rc : -EINVAL; | |
501 | } | |
502 | ||
503 | mutex_lock(&dtl_enable_mutex); | |
504 | ||
505 | if ((cmd == 0 && !vcpudispatch_stats_on) || | |
506 | (cmd == 1 && vcpudispatch_stats_on)) | |
507 | goto out; | |
508 | ||
509 | if (cmd) { | |
510 | rc = init_cpu_associativity(); | |
511 | if (rc) | |
512 | goto out; | |
513 | ||
514 | for_each_possible_cpu(cpu) { | |
515 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
516 | memset(disp, 0, sizeof(*disp)); | |
517 | disp->last_disp_cpu = -1; | |
518 | } | |
519 | ||
520 | rc = dtl_worker_enable(); | |
521 | if (rc) { | |
522 | destroy_cpu_associativity(); | |
523 | goto out; | |
524 | } | |
525 | } else { | |
526 | dtl_worker_disable(); | |
527 | destroy_cpu_associativity(); | |
528 | } | |
529 | ||
530 | vcpudispatch_stats_on = cmd; | |
531 | ||
532 | out: | |
533 | mutex_unlock(&dtl_enable_mutex); | |
534 | if (rc) | |
535 | return rc; | |
536 | return count; | |
537 | } | |
538 | ||
539 | static int vcpudispatch_stats_display(struct seq_file *p, void *v) | |
540 | { | |
541 | int cpu; | |
542 | struct vcpu_dispatch_data *disp; | |
543 | ||
544 | if (!vcpudispatch_stats_on) { | |
545 | seq_puts(p, "off\n"); | |
546 | return 0; | |
547 | } | |
548 | ||
549 | for_each_online_cpu(cpu) { | |
550 | disp = per_cpu_ptr(&vcpu_disp_data, cpu); | |
551 | seq_printf(p, "cpu%d", cpu); | |
552 | seq_put_decimal_ull(p, " ", disp->total_disp); | |
553 | seq_put_decimal_ull(p, " ", disp->same_cpu_disp); | |
554 | seq_put_decimal_ull(p, " ", disp->same_chip_disp); | |
555 | seq_put_decimal_ull(p, " ", disp->diff_chip_disp); | |
556 | seq_put_decimal_ull(p, " ", disp->far_chip_disp); | |
557 | seq_put_decimal_ull(p, " ", disp->numa_home_disp); | |
558 | seq_put_decimal_ull(p, " ", disp->numa_remote_disp); | |
559 | seq_put_decimal_ull(p, " ", disp->numa_far_disp); | |
560 | seq_puts(p, "\n"); | |
561 | } | |
562 | ||
563 | return 0; | |
564 | } | |
565 | ||
566 | static int vcpudispatch_stats_open(struct inode *inode, struct file *file) | |
567 | { | |
568 | return single_open(file, vcpudispatch_stats_display, NULL); | |
569 | } | |
570 | ||
571 | static const struct file_operations vcpudispatch_stats_proc_ops = { | |
572 | .open = vcpudispatch_stats_open, | |
573 | .read = seq_read, | |
574 | .write = vcpudispatch_stats_write, | |
575 | .llseek = seq_lseek, | |
576 | .release = single_release, | |
577 | }; | |
578 | ||
579 | static ssize_t vcpudispatch_stats_freq_write(struct file *file, | |
580 | const char __user *p, size_t count, loff_t *ppos) | |
581 | { | |
582 | int rc, freq; | |
583 | char buf[16]; | |
584 | ||
585 | if (count > 15) | |
586 | return -EINVAL; | |
587 | ||
588 | if (copy_from_user(buf, p, count)) | |
589 | return -EFAULT; | |
590 | ||
591 | buf[count] = 0; | |
592 | rc = kstrtoint(buf, 0, &freq); | |
593 | if (rc || freq < 1 || freq > HZ) { | |
594 | pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n", | |
595 | HZ); | |
596 | return rc ? rc : -EINVAL; | |
597 | } | |
598 | ||
599 | vcpudispatch_stats_freq = freq; | |
600 | ||
601 | return count; | |
602 | } | |
603 | ||
604 | static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v) | |
605 | { | |
606 | seq_printf(p, "%d\n", vcpudispatch_stats_freq); | |
607 | return 0; | |
608 | } | |
609 | ||
610 | static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file) | |
611 | { | |
612 | return single_open(file, vcpudispatch_stats_freq_display, NULL); | |
613 | } | |
614 | ||
615 | static const struct file_operations vcpudispatch_stats_freq_proc_ops = { | |
616 | .open = vcpudispatch_stats_freq_open, | |
617 | .read = seq_read, | |
618 | .write = vcpudispatch_stats_freq_write, | |
619 | .llseek = seq_lseek, | |
620 | .release = single_release, | |
621 | }; | |
622 | ||
623 | static int __init vcpudispatch_stats_procfs_init(void) | |
624 | { | |
625 | if (!lppaca_shared_proc(get_lppaca())) | |
626 | return 0; | |
627 | ||
628 | if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, | |
629 | &vcpudispatch_stats_proc_ops)) | |
630 | pr_err("vcpudispatch_stats: error creating procfs file\n"); | |
631 | else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL, | |
632 | &vcpudispatch_stats_freq_proc_ops)) | |
633 | pr_err("vcpudispatch_stats_freq: error creating procfs file\n"); | |
634 | ||
635 | return 0; | |
636 | } | |
637 | ||
638 | machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); | |
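/*
 * With the procfs entries created above (only on shared-processor LPARs,
 * per the lppaca_shared_proc() check), the statistics can be driven from
 * user space roughly as follows (illustrative only):
 *
 *   echo 1  > /proc/powerpc/vcpudispatch_stats        # enable collection
 *   echo 20 > /proc/powerpc/vcpudispatch_stats_freq   # samples/sec, 1..HZ
 *   cat /proc/powerpc/vcpudispatch_stats
 *
 * Enabling can fail with -EBUSY if another user of the dispatch trace log
 * already holds dtl_access_lock.  Each line printed by
 * vcpudispatch_stats_display() is "cpuN" followed by eight counters:
 * total, same_cpu, same_chip, diff_chip, far_chip, numa_home, numa_remote
 * and numa_far dispatches.
 */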
06220d78 NR |
639 | #endif /* CONFIG_PPC_SPLPAR */ |
640 | ||
1da177e4 LT |
641 | void vpa_init(int cpu) |
642 | { | |
643 | int hwcpu = get_hard_smp_processor_id(cpu); | |
2f6093c8 | 644 | unsigned long addr; |
1da177e4 | 645 | long ret; |
233ccd0d | 646 | |
b89bdfb8 ME |
647 | /* |
648 | * The spec says it "may be problematic" if CPU x registers the VPA of | |
649 | * CPU y. We should never do that, but wail if we ever do. | |
650 | */ | |
651 | WARN_ON(cpu != smp_processor_id()); | |
652 | ||
233ccd0d | 653 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
8154c5d2 | 654 | lppaca_of(cpu).vmxregs_in_use = 1; |
233ccd0d | 655 | |
6e0b8bc9 ME |
656 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
657 | lppaca_of(cpu).ebb_regs_in_use = 1; | |
658 | ||
8154c5d2 | 659 | addr = __pa(&lppaca_of(cpu)); |
2f6093c8 | 660 | ret = register_vpa(hwcpu, addr); |
1da177e4 | 661 | |
2f6093c8 | 662 | if (ret) { |
711ef84e AB |
663 | pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " |
664 | "%lx failed with %ld\n", cpu, hwcpu, addr, ret); | |
2f6093c8 MN |
665 | return; |
666 | } | |
d8c476ee | 667 | |
4e003747 | 668 | #ifdef CONFIG_PPC_BOOK3S_64 |
2f6093c8 MN |
669 | /* |
670 | * PAPR says this feature is SLB-Buffer but firmware never | |
671 | * reports that. All SPLPAR support SLB shadow buffer. | |
672 | */ | |
d8c476ee | 673 | if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) { |
d2e60075 | 674 | addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr); |
2f6093c8 MN |
675 | ret = register_slb_shadow(hwcpu, addr); |
676 | if (ret) | |
711ef84e AB |
677 | pr_err("WARNING: SLB shadow buffer registration for " |
678 | "cpu %d (hw %d) of area %lx failed with %ld\n", | |
679 | cpu, hwcpu, addr, ret); | |
2f6093c8 | 680 | } |
4e003747 | 681 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
cf9efce0 PM |
682 | |
683 | /* | |
684 | * Register dispatch trace log, if one has been allocated. | |
685 | */ | |
1c85a2a1 | 686 | register_dtl_buffer(cpu); |
1da177e4 LT |
687 | } |
688 | ||
4e003747 | 689 | #ifdef CONFIG_PPC_BOOK3S_64 |
d8c476ee | 690 | |
035223fb | 691 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
5524a27d AK |
692 | unsigned long vpn, unsigned long pa, |
693 | unsigned long rflags, unsigned long vflags, | |
b1022fbd | 694 | int psize, int apsize, int ssize) |
1da177e4 | 695 | { |
1da177e4 LT |
696 | unsigned long lpar_rc; |
697 | unsigned long flags; | |
698 | unsigned long slot; | |
96e28449 | 699 | unsigned long hpte_v, hpte_r; |
1da177e4 | 700 | |
3c726f8d | 701 | if (!(vflags & HPTE_V_BOLTED)) |
5524a27d AK |
702 | pr_devel("hpte_insert(group=%lx, vpn=%016lx, " |
703 | "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", | |
704 | hpte_group, vpn, pa, rflags, vflags, psize); | |
3c726f8d | 705 | |
b1022fbd | 706 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
6b243fcf | 707 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; |
3c726f8d BH |
708 | |
709 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 710 | pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); |
3c726f8d | 711 | |
1da177e4 LT |
712 | /* Now fill in the actual HPTE */ |
713 | /* Set CEC cookie to 0 */ | |
714 | /* Zero page = 0 */ | |
715 | /* I-cache Invalidate = 0 */ | |
716 | /* I-cache synchronize = 0 */ | |
717 | /* Exact = 0 */ | |
718 | flags = 0; | |
719 | ||
9ee820fa BK |
720 | if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) |
721 | flags |= H_COALESCE_CAND; | |
1da177e4 | 722 | |
b9377ffc | 723 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
706c8c93 | 724 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
ca42d8d2 | 725 | pr_devel("Hash table group is full\n"); |
1da177e4 | 726 | return -1; |
3c726f8d | 727 | } |
1da177e4 LT |
728 | |
729 | /* | |
730 | * Since we try to ioremap PHBs we don't own, the pte insert |
731 | * will fail. However, we must catch the failure in hash_page |
732 | * or we will loop forever, so return -2 in this case. | |
733 | */ | |
706c8c93 | 734 | if (unlikely(lpar_rc != H_SUCCESS)) { |
ca42d8d2 | 735 | pr_err("Failed hash pte insert with error %ld\n", lpar_rc); |
1da177e4 | 736 | return -2; |
3c726f8d BH |
737 | } |
738 | if (!(vflags & HPTE_V_BOLTED)) | |
551a232c | 739 | pr_devel(" -> slot: %lu\n", slot & 7); |
1da177e4 LT |
740 | |
741 | /* Because of iSeries, we have to pass down the secondary | |
742 | * bucket bit here as well | |
743 | */ | |
96e28449 | 744 | return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
745 | } |
746 | ||
747 | static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); | |
748 | ||
749 | static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | |
750 | { | |
751 | unsigned long slot_offset; | |
752 | unsigned long lpar_rc; | |
753 | int i; | |
754 | unsigned long dummy1, dummy2; | |
755 | ||
756 | /* pick a random slot to start at */ | |
757 | slot_offset = mftb() & 0x7; | |
758 | ||
759 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
760 | ||
761 | /* don't remove a bolted entry */ | |
762 | lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, | |
763 | (0x1UL << 4), &dummy1, &dummy2); | |
706c8c93 | 764 | if (lpar_rc == H_SUCCESS) |
1da177e4 | 765 | return i; |
9fb26401 MW |
766 | |
767 | /* | |
768 | * The test for adjunct partition is performed before the | |
769 | * ANDCOND test. H_RESOURCE may be returned, so we need to | |
770 | * check for that as well. | |
771 | */ | |
772 | BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); | |
1da177e4 LT |
773 | |
774 | slot_offset++; | |
775 | slot_offset &= 0x7; | |
776 | } | |
777 | ||
778 | return -1; | |
779 | } | |
780 | ||
5246adec | 781 | static void manual_hpte_clear_all(void) |
1da177e4 LT |
782 | { |
783 | unsigned long size_bytes = 1UL << ppc64_pft_size; | |
784 | unsigned long hpte_count = size_bytes >> 4; | |
d504bed6 MN |
785 | struct { |
786 | unsigned long pteh; | |
787 | unsigned long ptel; | |
788 | } ptes[4]; | |
b7abc5c5 | 789 | long lpar_rc; |
bed9a315 | 790 | unsigned long i, j; |
d504bed6 MN |
791 | |
792 | /* Read in batches of 4; |
793 | * invalidate only valid entries not in the VRMA. |
794 | * hpte_count will be a multiple of 4. |
795 | */ | |
796 | for (i = 0; i < hpte_count; i += 4) { | |
797 | lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); | |
ca42d8d2 AK |
798 | if (lpar_rc != H_SUCCESS) { |
799 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
800 | i, lpar_rc); | |
d504bed6 | 801 | continue; |
ca42d8d2 | 802 | } |
d504bed6 MN |
803 | for (j = 0; j < 4; j++){ |
804 | if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == | |
805 | HPTE_V_VRMA_MASK) | |
806 | continue; | |
807 | if (ptes[j].pteh & HPTE_V_VALID) | |
808 | plpar_pte_remove_raw(0, i + j, 0, | |
809 | &(ptes[j].pteh), &(ptes[j].ptel)); | |
b7abc5c5 SS |
810 | } |
811 | } | |
5246adec AB |
812 | } |
813 | ||
814 | static int hcall_hpte_clear_all(void) | |
815 | { | |
816 | int rc; | |
817 | ||
818 | do { | |
819 | rc = plpar_hcall_norets(H_CLEAR_HPT); | |
820 | } while (rc == H_CONTINUE); | |
821 | ||
822 | return rc; | |
823 | } | |
824 | ||
825 | static void pseries_hpte_clear_all(void) | |
826 | { | |
827 | int rc; | |
828 | ||
829 | rc = hcall_hpte_clear_all(); | |
830 | if (rc != H_SUCCESS) | |
831 | manual_hpte_clear_all(); | |
e844b1ee AB |
832 | |
833 | #ifdef __LITTLE_ENDIAN__ | |
408cddd9 HB |
834 | /* |
835 | * Reset exceptions to big endian. | |
836 | * | |
837 | * FIXME this is a hack for kexec, we need to reset the exception | |
838 | * endian before starting the new kernel and this is a convenient place | |
839 | * to do it. | |
840 | * | |
841 | * This is also called on boot when a fadump happens. In that case we | |
842 | * must not change the exception endian mode. | |
843 | */ | |
d3cbff1b BH |
844 | if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) |
845 | pseries_big_endian_exceptions(); | |
e844b1ee | 846 | #endif |
1da177e4 LT |
847 | } |
848 | ||
849 | /* | |
850 | * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and | |
851 | * the low 3 bits of flags happen to line up. So no transform is needed. | |
852 | * We can probably optimize here and assume the high bits of newpp are | |
853 | * already zero. For now I am paranoid. | |
854 | */ | |
3c726f8d BH |
855 | static long pSeries_lpar_hpte_updatepp(unsigned long slot, |
856 | unsigned long newpp, | |
5524a27d | 857 | unsigned long vpn, |
db3d8534 | 858 | int psize, int apsize, |
aefa5688 | 859 | int ssize, unsigned long inv_flags) |
1da177e4 LT |
860 | { |
861 | unsigned long lpar_rc; | |
e71ff982 | 862 | unsigned long flags; |
3c726f8d | 863 | unsigned long want_v; |
1da177e4 | 864 | |
5524a27d | 865 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 866 | |
e71ff982 BS |
867 | flags = (newpp & 7) | H_AVPN; |
868 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) | |
869 | /* Move pp0 into bit 8 (IBM 55) */ | |
870 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
871 | ||
a8c0bf3c AK |
872 | pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", |
873 | want_v, slot, flags, psize); | |
874 | ||
1189be65 | 875 | lpar_rc = plpar_pte_protect(flags, slot, want_v); |
3c726f8d | 876 | |
706c8c93 | 877 | if (lpar_rc == H_NOT_FOUND) { |
551a232c | 878 | pr_devel("not found !\n"); |
1da177e4 | 879 | return -1; |
3c726f8d BH |
880 | } |
881 | ||
551a232c | 882 | pr_devel("ok\n"); |
1da177e4 | 883 | |
706c8c93 | 884 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
885 | |
886 | return 0; | |
887 | } | |
888 | ||
4ad90c86 | 889 | static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group) |
1da177e4 | 890 | { |
4ad90c86 AK |
891 | long lpar_rc; |
892 | unsigned long i, j; | |
893 | struct { | |
894 | unsigned long pteh; | |
895 | unsigned long ptel; | |
896 | } ptes[4]; | |
1da177e4 | 897 | |
4ad90c86 | 898 | for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { |
1da177e4 | 899 | |
4ad90c86 | 900 | lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); |
ca42d8d2 AK |
901 | if (lpar_rc != H_SUCCESS) { |
902 | pr_info("Failed to read hash page table at %ld err %ld\n", | |
903 | hpte_group, lpar_rc); | |
4ad90c86 | 904 | continue; |
ca42d8d2 | 905 | } |
1da177e4 | 906 | |
4ad90c86 AK |
907 | for (j = 0; j < 4; j++) { |
908 | if (HPTE_V_COMPARE(ptes[j].pteh, want_v) && | |
909 | (ptes[j].pteh & HPTE_V_VALID)) | |
910 | return i + j; | |
911 | } | |
912 | } | |
1da177e4 | 913 | |
4ad90c86 | 914 | return -1; |
1da177e4 LT |
915 | } |
916 | ||
5524a27d | 917 | static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 918 | { |
1da177e4 | 919 | long slot; |
4ad90c86 AK |
920 | unsigned long hash; |
921 | unsigned long want_v; | |
922 | unsigned long hpte_group; | |
1da177e4 | 923 | |
5524a27d AK |
924 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
925 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
1189be65 PM |
926 | |
927 | /* Bolted entries are always in the primary group */ | |
4ad90c86 AK |
928 | hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
929 | slot = __pSeries_lpar_hpte_find(want_v, hpte_group); | |
930 | if (slot < 0) | |
931 | return -1; | |
932 | return hpte_group + slot; | |
933 | } | |
1da177e4 LT |
934 | |
935 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, | |
3c726f8d | 936 | unsigned long ea, |
1189be65 | 937 | int psize, int ssize) |
1da177e4 | 938 | { |
5524a27d AK |
939 | unsigned long vpn; |
940 | unsigned long lpar_rc, slot, vsid, flags; | |
1da177e4 | 941 | |
1189be65 | 942 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 943 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 944 | |
5524a27d | 945 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
946 | BUG_ON(slot == -1); |
947 | ||
948 | flags = newpp & 7; | |
e71ff982 BS |
949 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) |
950 | /* Move pp0 into bit 8 (IBM 55) */ | |
951 | flags |= (newpp & HPTE_R_PP0) >> 55; | |
952 | ||
1da177e4 LT |
953 | lpar_rc = plpar_pte_protect(flags, slot, 0); |
954 | ||
706c8c93 | 955 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
956 | } |
957 | ||
5524a27d | 958 | static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 AK |
959 | int psize, int apsize, |
960 | int ssize, int local) | |
1da177e4 | 961 | { |
3c726f8d | 962 | unsigned long want_v; |
1da177e4 LT |
963 | unsigned long lpar_rc; |
964 | unsigned long dummy1, dummy2; | |
965 | ||
5524a27d AK |
966 | pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", |
967 | slot, vpn, psize, local); | |
1da177e4 | 968 | |
5524a27d | 969 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1189be65 | 970 | lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); |
706c8c93 | 971 | if (lpar_rc == H_NOT_FOUND) |
1da177e4 LT |
972 | return; |
973 | ||
706c8c93 | 974 | BUG_ON(lpar_rc != H_SUCCESS); |
1da177e4 LT |
975 | } |
976 | ||
ba2dd8a2 LD |
977 | |
978 | /* | |
979 | * As defined in the PAPR's section 14.5.4.1.8 | |
980 | * The control mask doesn't include the returned reference and change bit from | |
981 | * the processed PTE. | |
982 | */ | |
983 | #define HBLKR_AVPN 0x0100000000000000UL | |
984 | #define HBLKR_CTRL_MASK 0xf800000000000000UL | |
985 | #define HBLKR_CTRL_SUCCESS 0x8000000000000000UL | |
986 | #define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL | |
987 | #define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL | |
988 | ||
989 | /** | |
990 | * H_BLOCK_REMOVE caller. | |
991 | * @idx should point to the latest @param entry set with a PTEX. | |
992 | * If a PTE cannot be processed because another CPU has already locked that |
993 | * group, those entries are put back in @param starting at index 1. |
994 | * If entries have to be retried and @retry_busy is set to true, these entries |
995 | * are retried until success. If @retry_busy is set to false, the returned |
996 | * value is the number of entries yet to process. |
997 | */ | |
998 | static unsigned long call_block_remove(unsigned long idx, unsigned long *param, | |
999 | bool retry_busy) | |
1000 | { | |
1001 | unsigned long i, rc, new_idx; | |
1002 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1003 | ||
1004 | if (idx < 2) { | |
1005 | pr_warn("Unexpected empty call to H_BLOCK_REMOVE"); | |
1006 | return 0; | |
1007 | } | |
1008 | again: | |
1009 | new_idx = 0; | |
1010 | if (idx > PLPAR_HCALL9_BUFSIZE) { | |
1011 | pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx); | |
1012 | idx = PLPAR_HCALL9_BUFSIZE; | |
1013 | } else if (idx < PLPAR_HCALL9_BUFSIZE) | |
1014 | param[idx] = HBR_END; | |
1015 | ||
1016 | rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf, | |
1017 | param[0], /* AVA */ | |
1018 | param[1], param[2], param[3], param[4], /* TS0-7 */ | |
1019 | param[5], param[6], param[7], param[8]); | |
1020 | if (rc == H_SUCCESS) | |
1021 | return 0; | |
1022 | ||
1023 | BUG_ON(rc != H_PARTIAL); | |
1024 | ||
1025 | /* Check that the unprocessed entries were 'not found' or 'busy' */ | |
1026 | for (i = 0; i < idx-1; i++) { | |
1027 | unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK; | |
1028 | ||
1029 | if (ctrl == HBLKR_CTRL_ERRBUSY) { | |
1030 | param[++new_idx] = param[i+1]; | |
1031 | continue; | |
1032 | } | |
1033 | ||
1034 | BUG_ON(ctrl != HBLKR_CTRL_SUCCESS | |
1035 | && ctrl != HBLKR_CTRL_ERRNOTFOUND); | |
1036 | } | |
1037 | ||
1038 | /* | |
1039 | * If there were entries found busy, retry these entries if requested, | |
1040 | * or if all the entries have to be retried. |
1041 | */ | |
1042 | if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) { | |
1043 | idx = new_idx + 1; | |
1044 | goto again; | |
1045 | } | |
1046 | ||
1047 | return new_idx; | |
1048 | } | |
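/*
 * Callers that must make forward progress pass retry_busy=true, as
 * do_block_remove() and hugepage_block_invalidate() do when moving on to a
 * new 8-page block; passing false lets the caller keep the leftover "busy"
 * entries in param[] and retry them together with the next batch, which is
 * what the pix handling in those callers relies on.
 */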
1049 | ||
e34aa03c | 1050 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1a527286 AK |
1051 | /* |
1052 | * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need | |
1053 | * to make sure that we avoid bouncing the hypervisor tlbie lock. | |
1054 | */ | |
1055 | #define PPC64_HUGE_HPTE_BATCH 12 | |
1056 | ||
ba2dd8a2 LD |
1057 | static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn, |
1058 | int count, int psize, int ssize) | |
1a527286 | 1059 | { |
05af40e8 | 1060 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
ba2dd8a2 LD |
1061 | unsigned long shift, current_vpgb, vpgb; |
1062 | int i, pix = 0; | |
1a527286 | 1063 | |
ba2dd8a2 LD |
1064 | shift = mmu_psize_defs[psize].shift; |
1065 | ||
1066 | for (i = 0; i < count; i++) { | |
1067 | /* | |
1068 | * Shift 3 more bits to the right to get an |
1069 | * 8-page aligned virtual address. |
1070 | */ | |
1071 | vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3)); | |
1072 | if (!pix || vpgb != current_vpgb) { | |
1073 | /* | |
1074 | * Need to start a new 8-page block; flush |
1075 | * the current one if needed. | |
1076 | */ | |
1077 | if (pix) | |
1078 | (void)call_block_remove(pix, param, true); | |
1079 | current_vpgb = vpgb; | |
1080 | param[0] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1081 | pix = 1; | |
1082 | } | |
1083 | ||
1084 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i]; | |
1085 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1086 | pix = call_block_remove(pix, param, false); | |
1087 | /* | |
1088 | * pix = 0 means that all the entries were | |
1089 | * removed; we can start a new block. |
1090 | * Otherwise, this means that there are entries |
1091 | * to retry, and pix points to the latest one, so |
1092 | * we should increment it and try to continue | |
1093 | * the same block. | |
1094 | */ | |
1095 | if (pix) | |
1096 | pix++; | |
1097 | } | |
1098 | } | |
1099 | if (pix) | |
1100 | (void)call_block_remove(pix, param, true); | |
1101 | } | |
1102 | ||
1103 | static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn, | |
1104 | int count, int psize, int ssize) | |
1105 | { | |
1106 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; | |
1107 | int i = 0, pix = 0, rc; | |
1a527286 AK |
1108 | |
1109 | for (i = 0; i < count; i++) { | |
1110 | ||
1111 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
1112 | pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0, | |
1113 | ssize, 0); | |
1114 | } else { | |
1115 | param[pix] = HBR_REQUEST | HBR_AVPN | slot[i]; | |
1116 | param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize); | |
1117 | pix += 2; | |
1118 | if (pix == 8) { | |
1119 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
1120 | param[0], param[1], param[2], | |
1121 | param[3], param[4], param[5], | |
1122 | param[6], param[7]); | |
1123 | BUG_ON(rc != H_SUCCESS); | |
1124 | pix = 0; | |
1125 | } | |
1126 | } | |
1127 | } | |
1128 | if (pix) { | |
1129 | param[pix] = HBR_END; | |
1130 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1131 | param[2], param[3], param[4], param[5], | |
1132 | param[6], param[7]); | |
1133 | BUG_ON(rc != H_SUCCESS); | |
1134 | } | |
ba2dd8a2 LD |
1135 | } |
1136 | ||
1137 | static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, | |
1138 | unsigned long *vpn, | |
1139 | int count, int psize, | |
1140 | int ssize) | |
1141 | { | |
1142 | unsigned long flags = 0; | |
1143 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | |
1144 | ||
1145 | if (lock_tlbie) | |
1146 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1147 | ||
1148 | if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) | |
1149 | hugepage_block_invalidate(slot, vpn, count, psize, ssize); | |
1150 | else | |
1151 | hugepage_bulk_invalidate(slot, vpn, count, psize, ssize); | |
1a527286 AK |
1152 | |
1153 | if (lock_tlbie) | |
1154 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1155 | } | |
1156 | ||
fa1f8ae8 AK |
1157 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, |
1158 | unsigned long addr, | |
1159 | unsigned char *hpte_slot_array, | |
d557b098 | 1160 | int psize, int ssize, int local) |
1a527286 | 1161 | { |
fa1f8ae8 | 1162 | int i, index = 0; |
1a527286 AK |
1163 | unsigned long s_addr = addr; |
1164 | unsigned int max_hpte_count, valid; | |
1165 | unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; | |
1166 | unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; | |
fa1f8ae8 | 1167 | unsigned long shift, hidx, vpn = 0, hash, slot; |
1a527286 AK |
1168 | |
1169 | shift = mmu_psize_defs[psize].shift; | |
1170 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
1171 | ||
1172 | for (i = 0; i < max_hpte_count; i++) { | |
1173 | valid = hpte_valid(hpte_slot_array, i); | |
1174 | if (!valid) | |
1175 | continue; | |
1176 | hidx = hpte_hash_index(hpte_slot_array, i); | |
1177 | ||
1178 | /* get the vpn */ | |
1179 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
1180 | vpn = hpt_vpn(addr, vsid, ssize); |
1181 | hash = hpt_hash(vpn, shift, ssize); | |
1182 | if (hidx & _PTEIDX_SECONDARY) | |
1183 | hash = ~hash; | |
1184 | ||
1185 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1186 | slot += hidx & _PTEIDX_GROUP_IX; | |
1187 | ||
1188 | slot_array[index] = slot; | |
1189 | vpn_array[index] = vpn; | |
1190 | if (index == PPC64_HUGE_HPTE_BATCH - 1) { | |
1191 | /* | |
1192 | * Now do a bulk invalidate |
1193 | */ | |
1194 | __pSeries_lpar_hugepage_invalidate(slot_array, | |
1195 | vpn_array, | |
1196 | PPC64_HUGE_HPTE_BATCH, | |
1197 | psize, ssize); | |
1198 | index = 0; | |
1199 | } else | |
1200 | index++; | |
1201 | } | |
1202 | if (index) | |
1203 | __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, | |
1204 | index, psize, ssize); | |
1205 | } | |
e34aa03c AK |
1206 | #else |
1207 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, | |
1208 | unsigned long addr, | |
1209 | unsigned char *hpte_slot_array, | |
1210 | int psize, int ssize, int local) | |
1211 | { | |
1212 | WARN(1, "%s called without THP support\n", __func__); | |
1213 | } | |
1214 | #endif | |
1a527286 | 1215 | |
27828f98 DG |
1216 | static int pSeries_lpar_hpte_removebolted(unsigned long ea, |
1217 | int psize, int ssize) | |
f8c8803b | 1218 | { |
5524a27d AK |
1219 | unsigned long vpn; |
1220 | unsigned long slot, vsid; | |
f8c8803b BP |
1221 | |
1222 | vsid = get_kernel_vsid(ea, ssize); | |
5524a27d | 1223 | vpn = hpt_vpn(ea, vsid, ssize); |
f8c8803b | 1224 | |
5524a27d | 1225 | slot = pSeries_lpar_hpte_find(vpn, psize, ssize); |
27828f98 DG |
1226 | if (slot == -1) |
1227 | return -ENOENT; | |
1228 | ||
db3d8534 AK |
1229 | /* |
1230 | * lpar doesn't use the passed actual page size | |
1231 | */ | |
1232 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0); | |
27828f98 | 1233 | return 0; |
f8c8803b BP |
1234 | } |
1235 | ||
0effa488 LD |
1236 | |
1237 | static inline unsigned long compute_slot(real_pte_t pte, | |
1238 | unsigned long vpn, | |
1239 | unsigned long index, | |
1240 | unsigned long shift, | |
1241 | int ssize) | |
1242 | { | |
1243 | unsigned long slot, hash, hidx; | |
1244 | ||
1245 | hash = hpt_hash(vpn, shift, ssize); | |
1246 | hidx = __rpte_to_hidx(pte, index); | |
1247 | if (hidx & _PTEIDX_SECONDARY) | |
1248 | hash = ~hash; | |
1249 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
1250 | slot += hidx & _PTEIDX_GROUP_IX; | |
1251 | return slot; | |
1252 | } | |
1253 | ||
ba2dd8a2 LD |
1254 | /** |
1255 | * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are |
1256 | * "all within the same naturally aligned 8 page virtual address block". | |
1257 | */ | |
1258 | static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch, | |
1259 | unsigned long *param) | |
1260 | { | |
1261 | unsigned long vpn; | |
1262 | unsigned long i, pix = 0; | |
1263 | unsigned long index, shift, slot, current_vpgb, vpgb; | |
1264 | real_pte_t pte; | |
1265 | int psize, ssize; | |
1266 | ||
1267 | psize = batch->psize; | |
1268 | ssize = batch->ssize; | |
1269 | ||
1270 | for (i = 0; i < number; i++) { | |
1271 | vpn = batch->vpn[i]; | |
1272 | pte = batch->pte[i]; | |
1273 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { | |
1274 | /* | |
1275 | * Shift 3 more bits to the right to get an |
1276 | * 8-page aligned virtual address. |
1277 | */ | |
1278 | vpgb = (vpn >> (shift - VPN_SHIFT + 3)); | |
1279 | if (!pix || vpgb != current_vpgb) { | |
1280 | /* | |
1281 | * Need to start a new 8-page block; flush |
1282 | * the current one if needed. | |
1283 | */ | |
1284 | if (pix) | |
1285 | (void)call_block_remove(pix, param, | |
1286 | true); | |
1287 | current_vpgb = vpgb; | |
1288 | param[0] = hpte_encode_avpn(vpn, psize, | |
1289 | ssize); | |
1290 | pix = 1; | |
1291 | } | |
1292 | ||
1293 | slot = compute_slot(pte, vpn, index, shift, ssize); | |
1294 | param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot; | |
1295 | ||
1296 | if (pix == PLPAR_HCALL9_BUFSIZE) { | |
1297 | pix = call_block_remove(pix, param, false); | |
1298 | /* | |
1299 | * pix = 0 means that all the entries were | |
1300 | * removed; we can start a new block. |
1301 | * Otherwise, this means that there are entries |
1302 | * to retry, and pix points to the latest one, so |
1303 | * we should increment it and try to continue | |
1304 | * the same block. | |
1305 | */ | |
1306 | if (pix) | |
1307 | pix++; | |
1308 | } | |
1309 | } pte_iterate_hashed_end(); | |
1310 | } | |
1311 | ||
1312 | if (pix) | |
1313 | (void)call_block_remove(pix, param, true); | |
1314 | } | |
1315 | ||
1da177e4 LT |
1316 | /* |
1317 | * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie | |
1318 | * lock. | |
1319 | */ | |
035223fb | 1320 | static void pSeries_lpar_flush_hash_range(unsigned long number, int local) |
1da177e4 | 1321 | { |
5524a27d | 1322 | unsigned long vpn; |
f03e64f2 | 1323 | unsigned long i, pix, rc; |
12e86f92 | 1324 | unsigned long flags = 0; |
69111bac | 1325 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
44ae3ab3 | 1326 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
05af40e8 | 1327 | unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
0effa488 | 1328 | unsigned long index, shift, slot; |
f03e64f2 | 1329 | real_pte_t pte; |
1189be65 | 1330 | int psize, ssize; |
1da177e4 LT |
1331 | |
1332 | if (lock_tlbie) | |
1333 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | |
1334 | ||
ba2dd8a2 LD |
1335 | if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) { |
1336 | do_block_remove(number, batch, param); | |
1337 | goto out; | |
1338 | } | |
1339 | ||
f03e64f2 | 1340 | psize = batch->psize; |
1189be65 | 1341 | ssize = batch->ssize; |
f03e64f2 PM |
1342 | pix = 0; |
1343 | for (i = 0; i < number; i++) { | |
5524a27d | 1344 | vpn = batch->vpn[i]; |
f03e64f2 | 1345 | pte = batch->pte[i]; |
5524a27d | 1346 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
0effa488 | 1347 | slot = compute_slot(pte, vpn, index, shift, ssize); |
12e86f92 | 1348 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { |
db3d8534 AK |
1349 | /* |
1350 | * lpar doesn't use the passed actual page size | |
1351 | */ | |
5524a27d | 1352 | pSeries_lpar_hpte_invalidate(slot, vpn, psize, |
db3d8534 | 1353 | 0, ssize, local); |
12e86f92 PM |
1354 | } else { |
1355 | param[pix] = HBR_REQUEST | HBR_AVPN | slot; | |
5524a27d | 1356 | param[pix+1] = hpte_encode_avpn(vpn, psize, |
1189be65 | 1357 | ssize); |
12e86f92 PM |
1358 | pix += 2; |
1359 | if (pix == 8) { | |
1360 | rc = plpar_hcall9(H_BULK_REMOVE, param, | |
f03e64f2 PM |
1361 | param[0], param[1], param[2], |
1362 | param[3], param[4], param[5], | |
1363 | param[6], param[7]); | |
12e86f92 PM |
1364 | BUG_ON(rc != H_SUCCESS); |
1365 | pix = 0; | |
1366 | } | |
f03e64f2 PM |
1367 | } |
1368 | } pte_iterate_hashed_end(); | |
1369 | } | |
1370 | if (pix) { | |
1371 | param[pix] = HBR_END; | |
1372 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | |
1373 | param[2], param[3], param[4], param[5], | |
1374 | param[6], param[7]); | |
1375 | BUG_ON(rc != H_SUCCESS); | |
1376 | } | |
1da177e4 | 1377 | |
ba2dd8a2 | 1378 | out: |
1da177e4 LT |
1379 | if (lock_tlbie) |
1380 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | |
1381 | } | |
1382 | ||
4e89a2d8 WS |
1383 | static int __init disable_bulk_remove(char *str) |
1384 | { | |
1385 | if (strcmp(str, "off") == 0 && | |
1386 | firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | |
65471d76 AK |
1387 | pr_info("Disabling BULK_REMOVE firmware feature"); |
1388 | powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; | |
4e89a2d8 WS |
1389 | } |
1390 | return 1; | |
1391 | } | |
1392 | ||
1393 | __setup("bulk_remove=", disable_bulk_remove); | |
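/*
 * Booting with "bulk_remove=off" on the kernel command line clears
 * FW_FEATURE_BULK_REMOVE, so pSeries_lpar_flush_hash_range() above falls
 * back to invalidating one PTE at a time via pSeries_lpar_hpte_invalidate().
 */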
1394 | ||
dbcf929c DG |
1395 | #define HPT_RESIZE_TIMEOUT 10000 /* ms */ |
1396 | ||
1397 | struct hpt_resize_state { | |
1398 | unsigned long shift; | |
1399 | int commit_rc; | |
1400 | }; | |
1401 | ||
1402 | static int pseries_lpar_resize_hpt_commit(void *data) | |
1403 | { | |
1404 | struct hpt_resize_state *state = data; | |
1405 | ||
1406 | state->commit_rc = plpar_resize_hpt_commit(0, state->shift); | |
1407 | if (state->commit_rc != H_SUCCESS) | |
1408 | return -EIO; | |
1409 | ||
1410 | /* Hypervisor has transitioned the HTAB, update our globals */ | |
1411 | ppc64_pft_size = state->shift; | |
1412 | htab_size_bytes = 1UL << ppc64_pft_size; | |
1413 | htab_hash_mask = (htab_size_bytes >> 7) - 1; | |
1414 | ||
1415 | return 0; | |
1416 | } | |
1417 | ||
1418 | /* Must be called in user context */ | |
1419 | static int pseries_lpar_resize_hpt(unsigned long shift) | |
1420 | { | |
1421 | struct hpt_resize_state state = { | |
1422 | .shift = shift, | |
1423 | .commit_rc = H_FUNCTION, | |
1424 | }; | |
1425 | unsigned int delay, total_delay = 0; | |
1426 | int rc; | |
1427 | ktime_t t0, t1, t2; | |
1428 | ||
1429 | might_sleep(); | |
1430 | ||
1431 | if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1432 | return -ENODEV; | |
1433 | ||
65471d76 | 1434 | pr_info("Attempting to resize HPT to shift %lu\n", shift); |
dbcf929c DG |
1435 | |
1436 | t0 = ktime_get(); | |
1437 | ||
1438 | rc = plpar_resize_hpt_prepare(0, shift); | |
1439 | while (H_IS_LONG_BUSY(rc)) { | |
1440 | delay = get_longbusy_msecs(rc); | |
1441 | total_delay += delay; | |
1442 | if (total_delay > HPT_RESIZE_TIMEOUT) { | |
1443 | /* prepare with shift==0 cancels an in-progress resize */ | |
1444 | rc = plpar_resize_hpt_prepare(0, 0); | |
1445 | if (rc != H_SUCCESS) | |
65471d76 | 1446 | pr_warn("Unexpected error %d cancelling timed out HPT resize\n", |
dbcf929c DG |
1447 | rc); |
1448 | return -ETIMEDOUT; | |
1449 | } | |
1450 | msleep(delay); | |
1451 | rc = plpar_resize_hpt_prepare(0, shift); | |
1452 | }; | |
1453 | ||
1454 | switch (rc) { | |
1455 | case H_SUCCESS: | |
1456 | /* Continue on */ | |
1457 | break; | |
1458 | ||
1459 | case H_PARAMETER: | |
f172acbf | 1460 | pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1461 | return -EINVAL; |
1462 | case H_RESOURCE: | |
f172acbf | 1463 | pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n"); |
dbcf929c DG |
1464 | return -EPERM; |
1465 | default: | |
65471d76 | 1466 | pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc); |
dbcf929c DG |
1467 | return -EIO; |
1468 | } | |
1469 | ||
1470 | t1 = ktime_get(); | |
1471 | ||
1472 | rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL); | |
1473 | ||
1474 | t2 = ktime_get(); | |
1475 | ||
1476 | if (rc != 0) { | |
1477 | switch (state.commit_rc) { | |
1478 | case H_PTEG_FULL: | |
dbcf929c DG |
1479 | return -ENOSPC; |
1480 | ||
1481 | default: | |
65471d76 AK |
1482 | pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n", |
1483 | state.commit_rc); | |
dbcf929c DG |
1484 | return -EIO; |
1485 | }; | |
1486 | } | |
1487 | ||
65471d76 AK |
1488 | pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n", |
1489 | shift, (long long) ktime_ms_delta(t1, t0), | |
1490 | (long long) ktime_ms_delta(t2, t1)); | |
dbcf929c DG |
1491 | |
1492 | return 0; | |
1493 | } | |
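/*
 * The resize path above is only wired up as mmu_hash_ops.resize_hpt in
 * hpte_init_pseries() below when the firmware advertises
 * FW_FEATURE_HPT_RESIZE; the generic hash MMU code can then request a
 * larger or smaller HPT (for instance after memory hot add/remove).
 */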
1494 | ||
cc3d2940 PM |
1495 | static int pseries_lpar_register_process_table(unsigned long base, |
1496 | unsigned long page_size, unsigned long table_size) | |
1497 | { | |
1498 | long rc; | |
dbfcf3cb | 1499 | unsigned long flags = 0; |
cc3d2940 | 1500 | |
dbfcf3cb PM |
1501 | if (table_size) |
1502 | flags |= PROC_TABLE_NEW; | |
cc3d2940 PM |
1503 | if (radix_enabled()) |
1504 | flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; | |
dbfcf3cb PM |
1505 | else |
1506 | flags |= PROC_TABLE_HPT_SLB; | |
cc3d2940 PM |
1507 | for (;;) { |
1508 | rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, | |
1509 | page_size, table_size); | |
1510 | if (!H_IS_LONG_BUSY(rc)) | |
1511 | break; | |
1512 | mdelay(get_longbusy_msecs(rc)); | |
1513 | } | |
1514 | if (rc != H_SUCCESS) { | |
1515 | pr_err("Failed to register process table (rc=%ld)\n", rc); | |
1516 | BUG(); | |
1517 | } | |
1518 | return rc; | |
1519 | } | |
1520 | ||
6364e84e | 1521 | void __init hpte_init_pseries(void) |
1da177e4 | 1522 | { |
7025776e BH |
1523 | mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
1524 | mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp; | |
1525 | mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; | |
1526 | mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert; | |
1527 | mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove; | |
1528 | mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted; | |
1529 | mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; | |
5246adec | 1530 | mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; |
7025776e | 1531 | mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; |
dbfcf3cb | 1532 | register_process_table = pseries_lpar_register_process_table; |
8971e1c7 ME |
1533 | |
1534 | if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | |
1535 | mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; | |
1da177e4 | 1536 | } |
14f966e7 | 1537 | |
cc3d2940 PM |
1538 | void radix_init_pseries(void) |
1539 | { | |
1540 | pr_info("Using radix MMU under hypervisor\n"); | |
1541 | register_process_table = pseries_lpar_register_process_table; | |
1542 | } | |
1543 | ||
14f966e7 RJ |
1544 | #ifdef CONFIG_PPC_SMLPAR |
1545 | #define CMO_FREE_HINT_DEFAULT 1 | |
1546 | static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; | |
1547 | ||
1548 | static int __init cmo_free_hint(char *str) | |
1549 | { | |
1550 | char *parm; | |
1551 | parm = strstrip(str); | |
1552 | ||
1553 | if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { | |
65471d76 | 1554 | pr_info("%s: CMO free page hinting is not active.\n", __func__); |
14f966e7 RJ |
1555 | cmo_free_hint_flag = 0; |
1556 | return 1; | |
1557 | } | |
1558 | ||
1559 | cmo_free_hint_flag = 1; | |
65471d76 | 1560 | pr_info("%s: CMO free page hinting is active.\n", __func__); |
14f966e7 RJ |
1561 | |
1562 | if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) | |
1563 | return 1; | |
1564 | ||
1565 | return 0; | |
1566 | } | |
1567 | ||
1568 | __setup("cmo_free_hint=", cmo_free_hint); | |
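/*
 * Booting with "cmo_free_hint=no" (or "off") disables the H_PAGE_INIT
 * "page unused" hints that arch_free_page() below would otherwise issue
 * for freed pages when running under CMO.
 */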
1569 | ||
1570 | static void pSeries_set_page_state(struct page *page, int order, | |
1571 | unsigned long state) | |
1572 | { | |
1573 | int i, j; | |
1574 | unsigned long cmo_page_sz, addr; | |
1575 | ||
1576 | cmo_page_sz = cmo_get_page_size(); | |
1577 | addr = __pa((unsigned long)page_address(page)); | |
1578 | ||
1579 | for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { | |
1580 | for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) | |
1581 | plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | void arch_free_page(struct page *page, int order) | |
1586 | { | |
d8c476ee AK |
1587 | if (radix_enabled()) |
1588 | return; | |
14f966e7 RJ |
1589 | if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) |
1590 | return; | |
1591 | ||
1592 | pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); | |
1593 | } | |
1594 | EXPORT_SYMBOL(arch_free_page); | |
1595 | ||
d8c476ee | 1596 | #endif /* CONFIG_PPC_SMLPAR */ |
4e003747 | 1597 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
c8cd093a AB |
1598 | |
1599 | #ifdef CONFIG_TRACEPOINTS | |
e9666d10 | 1600 | #ifdef CONFIG_JUMP_LABEL |
cc1adb5f AB |
1601 | struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; |
1602 | ||
8cf868af | 1603 | int hcall_tracepoint_regfunc(void) |
cc1adb5f AB |
1604 | { |
1605 | static_key_slow_inc(&hcall_tracepoint_key); | |
8cf868af | 1606 | return 0; |
cc1adb5f AB |
1607 | } |
1608 | ||
1609 | void hcall_tracepoint_unregfunc(void) | |
1610 | { | |
1611 | static_key_slow_dec(&hcall_tracepoint_key); | |
1612 | } | |
1613 | #else | |
c8cd093a AB |
1614 | /* |
1615 | * We optimise our hcall path by placing hcall_tracepoint_refcount | |
1616 | * directly in the TOC so we can check if the hcall tracepoints are | |
1617 | * enabled via a single load. | |
1618 | */ | |
1619 | ||
1620 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | |
1621 | extern long hcall_tracepoint_refcount; | |
1622 | ||
8cf868af | 1623 | int hcall_tracepoint_regfunc(void) |
c8cd093a AB |
1624 | { |
1625 | hcall_tracepoint_refcount++; | |
8cf868af | 1626 | return 0; |
c8cd093a AB |
1627 | } |
1628 | ||
1629 | void hcall_tracepoint_unregfunc(void) | |
1630 | { | |
1631 | hcall_tracepoint_refcount--; | |
1632 | } | |
cc1adb5f AB |
1633 | #endif |
1634 | ||
1635 | /* | |
1636 | * Since the tracing code might execute hcalls, we need to guard against | 
1637 | * recursion. One example of this is spinlocks calling H_YIELD on | 
1638 | * shared processor partitions. | |
1639 | */ | |
1640 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | |
1641 | ||
c8cd093a | 1642 | |
6f26353c | 1643 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
c8cd093a | 1644 | { |
57cdfdf8 AB |
1645 | unsigned long flags; |
1646 | unsigned int *depth; | |
1647 | ||
a5ccfee0 AB |
1648 | /* |
1649 | * We cannot call tracepoints inside RCU idle regions, which | 
1650 | * means we must not trace H_CEDE. | |
1651 | */ | |
1652 | if (opcode == H_CEDE) | |
1653 | return; | |
1654 | ||
57cdfdf8 AB |
1655 | local_irq_save(flags); |
1656 | ||
69111bac | 1657 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1658 | |
1659 | if (*depth) | |
1660 | goto out; | |
1661 | ||
1662 | (*depth)++; | |
e4f387d8 | 1663 | preempt_disable(); |
6f26353c | 1664 | trace_hcall_entry(opcode, args); |
57cdfdf8 AB |
1665 | (*depth)--; |
1666 | ||
1667 | out: | |
1668 | local_irq_restore(flags); | |
c8cd093a AB |
1669 | } |
1670 | ||
8f2133cc | 1671 | void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf) |
c8cd093a | 1672 | { |
57cdfdf8 AB |
1673 | unsigned long flags; |
1674 | unsigned int *depth; | |
1675 | ||
a5ccfee0 AB |
1676 | if (opcode == H_CEDE) |
1677 | return; | |
1678 | ||
57cdfdf8 AB |
1679 | local_irq_save(flags); |
1680 | ||
69111bac | 1681 | depth = this_cpu_ptr(&hcall_trace_depth); |
57cdfdf8 AB |
1682 | |
1683 | if (*depth) | |
1684 | goto out; | |
1685 | ||
1686 | (*depth)++; | |
6f26353c | 1687 | trace_hcall_exit(opcode, retval, retbuf); |
e4f387d8 | 1688 | preempt_enable(); |
57cdfdf8 AB |
1689 | (*depth)--; |
1690 | ||
1691 | out: | |
1692 | local_irq_restore(flags); | |
c8cd093a AB |
1693 | } |
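/*
 * A minimal sketch of a consumer of these tracepoints, modelled on
 * hvCall_inst.c; the probe and init function names below are illustrative
 * only:
 *
 *	static void probe_hcall_entry(void *ignored, unsigned long opcode,
 *				      unsigned long *args)
 *	{
 *		pr_debug("hcall 0x%lx entry\n", opcode);
 *	}
 *
 *	static int __init hcall_probe_init(void)
 *	{
 *		return register_trace_hcall_entry(probe_hcall_entry, NULL);
 *	}
 */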
1694 | #endif | |
9ee820fa BK |
1695 | |
1696 | /** | |
1697 | * h_get_mpp | |
1698 | * The H_GET_MPP hcall returns info in 7 parameters. | 
1699 | */ | |
1700 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | |
1701 | { | |
1702 | int rc; | |
1703 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
1704 | ||
1705 | rc = plpar_hcall9(H_GET_MPP, retbuf); | |
1706 | ||
1707 | mpp_data->entitled_mem = retbuf[0]; | |
1708 | mpp_data->mapped_mem = retbuf[1]; | |
1709 | ||
1710 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | |
1711 | mpp_data->pool_num = retbuf[2] & 0xffff; | |
1712 | ||
1713 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | |
1714 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | |
b0d436c7 | 1715 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL; |
9ee820fa BK |
1716 | |
1717 | mpp_data->pool_size = retbuf[4]; | |
1718 | mpp_data->loan_request = retbuf[5]; | |
1719 | mpp_data->backing_mem = retbuf[6]; | |
1720 | ||
1721 | return rc; | |
1722 | } | |
1723 | EXPORT_SYMBOL(h_get_mpp); | |
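/*
 * Typical use, a sketch along the lines of lparcfg's MPP reporting (the
 * caller and the print format here are illustrative):
 *
 *	struct hvcall_mpp_data mpp_data;
 *
 *	if (h_get_mpp(&mpp_data) == H_SUCCESS)
 *		pr_info("entitled_memory=%lu\n", mpp_data.entitled_mem);
 */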
1724 | ||
1725 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) | |
1726 | { | |
1727 | int rc; | |
1728 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; | |
1729 | ||
1730 | rc = plpar_hcall9(H_GET_MPP_X, retbuf); | |
1731 | ||
1732 | mpp_x_data->coalesced_bytes = retbuf[0]; | |
1733 | mpp_x_data->pool_coalesced_bytes = retbuf[1]; | |
1734 | mpp_x_data->pool_purr_cycles = retbuf[2]; | |
1735 | mpp_x_data->pool_spurr_cycles = retbuf[3]; | |
1736 | ||
1737 | return rc; | |
1738 | } | |
82228e36 AK |
1739 | |
1740 | static unsigned long vsid_unscramble(unsigned long vsid, int ssize) | |
1741 | { | |
1742 | unsigned long protovsid; | |
1743 | unsigned long va_bits = VA_BITS; | |
1744 | unsigned long modinv, vsid_modulus; | |
1745 | unsigned long max_mod_inv, tmp_modinv; | |
1746 | ||
1747 | if (!mmu_has_feature(MMU_FTR_68_BIT_VA)) | |
1748 | va_bits = 65; | |
1749 | ||
1750 | if (ssize == MMU_SEGSIZE_256M) { | |
1751 | modinv = VSID_MULINV_256M; | |
1752 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1); | |
1753 | } else { | |
1754 | modinv = VSID_MULINV_1T; | |
1755 | vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1); | |
1756 | } | |
1757 | ||
1758 | /* | |
1759 | * vsid outside our range. | |
1760 | */ | |
1761 | if (vsid >= vsid_modulus) | |
1762 | return 0; | |
1763 | ||
1764 | /* | |
1765 | * If modinv is the modular multiplicative inverse of (x % vsid_modulus) | 
1766 | * and vsid = (protovsid * x) % vsid_modulus, then we say: | |
1767 | * protovsid = (vsid * modinv) % vsid_modulus | |
1768 | */ | |
1769 | ||
1770 | /* Check if (vsid * modinv) overflows 63 bits */ | 
1771 | max_mod_inv = 0x7fffffffffffffffull / vsid; | |
1772 | if (modinv < max_mod_inv) | |
1773 | return (vsid * modinv) % vsid_modulus; | |
1774 | ||
1775 | tmp_modinv = modinv/max_mod_inv; | |
1776 | modinv %= max_mod_inv; | |
1777 | ||
1778 | protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus; | |
1779 | protovsid = (protovsid + vsid * modinv) % vsid_modulus; | |
1780 | ||
1781 | return protovsid; | |
1782 | } | |
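/*
 * The two-step slow path above avoids 64-bit overflow by splitting
 * modinv as tmp_modinv * max_mod_inv + modinv' (with modinv' < max_mod_inv,
 * so vsid * modinv' cannot overflow 63 bits) and then using
 *
 *	(vsid * modinv) % m =
 *		(((vsid * max_mod_inv) % m) * tmp_modinv + vsid * modinv') % m
 *
 * where m is vsid_modulus.
 */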
1783 | ||
1784 | static int __init reserve_vrma_context_id(void) | |
1785 | { | |
1786 | unsigned long protovsid; | |
1787 | ||
1788 | /* | |
1789 | * Reserve context ids which map to reserved virtual addresses. For now | |
1790 | * we only reserve the context id which maps to the VRMA VSID. We ignore | |
1791 | * the addresses in "ibm,adjunct-virtual-addresses" because we don't | |
1792 | * enable adjunct support via the "ibm,client-architecture-support" | |
1793 | * interface. | |
1794 | */ | |
1795 | protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T); | |
1796 | hash__reserve_context_id(protovsid >> ESID_BITS_1T); | |
1797 | return 0; | |
1798 | } | |
1799 | machine_device_initcall(pseries, reserve_vrma_context_id); | |
c6c26fb5 AP |
1800 | |
1801 | #ifdef CONFIG_DEBUG_FS | |
1802 | /* debugfs file interface for vpa data */ | |
1803 | static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, | |
1804 | loff_t *pos) | |
1805 | { | |
1806 | int cpu = (long)filp->private_data; | |
1807 | struct lppaca *lppaca = &lppaca_of(cpu); | |
1808 | ||
1809 | return simple_read_from_buffer(buf, len, pos, lppaca, | |
1810 | sizeof(struct lppaca)); | |
1811 | } | |
1812 | ||
1813 | static const struct file_operations vpa_fops = { | |
1814 | .open = simple_open, | |
1815 | .read = vpa_file_read, | |
1816 | .llseek = default_llseek, | |
1817 | }; | |
1818 | ||
1819 | static int __init vpa_debugfs_init(void) | |
1820 | { | |
1821 | char name[16]; | |
1822 | long i; | |
1823 | static struct dentry *vpa_dir; | |
1824 | ||
1825 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | |
1826 | return 0; | |
1827 | ||
1828 | vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); | |
1829 | if (!vpa_dir) { | |
1830 | pr_warn("%s: can't create vpa root dir\n", __func__); | |
1831 | return -ENOMEM; | |
1832 | } | |
1833 | ||
1834 | /* set up the per-cpu vpa file */ | 
1835 | for_each_possible_cpu(i) { | |
1836 | struct dentry *d; | |
1837 | ||
1838 | sprintf(name, "cpu-%ld", i); | |
1839 | ||
1840 | d = debugfs_create_file(name, 0400, vpa_dir, (void *)i, | |
1841 | &vpa_fops); | |
1842 | if (!d) { | |
1843 | pr_warn("%s: can't create per-cpu vpa file\n", | |
1844 | __func__); | |
1845 | return -ENOMEM; | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | return 0; | |
1850 | } | |
1851 | machine_arch_initcall(pseries, vpa_debugfs_init); | |
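/*
 * With debugfs mounted in the usual place, each per-cpu VPA can then be
 * dumped from user space, e.g. (the path assumes the standard mount point
 * and the "powerpc" debugfs root directory):
 *
 *	hexdump -C /sys/kernel/debug/powerpc/vpa/cpu-0
 */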
1852 | #endif /* CONFIG_DEBUG_FS */ |