Commit | Line | Data |
---|---|---|
50acfb2b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
08f051ed AW |
2 | /* |
3 | * Copyright (C) 2017 SiFive | |
08f051ed AW |
4 | */ |
5 | ||
5c20a3a9 | 6 | #include <linux/of.h> |
08f051ed AW |
7 | #include <asm/cacheflush.h> |
8 | ||
58de7754 GG |
9 | #ifdef CONFIG_SMP |
10 | ||
11 | #include <asm/sbi.h> | |
12 | ||
8bf90f32 CH |
/*
 * IPI callback: flush this hart's local instruction cache.
 *
 * @info: unused; required by the on_each_cpu*() callback signature.
 */
static void ipi_remote_fence_i(void *info)
{
	/*
	 * Plain call, not "return local_flush_icache_all();": ISO C
	 * (C11 6.8.6.4) forbids a return statement with an expression in a
	 * function whose return type is void.
	 */
	local_flush_icache_all();
}
17 | ||
58de7754 GG |
18 | void flush_icache_all(void) |
19 | { | |
bb8958d5 AG |
20 | local_flush_icache_all(); |
21 | ||
8bf90f32 CH |
22 | if (IS_ENABLED(CONFIG_RISCV_SBI)) |
23 | sbi_remote_fence_i(NULL); | |
24 | else | |
25 | on_each_cpu(ipi_remote_fence_i, NULL, 1); | |
58de7754 | 26 | } |
1833e327 | 27 | EXPORT_SYMBOL(flush_icache_all); |
58de7754 GG |
28 | |
/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 *
 * @mm:    MM context whose code pages were modified.
 * @local: caller asserts only the local hart needs a synchronous flush;
 *         it is OR-ed with "no other hart currently runs @mm", in which
 *         case remote harts are handled lazily via the stale mask.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	/* The per-cpu mask manipulation below must not race with migration. */
	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		/* Firmware executes fence.i on the remote harts for us. */
		sbi_remote_fence_i(&others);
	} else {
		/* No SBI: IPI each remote hart to flush its own icache. */
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
78 | ||
79 | #endif /* CONFIG_SMP */ | |
80 | ||
6bd33e1e | 81 | #ifdef CONFIG_MMU |
08f051ed AW |
82 | void flush_icache_pte(pte_t pte) |
83 | { | |
84 | struct page *page = pte_page(pte); | |
85 | ||
86 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) | |
87 | flush_icache_all(); | |
88 | } | |
6bd33e1e | 89 | #endif /* CONFIG_MMU */ |
5c20a3a9 AJ |
90 | |
/*
 * Cache-block size in bytes for Zicbom cache-management operations,
 * probed from the devicetree by riscv_init_cbom_blocksize(); remains 0
 * if no CPU node supplies "riscv,cbom-block-size". Exported for use by
 * other modules.
 */
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
93 | ||
94 | void riscv_init_cbom_blocksize(void) | |
95 | { | |
96 | struct device_node *node; | |
97 | unsigned long cbom_hartid; | |
98 | u32 val, probed_block_size; | |
99 | int ret; | |
100 | ||
101 | probed_block_size = 0; | |
102 | for_each_of_cpu_node(node) { | |
103 | unsigned long hartid; | |
104 | ||
105 | ret = riscv_of_processor_hartid(node, &hartid); | |
106 | if (ret) | |
107 | continue; | |
108 | ||
109 | /* set block-size for cbom extension if available */ | |
110 | ret = of_property_read_u32(node, "riscv,cbom-block-size", &val); | |
111 | if (ret) | |
112 | continue; | |
113 | ||
114 | if (!probed_block_size) { | |
115 | probed_block_size = val; | |
116 | cbom_hartid = hartid; | |
117 | } else { | |
118 | if (probed_block_size != val) | |
119 | pr_warn("cbom-block-size mismatched between harts %lu and %lu\n", | |
120 | cbom_hartid, hartid); | |
121 | } | |
122 | } | |
123 | ||
124 | if (probed_block_size) | |
125 | riscv_cbom_block_size = probed_block_size; | |
126 | } |