Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
913e4a75 | 6 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. |
1da177e4 LT |
7 | */ |
8 | ||
9 | #include <linux/config.h> | |
10 | #include <linux/module.h> | |
11 | #include <asm/sn/nodepda.h> | |
12 | #include <asm/sn/addrs.h> | |
13 | #include <asm/sn/arch.h> | |
14 | #include <asm/sn/sn_cpuid.h> | |
15 | #include <asm/sn/pda.h> | |
16 | #include <asm/sn/shubio.h> | |
17 | #include <asm/nodedata.h> | |
18 | #include <asm/delay.h> | |
19 | ||
20 | #include <linux/bootmem.h> | |
21 | #include <linux/string.h> | |
22 | #include <linux/sched.h> | |
23 | ||
24 | #include <asm/sn/bte.h> | |
25 | ||
26 | #ifndef L1_CACHE_MASK | |
27 | #define L1_CACHE_MASK (L1_CACHE_BYTES - 1) | |
28 | #endif | |
29 | ||
30 | /* two interfaces on two btes */ | |
31 | #define MAX_INTERFACES_TO_TRY 4 | |
7e95b9d6 | 32 | #define MAX_NODES_TO_TRY 2 |
1da177e4 LT |
33 | |
34 | static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface) | |
35 | { | |
36 | nodepda_t *tmp_nodepda; | |
37 | ||
7e95b9d6 | 38 | if (nasid_to_cnodeid(nasid) == -1) |
53b3531b | 39 | return (struct bteinfo_s *)NULL; |
7e95b9d6 | 40 | |
1da177e4 LT |
41 | tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid)); |
42 | return &tmp_nodepda->bte_if[interface]; | |
43 | ||
44 | } | |
45 | ||
/*
 * bte_start_transfer(bte, len, mode)
 *
 * Kick off a transfer on a BTE interface the caller has already locked.
 *
 * On shub2 hardware a single store to the control/status register
 * carries the busy flag, the length (low bits) and the mode (shifted
 * into bits 24 and up).  On shub1 the length and mode live in separate
 * registers; the control store comes last because it is the write that
 * actually initiates the transfer.
 */
static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
{
	if (is_shub2()) {
		/* Combined store: IBLS_BUSY | len | (mode << 24). */
		BTE_CTRL_STORE(bte, (IBLS_BUSY | ((len) | (mode) << 24)));
	} else {
		BTE_LNSTAT_STORE(bte, len);
		BTE_CTRL_STORE(bte, mode);	/* this store starts the copy */
	}
}
55 | ||
1da177e4 LT |
56 | /************************************************************************ |
57 | * Block Transfer Engine copy related functions. | |
58 | * | |
59 | ***********************************************************************/ | |
60 | ||
61 | /* | |
62 | * bte_copy(src, dest, len, mode, notification) | |
63 | * | |
64 | * Use the block transfer engine to move kernel memory from src to dest | |
65 | * using the assigned mode. | |
66 | * | |
 67 | * Parameters: | |
68 | * src - physical address of the transfer source. | |
69 | * dest - physical address of the transfer destination. | |
70 | * len - number of bytes to transfer from source to dest. | |
71 | * mode - hardware defined. See reference information | |
72 | * for IBCT0/1 in the SHUB Programmers Reference | |
73 | * notification - kernel virtual address of the notification cache | |
74 | * line. If NULL, the default is used and | |
75 | * the bte_copy is synchronous. | |
76 | * | |
77 | * NOTE: This function requires src, dest, and len to | |
78 | * be cacheline aligned. | |
79 | */ | |
bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
	u64 transfer_size;
	u64 transfer_stat;
	u64 notif_phys_addr;
	struct bteinfo_s *bte;
	bte_result_t bte_status;
	unsigned long irq_flags;
	unsigned long itc_end = 0;
	/* Candidate nodes whose BTE interfaces we may use, in order. */
	int nasid_to_try[MAX_NODES_TO_TRY];
	int my_nasid = cpuid_to_nasid(raw_smp_processor_id());
	int bte_if_index, nasid_index;
	int bte_first, btes_per_node = BTES_PER_NODE;

	BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
		    src, dest, len, mode, notification));

	/* Zero-length transfer is trivially complete. */
	if (len == 0) {
		return BTE_SUCCESS;
	}

	/* The engine requires cacheline-aligned src/dest/len, and len
	 * must fit in the hardware length field. */
	BUG_ON((len & L1_CACHE_MASK) ||
	       (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
	BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));

	/*
	 * Start with interface corresponding to cpu number
	 */
	bte_first = raw_smp_processor_id() % btes_per_node;

	if (mode & BTE_USE_DEST) {
		/* try remote then local */
		nasid_to_try[0] = NASID_GET(dest);
		if (mode & BTE_USE_ANY) {
			nasid_to_try[1] = my_nasid;
		} else {
			/*
			 * NOTE(review): 0 (via (int)NULL) is used as a
			 * "no second node" sentinel here; verify that
			 * nasid 0 cannot be a valid candidate in this
			 * configuration.
			 */
			nasid_to_try[1] = (int)NULL;
		}
	} else {
		/* try local then remote */
		nasid_to_try[0] = my_nasid;
		if (mode & BTE_USE_ANY) {
			nasid_to_try[1] = NASID_GET(dest);
		} else {
			nasid_to_try[1] = (int)NULL;
		}
	}

retry_bteop:
	do {
		local_irq_save(irq_flags);

		bte_if_index = bte_first;
		nasid_index = 0;

		/* Attempt to lock one of the BTE interfaces. */
		while (nasid_index < MAX_NODES_TO_TRY) {
			bte = bte_if_on_node(nasid_to_try[nasid_index],bte_if_index);

			if (bte == NULL) {
				/* No such node; move to the next candidate. */
				nasid_index++;
				continue;
			}

			if (spin_trylock(&bte->spinlock)) {
				if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
				    (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
					/* Got the lock but BTE still busy */
					spin_unlock(&bte->spinlock);
				} else {
					/* we got the lock and it's not busy */
					break;
				}
			}

			bte_if_index = (bte_if_index + 1) % btes_per_node; /* Next interface */
			if (bte_if_index == bte_first) {
				/*
				 * We've tried all interfaces on this node
				 */
				nasid_index++;
			}

			bte = NULL;
		}

		/* bte != NULL means we hold its spinlock and it is idle. */
		if (bte != NULL) {
			break;
		}

		local_irq_restore(irq_flags);

		/* Without BTE_WACQUIRE the caller does not want us to
		 * spin waiting for an interface to free up. */
		if (!(mode & BTE_WACQUIRE)) {
			return BTEFAIL_NOTAVAIL;
		}
	} while (1);

	if (notification == NULL) {
		/* User does not want to be notified. */
		bte->most_rcnt_na = &bte->notify;
	} else {
		bte->most_rcnt_na = notification;
	}

	/* Calculate the number of cache lines to transfer. */
	transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);

	/* Initialize the notification to a known value. */
	*bte->most_rcnt_na = BTE_WORD_BUSY;
	notif_phys_addr = (u64)bte->most_rcnt_na;

	/* Set the source and destination registers */
	BTE_PRINTKV(("IBSA = 0x%lx)\n", src));
	BTE_SRC_STORE(bte, src);
	BTE_PRINTKV(("IBDA = 0x%lx)\n", dest));
	BTE_DEST_STORE(bte, dest);

	/* Set the notification register */
	BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
	BTE_NOTIF_STORE(bte, notif_phys_addr);

	/* Initiate the transfer */
	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
	bte_start_transfer(bte, transfer_size, BTE_VALID_MODE(mode));

	/* Deadline for the polling loop below: 40,000,000 usec worth of
	 * ITC cycles (~40 seconds). */
	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);

	spin_unlock_irqrestore(&bte->spinlock, irq_flags);

	/* Asynchronous caller: hardware will update *notification. */
	if (notification != NULL) {
		return BTE_SUCCESS;
	}

	/* Synchronous caller: poll the notification word until the
	 * engine clears BTE_WORD_BUSY or the deadline passes. */
	while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
		cpu_relax();
		if (ia64_get_itc() > itc_end) {
			BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
				NASID_GET(bte->bte_base_addr), bte->bte_num,
				BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
			/* Timed out: run error recovery, release the
			 * interface, then retry the whole operation. */
			bte->bte_error_count++;
			bte->bh_error = IBLS_ERROR;
			bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
			*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
			goto retry_bteop;
		}
	}

	BTE_PRINTKV((" Delay Done.  IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
		     BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));

	/* IBLS_ERROR set means the remaining bits hold the error code. */
	if (transfer_stat & IBLS_ERROR) {
		bte_status = transfer_stat & ~IBLS_ERROR;
	} else {
		bte_status = BTE_SUCCESS;
	}
	/* Mark the interface free for the next user. */
	*bte->most_rcnt_na = BTE_WORD_AVAILABLE;

	BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
		    BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));

	return bte_status;
}
242 | ||
243 | EXPORT_SYMBOL(bte_copy); | |
244 | ||
245 | /* | |
246 | * bte_unaligned_copy(src, dest, len, mode) | |
247 | * | |
248 | * use the block transfer engine to move kernel | |
249 | * memory from src to dest using the assigned mode. | |
250 | * | |
 251 | * Parameters: | |
252 | * src - physical address of the transfer source. | |
253 | * dest - physical address of the transfer destination. | |
254 | * len - number of bytes to transfer from source to dest. | |
255 | * mode - hardware defined. See reference information | |
256 | * for IBCT0/1 in the SGI documentation. | |
257 | * | |
258 | * NOTE: If the source, dest, and len are all cache line aligned, | |
 259 | * then it would be _FAR_ preferable to use bte_copy instead. | |
260 | */ | |
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
	int destFirstCacheOffset;
	u64 headBteSource;	/* cacheline-aligned BTE source for the head */
	u64 headBteLen;		/* BTE length for the head section */
	u64 headBcopySrcOffset;	/* src offset within its first cacheline */
	u64 headBcopyDest;	/* memcpy destination for the head section */
	u64 headBcopyLen;	/* bytes to memcpy for the head section */
	u64 footBteSource;
	u64 footBteLen;
	u64 footBcopyDest;
	u64 footBcopyLen;
	bte_result_t rv;
	char *bteBlock, *bteBlock_unaligned;

	if (len == 0) {
		return BTE_SUCCESS;
	}

	/* temporary buffer used during unaligned transfers;
	 * oversized by 3 cachelines to allow alignment plus
	 * head/foot slop */
	bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
				     GFP_KERNEL | GFP_DMA);
	if (bteBlock_unaligned == NULL) {
		return BTEFAIL_NOTAVAIL;
	}
	bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);

	headBcopySrcOffset = src & L1_CACHE_MASK;
	destFirstCacheOffset = dest & L1_CACHE_MASK;

	/*
	 * At this point, the transfer is broken into
	 * (up to) three sections.  The first section is
	 * from the start address to the first physical
	 * cache line, the second is from the first physical
	 * cache line to the last complete cache line,
	 * and the third is from the last cache line to the
	 * end of the buffer.  The first and third sections
	 * are handled by bte copying into a temporary buffer
	 * and then bcopy'ing the necessary section into the
	 * final location.  The middle section is handled with
	 * a standard bte copy.
	 *
	 * One nasty exception to the above rule is when the
	 * source and destination are not symmetrically
	 * mis-aligned.  If the source offset from the first
	 * cache line is different from the destination offset,
	 * we make the first section be the entire transfer
	 * and the bcopy the entire block into place.
	 */
	if (headBcopySrcOffset == destFirstCacheOffset) {

		/*
		 * Both the source and destination are the same
		 * distance from a cache line boundary so we can
		 * use the bte to transfer the bulk of the
		 * data.
		 */
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopyDest = dest;
		if (headBcopySrcOffset) {
			/* Head section: the partial cacheline before the
			 * first aligned boundary (or all of len if the
			 * transfer ends within that first line). */
			headBcopyLen =
			    (len >
			     (L1_CACHE_BYTES -
			      headBcopySrcOffset) ? L1_CACHE_BYTES
			     - headBcopySrcOffset : len);
			headBteLen = L1_CACHE_BYTES;
		} else {
			headBcopyLen = 0;
			headBteLen = 0;
		}

		if (len > headBcopyLen) {
			/* Foot section: the partial cacheline after the
			 * last aligned boundary. */
			footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
			footBteLen = L1_CACHE_BYTES;

			footBteSource = src + len - footBcopyLen;
			footBcopyDest = dest + len - footBcopyLen;

			if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
				/*
				 * We have two contiguous bcopy
				 * blocks.  Merge them.
				 */
				headBcopyLen += footBcopyLen;
				headBteLen += footBteLen;
			} else if (footBcopyLen > 0) {
				/* BTE the foot cacheline into the bounce
				 * buffer, then memcpy the tail bytes into
				 * place. */
				rv = bte_copy(footBteSource,
					      ia64_tpa((unsigned long)bteBlock),
					      footBteLen, mode, NULL);
				if (rv != BTE_SUCCESS) {
					kfree(bteBlock_unaligned);
					return rv;
				}

				memcpy(__va(footBcopyDest),
				       (char *)bteBlock, footBcopyLen);
			}
		} else {
			footBcopyLen = 0;
			footBteLen = 0;
		}

		if (len > (headBcopyLen + footBcopyLen)) {
			/* now transfer the middle. */
			rv = bte_copy((src + headBcopyLen),
				      (dest +
				       headBcopyLen),
				      (len - headBcopyLen -
				       footBcopyLen), mode, NULL);
			if (rv != BTE_SUCCESS) {
				kfree(bteBlock_unaligned);
				return rv;
			}

		}
	} else {

		/*
		 * The transfer is not symmetric, we will
		 * allocate a buffer large enough for all the
		 * data, bte_copy into that buffer and then
		 * bcopy to the destination.
		 */

		/* Add the leader from source */
		headBteLen = len + (src & L1_CACHE_MASK);
		/* Add the trailing bytes from footer. */
		headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
		headBteSource = src & ~L1_CACHE_MASK;
		headBcopySrcOffset = src & L1_CACHE_MASK;
		headBcopyDest = dest;
		headBcopyLen = len;
	}

	if (headBcopyLen > 0) {
		/* BTE the (aligned, padded) head region into the bounce
		 * buffer, then memcpy the exact bytes to the destination. */
		rv = bte_copy(headBteSource,
			      ia64_tpa((unsigned long)bteBlock), headBteLen,
			      mode, NULL);
		if (rv != BTE_SUCCESS) {
			kfree(bteBlock_unaligned);
			return rv;
		}

		memcpy(__va(headBcopyDest), ((char *)bteBlock +
					     headBcopySrcOffset), headBcopyLen);
	}
	kfree(bteBlock_unaligned);
	return BTE_SUCCESS;
}
411 | ||
412 | EXPORT_SYMBOL(bte_unaligned_copy); | |
413 | ||
414 | /************************************************************************ | |
415 | * Block Transfer Engine initialization functions. | |
416 | * | |
417 | ***********************************************************************/ | |
418 | ||
419 | /* | |
420 | * bte_init_node(nodepda, cnode) | |
421 | * | |
422 | * Initialize the nodepda structure with BTE base addresses and | |
423 | * spinlocks. | |
424 | */ | |
425 | void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode) | |
426 | { | |
427 | int i; | |
428 | ||
429 | /* | |
430 | * Indicate that all the block transfer engines on this node | |
431 | * are available. | |
432 | */ | |
433 | ||
434 | /* | |
435 | * Allocate one bte_recover_t structure per node. It holds | |
436 | * the recovery lock for node. All the bte interface structures | |
437 | * will point at this one bte_recover structure to get the lock. | |
438 | */ | |
439 | spin_lock_init(&mynodepda->bte_recovery_lock); | |
440 | init_timer(&mynodepda->bte_recovery_timer); | |
441 | mynodepda->bte_recovery_timer.function = bte_error_handler; | |
442 | mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; | |
443 | ||
444 | for (i = 0; i < BTES_PER_NODE; i++) { | |
95ff439a RA |
445 | u64 *base_addr; |
446 | ||
1da177e4 | 447 | /* Which link status register should we use? */ |
95ff439a RA |
448 | base_addr = (u64 *) |
449 | REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i)); | |
450 | mynodepda->bte_if[i].bte_base_addr = base_addr; | |
451 | mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr); | |
452 | mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr); | |
453 | mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr); | |
454 | mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr); | |
1da177e4 LT |
455 | |
456 | /* | |
457 | * Initialize the notification and spinlock | |
458 | * so the first transfer can occur. | |
459 | */ | |
460 | mynodepda->bte_if[i].most_rcnt_na = | |
461 | &(mynodepda->bte_if[i].notify); | |
462 | mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE; | |
463 | spin_lock_init(&mynodepda->bte_if[i].spinlock); | |
464 | ||
465 | mynodepda->bte_if[i].bte_cnode = cnode; | |
466 | mynodepda->bte_if[i].bte_error_count = 0; | |
467 | mynodepda->bte_if[i].bte_num = i; | |
468 | mynodepda->bte_if[i].cleanup_active = 0; | |
469 | mynodepda->bte_if[i].bh_error = 0; | |
470 | } | |
471 | ||
472 | } |