locking/core: Introduce cpu_relax_yield()
arch/alpha/include/asm/processor.h

/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */

/*
 * Returns current instruction pointer ("program counter").
 */
#define current_text_addr() \
	({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
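
The "br %0,.+4" trick works because the Alpha br instruction writes the
address of the instruction following it into its destination register
before branching; branching to .+4 (the very next instruction) makes that
write the macro's only visible effect. A minimal sketch of a hypothetical
debug helper built on it (the function name and message are illustrative,
not part of this header; assumes <linux/printk.h> is available):

/* Hypothetical helper: log roughly where we are executing. */
static inline void debug_report_pc(const char *tag)
{
	printk(KERN_DEBUG "%s: executing near %p\n", tag, current_text_addr());
}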

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

#define STACK_TOP \
	(current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)

#define STACK_TOP_MAX	0x00120000000UL

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
	((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)

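The personality check gives a process running with ADDR_LIMIT_32BIT (e.g.
a binary that asked for a 32-bit address space) a compressed layout.
Spelled out, assuming only the definitions above:

/*
 * Illustrative only: the two layouts these macros select between.
 *
 *   default (42-bit):   TASK_SIZE          = 0x40000000000 (4 TiB)
 *                       TASK_UNMAPPED_BASE = TASK_SIZE / 2 (2 TiB)
 *                       STACK_TOP          = 0x120000000
 *
 *   ADDR_LIMIT_32BIT:   TASK_UNMAPPED_BASE = 0x40000000 (1 GiB)
 *                       STACK_TOP          = 0x80000000 (2 GiB)
 */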
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* This is dead.  Everything has been moved to thread_info.  */
struct thread_struct { };
#define INIT_THREAD  { }

/* Return saved PC of a blocked thread.  */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);

/* Do necessary setup to start up a newly executed thread.  */
struct pt_regs;
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

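start_thread() is called at the tail of exec to point the task's saved
user register frame at the freshly loaded image. A sketch of the typical
call site in a binfmt loader (the function and variable names here are
illustrative; the real callers live in fs/binfmt_*.c):

/* Illustrative: hand control to a new executable image. regs is the
 * task's saved user frame; entry and user_sp come from the loader.
 */
static void example_finish_exec(struct pt_regs *regs,
				unsigned long entry,
				unsigned long user_sp)
{
	start_thread(regs, entry, user_sp);
}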
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)

#define KSTK_ESP(tsk) \
	((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)

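KSTK_ESP() must special-case current: the running task's user stack
pointer lives in the USP register (read via rdusp()), and the copy in the
PCB is only written back on a context switch, so it can be stale. For any
other, switched-out task the PCB copy is authoritative. A hedged sketch
of a hypothetical consumer (assumes <linux/printk.h>):

/* Hypothetical: dump a task's user PC and SP, /proc-style. */
static inline void show_user_pc_sp(struct task_struct *tsk)
{
	printk(KERN_INFO "pc=%#lx sp=%#lx\n",
	       KSTK_EIP(tsk), KSTK_ESP(tsk));
}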
#define cpu_relax()			barrier()
#define cpu_relax_yield()		cpu_relax()
#define cpu_relax_lowlatency()		cpu_relax()

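This is the Alpha side of the commit subject: cpu_relax_yield() is meant
for busy-wait loops in which one CPU spins until other CPUs make
progress, where a hypervisor-aware architecture (s390 is the motivating
case) can yield the physical CPU to the waited-on sibling. Alpha, like
most architectures, simply maps it to cpu_relax(). A minimal sketch of
the intended call pattern (the rendezvous counter and loop are
illustrative; assumes <linux/atomic.h>):

/* Illustrative stop_machine-style wait: spin until all CPUs arrive.
 * On Alpha, cpu_relax_yield() is just a compiler barrier; on s390 it
 * can donate the timeslice to the CPUs we are waiting for.
 */
static inline void wait_for_rendezvous(atomic_t *threads_left)
{
	while (atomic_read(threads_left) > 0)
		cpu_relax_yield();
}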
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock)	do { } while (0)
#endif

extern inline void prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 0, 3);
}

extern inline void prefetchw(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}

#ifdef CONFIG_SMP
extern inline void spin_lock_prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}
#endif

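In __builtin_prefetch(ptr, rw, locality), the second argument is the
access intent (0 = read, 1 = write) and the third is temporal locality
(3 = keep in all cache levels), so prefetchw() and spin_lock_prefetch()
both fetch the line with write intent. A usage sketch (the node type and
list walk are illustrative, not from this header):

/* Illustrative: prefetch one node ahead while walking a list, so the
 * next node's cache-line load overlaps with the current iteration.
 */
struct example_node {
	struct example_node *next;
	unsigned long payload;
};

static inline unsigned long sum_list(struct example_node *n)
{
	unsigned long sum = 0;

	while (n) {
		prefetch(n->next);	/* read intent, high locality */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}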
#endif /* __ASM_ALPHA_PROCESSOR_H */