Commit | Line | Data |
---|---|---|
295cbf6d RB |
1 | /* |
2 | * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | |
3 | * Copyright (C) 2005 Mips Technologies, Inc | |
4 | */ | |
5 | #include <linux/cpu.h> | |
6 | #include <linux/cpumask.h> | |
7 | #include <linux/delay.h> | |
8 | #include <linux/kernel.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/security.h> | |
12 | #include <linux/types.h> | |
13 | #include <asm/uaccess.h> | |
14 | ||
15 | /* | |
16 | * CPU mask used to set process affinity for MT VPEs/TCs with FPUs | |
17 | */ | |
18 | cpumask_t mt_fpu_cpumask; | |
19 | ||
20 | static int fpaff_threshold = -1; | |
21 | unsigned long mt_fpemul_threshold = 0; | |
22 | ||
23 | /* | |
24 | * Replacement functions for the sys_sched_setaffinity() and | |
25 | * sys_sched_getaffinity() system calls, so that we can integrate | |
26 | * FPU affinity with the user's requested processor affinity. | |
27 | * This code is 98% identical with the sys_sched_setaffinity() | |
28 | * and sys_sched_getaffinity() system calls, and should be | |
29 | * updated when kernel/sched.c changes. | |
30 | */ | |
31 | ||
32 | /* | |
33 | * find_process_by_pid - find a process with a matching PID value. | |
34 | * used in sys_sched_set/getaffinity() in kernel/sched.c, so | |
35 | * cloned here. | |
36 | */ | |
37 | static inline struct task_struct *find_process_by_pid(pid_t pid) | |
38 | { | |
39 | return pid ? find_task_by_pid(pid) : current; | |
40 | } | |
41 | ||
42 | ||
43 | /* | |
44 | * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process | |
45 | */ | |
46 | asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, | |
47 | unsigned long __user *user_mask_ptr) | |
48 | { | |
49 | cpumask_t new_mask; | |
50 | cpumask_t effective_mask; | |
51 | int retval; | |
52 | struct task_struct *p; | |
53 | ||
54 | if (len < sizeof(new_mask)) | |
55 | return -EINVAL; | |
56 | ||
57 | if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) | |
58 | return -EFAULT; | |
59 | ||
60 | lock_cpu_hotplug(); | |
61 | read_lock(&tasklist_lock); | |
62 | ||
63 | p = find_process_by_pid(pid); | |
64 | if (!p) { | |
65 | read_unlock(&tasklist_lock); | |
66 | unlock_cpu_hotplug(); | |
67 | return -ESRCH; | |
68 | } | |
69 | ||
70 | /* | |
71 | * It is not safe to call set_cpus_allowed with the | |
72 | * tasklist_lock held. We will bump the task_struct's | |
73 | * usage count and drop tasklist_lock before invoking | |
74 | * set_cpus_allowed. | |
75 | */ | |
76 | get_task_struct(p); | |
77 | ||
78 | retval = -EPERM; | |
79 | if ((current->euid != p->euid) && (current->euid != p->uid) && | |
80 | !capable(CAP_SYS_NICE)) { | |
81 | read_unlock(&tasklist_lock); | |
82 | goto out_unlock; | |
83 | } | |
84 | ||
85 | retval = security_task_setscheduler(p, 0, NULL); | |
86 | if (retval) | |
87 | goto out_unlock; | |
88 | ||
89 | /* Record new user-specified CPU set for future reference */ | |
90 | p->thread.user_cpus_allowed = new_mask; | |
91 | ||
92 | /* Unlock the task list */ | |
93 | read_unlock(&tasklist_lock); | |
94 | ||
95 | /* Compute new global allowed CPU set if necessary */ | |
96 | if ((p->thread.mflags & MF_FPUBOUND) | |
97 | && cpus_intersects(new_mask, mt_fpu_cpumask)) { | |
98 | cpus_and(effective_mask, new_mask, mt_fpu_cpumask); | |
99 | retval = set_cpus_allowed(p, effective_mask); | |
100 | } else { | |
101 | p->thread.mflags &= ~MF_FPUBOUND; | |
102 | retval = set_cpus_allowed(p, new_mask); | |
103 | } | |
104 | ||
105 | ||
106 | out_unlock: | |
107 | put_task_struct(p); | |
108 | unlock_cpu_hotplug(); | |
109 | return retval; | |
110 | } | |
111 | ||
112 | /* | |
113 | * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process | |
114 | */ | |
115 | asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, | |
116 | unsigned long __user *user_mask_ptr) | |
117 | { | |
118 | unsigned int real_len; | |
119 | cpumask_t mask; | |
120 | int retval; | |
121 | struct task_struct *p; | |
122 | ||
123 | real_len = sizeof(mask); | |
124 | if (len < real_len) | |
125 | return -EINVAL; | |
126 | ||
127 | lock_cpu_hotplug(); | |
128 | read_lock(&tasklist_lock); | |
129 | ||
130 | retval = -ESRCH; | |
131 | p = find_process_by_pid(pid); | |
132 | if (!p) | |
133 | goto out_unlock; | |
134 | retval = security_task_getscheduler(p); | |
135 | if (retval) | |
136 | goto out_unlock; | |
137 | ||
138 | cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); | |
139 | ||
140 | out_unlock: | |
141 | read_unlock(&tasklist_lock); | |
142 | unlock_cpu_hotplug(); | |
143 | if (retval) | |
144 | return retval; | |
145 | if (copy_to_user(user_mask_ptr, &mask, real_len)) | |
146 | return -EFAULT; | |
147 | return real_len; | |
148 | } | |
149 | ||
150 | ||
/* Early-boot parser for the "fpaff=" kernel command-line option. */
static int __init fpaff_thresh(char *str)
{
	/* Store the integer argument in fpaff_threshold (stays -1 on bad input). */
	get_option(&str, &fpaff_threshold);
	/* Return 1: by __setup() convention, the option has been consumed. */
	return 1;
}
__setup("fpaff=", fpaff_thresh);
157 | ||
158 | /* | |
159 | * FPU Use Factor empirically derived from experiments on 34K | |
160 | */ | |
161 | #define FPUSEFACTOR 333 | |
162 | ||
163 | static __init int mt_fp_affinity_init(void) | |
164 | { | |
165 | if (fpaff_threshold >= 0) { | |
166 | mt_fpemul_threshold = fpaff_threshold; | |
167 | } else { | |
168 | mt_fpemul_threshold = | |
169 | (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ; | |
170 | } | |
171 | printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n", | |
172 | mt_fpemul_threshold); | |
173 | ||
174 | return 0; | |
175 | } | |
176 | arch_initcall(mt_fp_affinity_init); |