// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

61bed0ba AA |
20 | void dlm_release_callback(struct kref *ref) |
21 | { | |
22 | struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref); | |
23 | ||
24 | dlm_free_cb(cb); | |
25 | } | |
26 | ||
27 | void dlm_callback_set_last_ptr(struct dlm_callback **from, | |
28 | struct dlm_callback *to) | |
29 | { | |
30 | if (*from) | |
31 | kref_put(&(*from)->ref, dlm_release_callback); | |
32 | ||
33 | if (to) | |
34 | kref_get(&to->ref); | |
35 | ||
36 | *from = to; | |
37 | } | |
e7fd4179 | 38 | |
61bed0ba | 39 | void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb) |
8304d6f2 | 40 | { |
61bed0ba AA |
41 | struct dlm_callback *cb, *safe; |
42 | ||
43 | list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) { | |
44 | list_del(&cb->list); | |
45 | kref_put(&cb->ref, dlm_release_callback); | |
8304d6f2 | 46 | } |
61bed0ba | 47 | |
554d8496 | 48 | lkb->lkb_flags &= ~DLM_IFL_CB_PENDING; |
61bed0ba AA |
49 | |
50 | /* invalidate */ | |
51 | dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL); | |
52 | dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL); | |
53 | lkb->lkb_last_bast_mode = -1; | |
8304d6f2 DT |
54 | } |
55 | ||
/* Queue a completion (cast) or blocking (bast) callback on the lkb.
 *
 * Redundant basts are suppressed: a bast is dropped when its blocking
 * mode is compatible with the last granted (cast) mode, or when the most
 * recently queued callback is already a bast for the same or a more
 * restrictive mode.
 *
 * Returns DLM_ENQUEUE_CALLBACK_SUCCESS, DLM_ENQUEUE_CALLBACK_NEED_SCHED
 * (this is the first pending callback, caller must schedule delivery),
 * or DLM_ENQUEUE_CALLBACK_FAILURE if allocation fails.
 *
 * NOTE(review): appears to rely on the caller holding lkb->lkb_cb_lock
 * (dlm_add_cb() takes it around this call) — confirm before calling
 * from elsewhere.
 */
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	/* only the low byte of sbflags is carried in the callback */
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	/* first callback since the last drain: tell caller to schedule work */
	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	/* track last cast and last callback for the suppression checks above */
	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

 out:
	return rv;
}
120 | ||
61bed0ba AA |
121 | int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb) |
122 | { | |
123 | /* oldest undelivered cb is callbacks first entry */ | |
124 | *cb = list_first_entry_or_null(&lkb->lkb_callbacks, | |
125 | struct dlm_callback, list); | |
126 | if (!*cb) | |
127 | return DLM_DEQUEUE_CALLBACK_EMPTY; | |
128 | ||
129 | /* remove it from callbacks so shift others down */ | |
130 | list_del(&(*cb)->list); | |
131 | if (list_empty(&lkb->lkb_callbacks)) | |
132 | return DLM_DEQUEUE_CALLBACK_LAST; | |
133 | ||
134 | return DLM_DEQUEUE_CALLBACK_SUCCESS; | |
135 | } | |
136 | ||
/* Deliver a callback for the lkb: user-space locks are handed straight to
 * dlm_user_add_ast(); kernel locks are queued on the lkb and scheduled on
 * the lockspace callback workqueue, or parked on ls_cb_delay while
 * callbacks are suspended (LSFL_CB_DELAY set during recovery).
 */
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		/* hold an lkb reference for the work item; dropped in
		 * dlm_callback_work()
		 */
		kref_get(&lkb->lkb_ref);

		/* ls_cb_lock nests inside lkb_cb_lock here */
		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			/* callbacks suspended; delivered by dlm_callback_resume() */
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}
173 | ||
/* Workqueue handler: drain and deliver every queued callback on one lkb.
 *
 * lkb_cb_lock is dropped while the ast/bast functions run, so new
 * callbacks may be queued concurrently; the loop keeps dequeueing until
 * the list is found empty, then clears DLM_IFL_CB_PENDING under the lock
 * so a concurrent enqueue knows to reschedule the work.  Finally drops
 * the lkb reference taken in dlm_add_cb(), which may free the lkb.
 */
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	/* work is only queued after a callback was enqueued, so the
	 * queue should never be empty on entry
	 */
	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		goto out;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		/* drop the queue's reference on the delivered callback */
		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			/* cleared under the lock so a racing enqueue sees it
			 * and schedules new work
			 */
			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

 out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}
223 | ||
23e8e1aa | 224 | int dlm_callback_start(struct dlm_ls *ls) |
e7fd4179 | 225 | { |
23e8e1aa | 226 | ls->ls_callback_wq = alloc_workqueue("dlm_callback", |
aa9f1012 | 227 | WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); |
23e8e1aa DT |
228 | if (!ls->ls_callback_wq) { |
229 | log_print("can't start dlm_callback workqueue"); | |
230 | return -ENOMEM; | |
e7fd4179 DT |
231 | } |
232 | return 0; | |
233 | } | |
234 | ||
23e8e1aa | 235 | void dlm_callback_stop(struct dlm_ls *ls) |
e7fd4179 | 236 | { |
23e8e1aa DT |
237 | if (ls->ls_callback_wq) |
238 | destroy_workqueue(ls->ls_callback_wq); | |
e7fd4179 DT |
239 | } |
240 | ||
23e8e1aa | 241 | void dlm_callback_suspend(struct dlm_ls *ls) |
e7fd4179 | 242 | { |
9cb16d42 | 243 | if (ls->ls_callback_wq) { |
a4c0352b | 244 | spin_lock(&ls->ls_cb_lock); |
9cb16d42 | 245 | set_bit(LSFL_CB_DELAY, &ls->ls_flags); |
a4c0352b | 246 | spin_unlock(&ls->ls_cb_lock); |
e7fd4179 | 247 | |
23e8e1aa | 248 | flush_workqueue(ls->ls_callback_wq); |
9cb16d42 | 249 | } |
e7fd4179 DT |
250 | } |
251 | ||
216f0efd BP |
252 | #define MAX_CB_QUEUE 25 |
253 | ||
23e8e1aa | 254 | void dlm_callback_resume(struct dlm_ls *ls) |
e7fd4179 | 255 | { |
23e8e1aa | 256 | struct dlm_lkb *lkb, *safe; |
2f05ec43 | 257 | int count = 0, sum = 0; |
f70813d6 | 258 | bool empty; |
e7fd4179 | 259 | |
23e8e1aa DT |
260 | if (!ls->ls_callback_wq) |
261 | return; | |
262 | ||
216f0efd | 263 | more: |
a4c0352b | 264 | spin_lock(&ls->ls_cb_lock); |
23e8e1aa DT |
265 | list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) { |
266 | list_del_init(&lkb->lkb_cb_list); | |
267 | queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); | |
268 | count++; | |
216f0efd BP |
269 | if (count == MAX_CB_QUEUE) |
270 | break; | |
23e8e1aa | 271 | } |
f70813d6 | 272 | empty = list_empty(&ls->ls_cb_delay); |
85839f27 AA |
273 | if (empty) |
274 | clear_bit(LSFL_CB_DELAY, &ls->ls_flags); | |
a4c0352b | 275 | spin_unlock(&ls->ls_cb_lock); |
23e8e1aa | 276 | |
2f05ec43 | 277 | sum += count; |
f70813d6 | 278 | if (!empty) { |
216f0efd BP |
279 | count = 0; |
280 | cond_resched(); | |
281 | goto more; | |
282 | } | |
2f05ec43 AA |
283 | |
284 | if (sum) | |
285 | log_rinfo(ls, "%s %d", __func__, sum); | |
e7fd4179 DT |
286 | } |
287 |