/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
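
/* An editorial sketch of one MR's trip through the normal path
 * (illustration only; the real entry points are frwr_op_map and
 * frwr_op_unmap, defined later in this file):
 *
 *	frwr_op_map()    - post FAST_REG, MR becomes VALID
 *	  ... server performs RDMA READ or WRITE against the MR ...
 *	frwr_op_unmap()  - mark the MR INVALID, post LOCAL_INV,
 *	                   return the MR to rb_mws
 */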

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire while transport reconnect is in progress.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
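
/* For reference: the three states above are the MR states this file
 * tests and sets (FRMR_IS_INVALID, FRMR_IS_VALID, FRMR_IS_STALE).
 * A sketch of the assumed declaration; the authoritative enum lives
 * in xprt_rdma.h, not here:
 *
 *	enum rpcrdma_frmr_state {
 *		FRMR_IS_INVALID,	(ready for use, or LOCAL_INV pending)
 *		FRMR_IS_VALID,		(registered; owned by an RPC)
 *		FRMR_IS_STALE,		(a WR for this MR was flushed)
 *	};
 */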

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS (WQ_UNBOUND | WQ_MEM_RECLAIM)

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}
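
/* A minimal usage sketch for the workqueue setup above. The call
 * sites are assumed to be in the transport's module init and exit
 * paths, not in this file:
 *
 *	rc = frwr_alloc_recovery_wq();
 *	if (rc)
 *		return rc;
 *	...
 *	frwr_destroy_recovery_wq();
 */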

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC: %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
	if (!f->sg)
		goto out_list_err;

	sg_init_table(f->sg, depth);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->r.frmr.sg);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      devattr->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
		cdata->max_requests = devattr->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}
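
/* A worked example of the sizing above (all values illustrative):
 * with RPCRDMA_MAX_DATA_SEGS = 64 and a device
 * max_fast_reg_page_list_len of 16, delta starts at 48 and the loop
 * runs three times, so depth = 7 + 3 * 2 = 13 send queue WRs per
 * RPC. If the device's max_qp_wr is 4096, cdata->max_requests is
 * then capped at 4096 / 13 = 315.
 */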

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}
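
/* Illustrative arithmetic only (values assumed): if
 * rpcrdma_max_segments() returns 8 and ri_max_frmr_depth is 16, the
 * product is 128, which min_t clamps to RPCRDMA_MAX_DATA_SEGS when
 * that constant is smaller.
 */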

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r;

	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		dprintk("RPC: %s: frmr %p flushed\n", __func__, r);
	else
		pr_warn("RPC: %s: frmr %p error, status %s (%d)\n",
			__func__, r, ib_wc_status_msg(wc->status), wc->status);
	r->r.frmr.fr_state = FRMR_IS_STALE;
}
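
/* An MR marked STALE here is not recycled immediately: the next
 * frwr_op_map call that pulls it off rb_mws sees the non-INVALID
 * state and passes it to __frwr_queue_recovery for a deferred reset.
 */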

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2; /* head + tail */
	i *= buf->rb_max_requests; /* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}
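
/* Illustrative scaling for the arithmetic above (values assumed):
 * with RPCRDMA_MAX_DATA_SEGS = 64 and depth = 16, each RPC slot gets
 * max(64 / 16, 1) + 2 = 6 MRs; with rb_max_requests = 32, the loop
 * above allocates 6 * 32 = 192 FRMRs.
 */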

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

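	/* Discard any MR still attached to this segment from a prior
	 * attempt, then keep drawing from rb_mws until an INVALID MR
	 * turns up; STALE and VALID MRs are sent to the recovery
	 * worker instead (see the Transport recovery notes above).
	 */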
	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
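	/* The loop above stops early at a "hole": an FRMR maps a
	 * single virtually contiguous region, so every segment but
	 * the first must start on a page boundary, and every segment
	 * but the last must end on one. Leftover segments are mapped
	 * by a subsequent call.
	 */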
	frmr->sg_nents = i;

	dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
	if (!dma_nents) {
		pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->sg, frmr->sg_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->sg_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->sg_nents, mr->length);

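	/* Rotate the low octet of the rkey (the only portion of an
	 * rkey the consumer may set) so that each registration of
	 * this MR is distinguishable on the wire from the last.
	 */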
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = (uintptr_t)mw;
	reg_wr.wr.num_sge = 0;
	reg_wr.wr.send_flags = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = writing ?
			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr.wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_dir = direction;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->sg_nents;
	seg1->mr_len = mr->length;

	return frmr->sg_nents;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap		= frwr_op_unmap,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init		= frwr_op_init,
	.ro_destroy		= frwr_op_destroy,
	.ro_displayname		= "frwr",
};
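
/* Editorial note: this ops vector is one of the transport's
 * pluggable memory registration strategies. The selection happens
 * outside this file, when the transport opens its interface adapter
 * and picks a memory registration mode the device supports.
 */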