SUNRPC: Add trace event that reports reply page vector alignment
[linux-block.git] / net / sunrpc / xdr.c
CommitLineData
1da177e4
LT
1/*
2 * linux/net/sunrpc/xdr.c
3 *
4 * Generic XDR support.
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */
8
a246b010 9#include <linux/module.h>
5a0e3ad6 10#include <linux/slab.h>
1da177e4 11#include <linux/types.h>
1da177e4
LT
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/pagemap.h>
15#include <linux/errno.h>
1da177e4
LT
16#include <linux/sunrpc/xdr.h>
17#include <linux/sunrpc/msg_prot.h>
9d96acbc 18#include <linux/bvec.h>
5582863f 19#include <trace/events/sunrpc.h>
1da177e4
LT
20
21/*
22 * XDR functions for basic NFS types
23 */
d8ed029d
AD
24__be32 *
25xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
1da177e4
LT
26{
27 unsigned int quadlen = XDR_QUADLEN(obj->len);
28
29 p[quadlen] = 0; /* zero trailing bytes */
9f162d2a 30 *p++ = cpu_to_be32(obj->len);
1da177e4
LT
31 memcpy(p, obj->data, obj->len);
32 return p + XDR_QUADLEN(obj->len);
33}
468039ee 34EXPORT_SYMBOL_GPL(xdr_encode_netobj);
1da177e4 35
d8ed029d
AD
36__be32 *
37xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
1da177e4
LT
38{
39 unsigned int len;
40
98866b5a 41 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
1da177e4
LT
42 return NULL;
43 obj->len = len;
44 obj->data = (u8 *) p;
45 return p + XDR_QUADLEN(len);
46}
468039ee 47EXPORT_SYMBOL_GPL(xdr_decode_netobj);
1da177e4
LT
48
49/**
50 * xdr_encode_opaque_fixed - Encode fixed length opaque data
4dc3b16b
PP
51 * @p: pointer to current position in XDR buffer.
52 * @ptr: pointer to data to encode (or NULL)
53 * @nbytes: size of data.
1da177e4
LT
54 *
55 * Copy the array of data of length nbytes at ptr to the XDR buffer
56 * at position p, then align to the next 32-bit boundary by padding
57 * with zero bytes (see RFC1832).
58 * Note: if ptr is NULL, only the padding is performed.
59 *
60 * Returns the updated current XDR buffer position
61 *
62 */
d8ed029d 63__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4
LT
64{
65 if (likely(nbytes != 0)) {
66 unsigned int quadlen = XDR_QUADLEN(nbytes);
67 unsigned int padding = (quadlen << 2) - nbytes;
68
69 if (ptr != NULL)
70 memcpy(p, ptr, nbytes);
71 if (padding != 0)
72 memset((char *)p + nbytes, 0, padding);
73 p += quadlen;
74 }
75 return p;
76}
468039ee 77EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
1da177e4
LT
78
79/**
80 * xdr_encode_opaque - Encode variable length opaque data
4dc3b16b
PP
81 * @p: pointer to current position in XDR buffer.
82 * @ptr: pointer to data to encode (or NULL)
83 * @nbytes: size of data.
1da177e4
LT
84 *
85 * Returns the updated current XDR buffer position
86 */
d8ed029d 87__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4 88{
9f162d2a 89 *p++ = cpu_to_be32(nbytes);
1da177e4
LT
90 return xdr_encode_opaque_fixed(p, ptr, nbytes);
91}
468039ee 92EXPORT_SYMBOL_GPL(xdr_encode_opaque);
1da177e4 93
d8ed029d
AD
94__be32 *
95xdr_encode_string(__be32 *p, const char *string)
1da177e4
LT
96{
97 return xdr_encode_array(p, string, strlen(string));
98}
468039ee 99EXPORT_SYMBOL_GPL(xdr_encode_string);
1da177e4 100
d8ed029d 101__be32 *
e5cff482
CL
102xdr_decode_string_inplace(__be32 *p, char **sp,
103 unsigned int *lenp, unsigned int maxlen)
1da177e4 104{
e5cff482 105 u32 len;
1da177e4 106
98866b5a 107 len = be32_to_cpu(*p++);
e5cff482 108 if (len > maxlen)
1da177e4
LT
109 return NULL;
110 *lenp = len;
111 *sp = (char *) p;
112 return p + XDR_QUADLEN(len);
113}
468039ee 114EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
1da177e4 115
b4687da7
CL
116/**
117 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
118 * @buf: XDR buffer where string resides
119 * @len: length of string, in bytes
120 *
121 */
122void
123xdr_terminate_string(struct xdr_buf *buf, const u32 len)
124{
125 char *kaddr;
126
b8541786 127 kaddr = kmap_atomic(buf->pages[0]);
b4687da7 128 kaddr[buf->page_base + len] = '\0';
b8541786 129 kunmap_atomic(kaddr);
b4687da7 130}
0d961aa9 131EXPORT_SYMBOL_GPL(xdr_terminate_string);
b4687da7 132
9d96acbc
TM
133size_t
134xdr_buf_pagecount(struct xdr_buf *buf)
135{
136 if (!buf->page_len)
137 return 0;
138 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
139}
140
141int
142xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
143{
144 size_t i, n = xdr_buf_pagecount(buf);
145
146 if (n != 0 && buf->bvec == NULL) {
147 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
148 if (!buf->bvec)
149 return -ENOMEM;
150 for (i = 0; i < n; i++) {
151 buf->bvec[i].bv_page = buf->pages[i];
152 buf->bvec[i].bv_len = PAGE_SIZE;
153 buf->bvec[i].bv_offset = 0;
154 }
155 }
156 return 0;
157}
158
159void
160xdr_free_bvec(struct xdr_buf *buf)
161{
162 kfree(buf->bvec);
163 buf->bvec = NULL;
164}
165
1da177e4
LT
166void
167xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
168 struct page **pages, unsigned int base, unsigned int len)
169{
170 struct kvec *head = xdr->head;
171 struct kvec *tail = xdr->tail;
172 char *buf = (char *)head->iov_base;
173 unsigned int buflen = head->iov_len;
174
175 head->iov_len = offset;
176
177 xdr->pages = pages;
178 xdr->page_base = base;
179 xdr->page_len = len;
180
181 tail->iov_base = buf + offset;
182 tail->iov_len = buflen - offset;
183
184 xdr->buflen += len;
185}
468039ee 186EXPORT_SYMBOL_GPL(xdr_inline_pages);
1da177e4 187
1da177e4
LT
188/*
189 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
2c53040f
BH
190 */
191
/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* Shift right => walk both areas backwards from their ends so
	 * overlapping ranges are never clobbered before being read. */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		/* Copy no more than remains within both current pages. */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* same page: ranges may overlap, memmove is required */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
256
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		/* Never copy past the end of the current page. */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			/* advancing to the next page: flush the filled one */
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* flush the last (possibly partially filled) page */
	flush_dcache_page(*pgto);
}
300
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		/* Never read past the end of the current page. */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
1da177e4 340
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 *
 * Returns the number of bytes that were actually moved.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	/* Clamp: we cannot shrink by more than the head holds. */
	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			/* make room at the front of the tail */
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* open a 'len'-byte gap at the front of the page data */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		/* move the tail of the head into that gap */
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
425
/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 *
 * Returns the number of bytes that were actually moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	BUG_ON (len > pglen);

	/* space available for the tail within the overall buffer */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		/* grow the tail by up to 'len' bytes of free space */
		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* push existing tail data back to make room */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
			result += tail->iov_len - len;
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
		result += copy;
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
479
480void
481xdr_shift_buf(struct xdr_buf *buf, size_t len)
482{
483 xdr_shrink_bufhead(buf, len);
484}
468039ee 485EXPORT_SYMBOL_GPL(xdr_shift_buf);
1da177e4 486
4517d526
TM
487/**
488 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
489 * @xdr: pointer to struct xdr_stream
490 */
491unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
492{
493 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
494}
495EXPORT_SYMBOL_GPL(xdr_stream_pos);
496
1da177e4
LT
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	/* space in the buffer not reserved for page data or the tail */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	/* caller may already have encoded some data at @p */
	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
1da177e4 537
2825a7f9
BF
/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it.  But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void xdr_commit_encode(struct xdr_stream *xdr)
{
	/* scratch.iov_len is the size of the straddling fragment */
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	/* first fragment goes back to the previous buffer ... */
	memcpy(xdr->scratch.iov_base, page, shift);
	/* ... and the rest of the page slides down to its start */
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);
564
22cb4385
TM
565static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
566 size_t nbytes)
2825a7f9 567{
025911a5 568 __be32 *p;
2825a7f9
BF
569 int space_left;
570 int frag1bytes, frag2bytes;
571
572 if (nbytes > PAGE_SIZE)
5582863f 573 goto out_overflow; /* Bigger buffers require special handling */
2825a7f9 574 if (xdr->buf->len + nbytes > xdr->buf->buflen)
5582863f 575 goto out_overflow; /* Sorry, we're totally out of space */
2825a7f9
BF
576 frag1bytes = (xdr->end - xdr->p) << 2;
577 frag2bytes = nbytes - frag1bytes;
578 if (xdr->iov)
579 xdr->iov->iov_len += frag1bytes;
05638dc7 580 else
2825a7f9 581 xdr->buf->page_len += frag1bytes;
05638dc7 582 xdr->page_ptr++;
2825a7f9
BF
583 xdr->iov = NULL;
584 /*
585 * If the last encode didn't end exactly on a page boundary, the
586 * next one will straddle boundaries. Encode into the next
587 * page, then copy it back later in xdr_commit_encode. We use
588 * the "scratch" iov to track any temporarily unused fragment of
589 * space at the end of the previous buffer:
590 */
591 xdr->scratch.iov_base = xdr->p;
592 xdr->scratch.iov_len = frag1bytes;
593 p = page_address(*xdr->page_ptr);
594 /*
595 * Note this is where the next encode will start after we've
596 * shifted this one back:
597 */
598 xdr->p = (void *)p + frag2bytes;
599 space_left = xdr->buf->buflen - xdr->buf->len;
600 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
601 xdr->buf->page_len += frag2bytes;
602 xdr->buf->len += nbytes;
603 return p;
5582863f
CL
604out_overflow:
605 trace_rpc_xdr_overflow(xdr, nbytes);
606 return NULL;
2825a7f9
BF
607}
608
1da177e4
LT
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	/* q < p catches pointer-arithmetic wraparound */
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	/* account the reservation to the head kvec or the page data */
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
1da177e4 639
3e19ce76
BF
/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * that the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	/* can only truncate, never extend */
	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	/* First trim whatever of the excess lies in the tail. */
	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		/* still inside the tail: just rewind the pointer */
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	/* Next trim the page data. */
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		/* new end position falls inside the page data */
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	/* Finally, fall back into the head. */
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
708
db3f58a9
BF
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	/* offset (from buffer start) that xdr->end currently represents */
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		/* pull xdr->end back so it honours the new limit */
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);
737
1da177e4
LT
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	/* tail starts at the current encode position */
	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		/* pad the page data out to a 32-bit boundary; the pad
		 * bytes live at the start of the tail */
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
1da177e4 772
6650239a 773static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
1537693c 774 unsigned int len)
6650239a
TM
775{
776 if (len > iov->iov_len)
777 len = iov->iov_len;
1537693c 778 xdr->p = (__be32*)iov->iov_base;
6650239a
TM
779 xdr->end = (__be32*)(iov->iov_base + len);
780 xdr->iov = iov;
781 xdr->page_ptr = NULL;
782}
783
/*
 * Point the decode stream at offset @base within the buffer's page
 * data, exposing at most @len bytes (clamped to one page and to the
 * remaining page data).  Returns -EINVAL when @base is out of range.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* the window never extends past the end of this page */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
816
/*
 * Advance the decode stream to the next page of the page data,
 * falling through to the tail when the pages are exhausted.
 */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}
827
/*
 * Move the decode stream to the next sub-buffer in head -> pages ->
 * tail order.  Returns true while more data remains to decode.
 */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		/* no page data at all? jump straight to the tail */
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
	}
	return xdr->p != xdr->end;
}
838
1da177e4
LT
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	/* start decoding at the first non-empty sub-buffer */
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		xdr_set_iov(xdr, buf->head, buf->len);
	/* caller may already have consumed data up to @p */
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
1da177e4 866
f7da7a12 867/**
7ecce75f 868 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
f7da7a12
BH
869 * @xdr: pointer to xdr_stream struct
870 * @buf: pointer to XDR buffer from which to decode data
871 * @pages: list of pages to decode into
872 * @len: length in bytes of buffer in pages
873 */
874void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
875 struct page **pages, unsigned int len)
876{
877 memset(buf, 0, sizeof(*buf));
878 buf->pages = pages;
879 buf->page_len = len;
880 buf->buflen = len;
881 buf->len = len;
0ccc61b1 882 xdr_init_decode(xdr, buf, NULL, NULL);
f7da7a12
BH
883}
884EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
885
/*
 * Consume @nbytes (rounded up to whole XDR words) from the current
 * sub-buffer.  Returns the old position, or NULL if the request does
 * not fit within this sub-buffer or the remaining message.
 */
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	/* q < p catches pointer-arithmetic wraparound */
	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
ba8e452a 898
1da177e4 899/**
6650239a
TM
900 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
901 * @xdr: pointer to xdr_stream struct
902 * @buf: pointer to an empty buffer
903 * @buflen: size of 'buf'
904 *
905 * The scratch buffer is used when decoding from an array of pages.
906 * If an xdr_inline_decode() call spans across page boundaries, then
907 * we copy the data into the scratch buffer in order to allow linear
908 * access.
909 */
910void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
911{
912 xdr->scratch.iov_base = buf;
913 xdr->scratch.iov_len = buflen;
914}
915EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
916
/*
 * Linearize an @nbytes read that straddles a sub-buffer boundary by
 * copying both fragments into the scratch buffer.  Returns a pointer
 * into the scratch buffer, or NULL on overflow / scratch too small.
 */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	/* bytes left in the current sub-buffer = first fragment */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	/* second fragment comes from the next sub-buffer */
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
942
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	/* exhausted this sub-buffer: advance to the next one */
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	/* request straddles sub-buffers: linearize via the scratch iov */
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
1da177e4 970
/*
 * Align the buffer so that the page data starts at the current decode
 * position: any head data beyond the cursor is pushed into the pages,
 * and page data beyond @len is pushed into the tail.  Returns the
 * number of page-data bytes (possibly clamped to what remains).
 */
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	if (xdr->nwords == 0)
		return 0;

	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		offset = iov->iov_len - cur;
		copied = xdr_shrink_bufhead(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	/* never expose more than the message actually contains */
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		offset = buf->page_len - len;
		copied = xdr_shrink_pagelen(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
bd00f84b 1006
1da177e4
LT
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	/* XDR pad bytes that follow the page data, at the tail's start */
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
1da177e4 1048
8b23ea7b
TM
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int aligned = xdr_align_pages(xdr, len);

	/* Point the stream at the start of the (non-empty) page data */
	if (aligned != 0)
		xdr_set_page_base(xdr, 0, aligned);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
8b23ea7b 1070
1da177e4
LT
1071static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1072
1073void
1074xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1075{
1076 buf->head[0] = *iov;
1077 buf->tail[0] = empty_iov;
1078 buf->page_len = 0;
1079 buf->buflen = buf->len = iov->iov_len;
1080}
468039ee 1081EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1da177e4 1082
de4aee2e
BF
/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Carve out the head portion, if @base falls inside head[] */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_len = 0;
	}

	/* Carve out the page portion; page_base tracks intra-page offset */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	/* Whatever remains must come from tail[] */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_len = 0;
	}

	/* Any leftover base or len means the range exceeded @buf */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
1da177e4 1141
4c190e2f
JL
1142/**
1143 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1144 * @buf: buf to be trimmed
1145 * @len: number of bytes to reduce "buf" by
1146 *
1147 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1148 * that it's possible that we'll trim less than that amount if the xdr_buf is
1149 * too small, or if (for instance) it's all in the head and the parser has
1150 * already read too far into it.
1151 */
1152void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1153{
1154 size_t cur;
1155 unsigned int trim = len;
1156
1157 if (buf->tail[0].iov_len) {
1158 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1159 buf->tail[0].iov_len -= cur;
1160 trim -= cur;
1161 if (!trim)
1162 goto fix_len;
1163 }
1164
1165 if (buf->page_len) {
1166 cur = min_t(unsigned int, buf->page_len, trim);
1167 buf->page_len -= cur;
1168 trim -= cur;
1169 if (!trim)
1170 goto fix_len;
1171 }
1172
1173 if (buf->head[0].iov_len) {
1174 cur = min_t(size_t, buf->head[0].iov_len, trim);
1175 buf->head[0].iov_len -= cur;
1176 trim -= cur;
1177 }
1178fix_len:
1179 buf->len -= (len - trim);
1180}
1181EXPORT_SYMBOL_GPL(xdr_buf_trim);
1182
4e3e43ad 1183static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1da177e4 1184{
1e78957e 1185 unsigned int this_len;
1da177e4 1186
4e3e43ad
TM
1187 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1188 memcpy(obj, subbuf->head[0].iov_base, this_len);
1da177e4
LT
1189 len -= this_len;
1190 obj += this_len;
4e3e43ad 1191 this_len = min_t(unsigned int, len, subbuf->page_len);
1da177e4 1192 if (this_len)
4e3e43ad 1193 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1da177e4
LT
1194 len -= this_len;
1195 obj += this_len;
4e3e43ad
TM
1196 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1197 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1da177e4
LT
1198}
1199
bd8100e7 1200/* obj is assumed to point to allocated memory of size at least len: */
4e3e43ad 1201int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
bd8100e7
AG
1202{
1203 struct xdr_buf subbuf;
bd8100e7
AG
1204 int status;
1205
1206 status = xdr_buf_subsegment(buf, &subbuf, base, len);
4e3e43ad
TM
1207 if (status != 0)
1208 return status;
1209 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1210 return 0;
1211}
468039ee 1212EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
4e3e43ad
TM
1213
1214static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1215{
1216 unsigned int this_len;
1217
1218 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1219 memcpy(subbuf->head[0].iov_base, obj, this_len);
bd8100e7
AG
1220 len -= this_len;
1221 obj += this_len;
4e3e43ad 1222 this_len = min_t(unsigned int, len, subbuf->page_len);
bd8100e7 1223 if (this_len)
4e3e43ad 1224 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
bd8100e7
AG
1225 len -= this_len;
1226 obj += this_len;
4e3e43ad
TM
1227 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1228 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1229}
1230
1231/* obj is assumed to point to allocated memory of size at least len: */
1232int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1233{
1234 struct xdr_buf subbuf;
1235 int status;
1236
1237 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1238 if (status != 0)
1239 return status;
1240 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1241 return 0;
bd8100e7 1242}
c43abaed 1243EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
bd8100e7
AG
1244
1245int
1e78957e 1246xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1da177e4 1247{
d8ed029d 1248 __be32 raw;
1da177e4
LT
1249 int status;
1250
1251 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1252 if (status)
1253 return status;
98866b5a 1254 *obj = be32_to_cpu(raw);
1da177e4
LT
1255 return 0;
1256}
468039ee 1257EXPORT_SYMBOL_GPL(xdr_decode_word);
1da177e4 1258
bd8100e7 1259int
1e78957e 1260xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
bd8100e7 1261{
9f162d2a 1262 __be32 raw = cpu_to_be32(obj);
bd8100e7
AG
1263
1264 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1265}
468039ee 1266EXPORT_SYMBOL_GPL(xdr_encode_word);
bd8100e7 1267
1da177e4
LT
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	/* Length word precedes the object data */
	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	/* offset + 4 skips the length word just decoded */
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	/* Flatten the possibly-split object into the space found above */
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
bd8100e7
AG
1305
/* Returns 0 on success, or else a negative error code.
 *
 * Walks the fixed-size array at @base in @buf, applying desc->xcode to
 * each element in place where an element lies wholly within one segment
 * (head, a single page, or tail).  Elements that straddle a segment or
 * page boundary are staged through the @elem bounce buffer.  @encode
 * selects direction: non-zero writes elements into @buf, zero reads them.
 * @copied tracks how many bytes of a straddling element are staged so far.
 */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	/* No per-element callback: only the length word mattered */
	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* Whole elements that fit in head[] are handled in place */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/* Partial element at the head/page boundary: stage it */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/* Finish (or start) an element split across pages */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* Whole elements within the current page */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* Trailing fragment of an element at page end */
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* Advance the kmap window to the next page */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* First complete any element left half-staged by the pages */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	/* Drop any page mapping still held from the pages loop */
	if (ppages)
		kunmap(*ppages);
	return err;
}
1500
1501int
1502xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1503 struct xdr_array2_desc *desc)
1504{
1505 if (base >= buf->len)
1506 return -EINVAL;
1507
1508 return xdr_xcode_array2(buf, base, desc, 0);
1509}
468039ee 1510EXPORT_SYMBOL_GPL(xdr_decode_array2);
bd8100e7
AG
1511
1512int
1513xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1514 struct xdr_array2_desc *desc)
1515{
1516 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1517 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1518 return -EINVAL;
1519
1520 return xdr_xcode_array2(buf, base, desc, 1);
1521}
468039ee 1522EXPORT_SYMBOL_GPL(xdr_encode_array2);
37a4e6cb
OK
1523
/*
 * xdr_process_buf - apply @actor to @len bytes of @buf starting at @offset
 * @buf: xdr_buf whose data is to be processed
 * @offset: byte offset into @buf at which to start
 * @len: number of bytes to process
 * @actor: callback invoked with a one-entry scatterlist per contiguous span
 * @data: opaque cookie passed through to @actor
 *
 * Walks head[], then the page list (one page at a time), then tail[],
 * stopping early if @actor returns non-zero.  Returns 0 on success,
 * @actor's error, or -EINVAL if @offset + @len overruns the buffer.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	/* head[]: either skip past it, or feed its overlap to @actor */
	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	/* page list: process page-by-page, honoring page_base */
	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	/* tail[]: whatever of @len remains */
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	/* Leftover len means the request ran past the end of the buffer */
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
37a4e6cb 1590
0e779aa7
TM
1591/**
1592 * xdr_stream_decode_opaque - Decode variable length opaque
1593 * @xdr: pointer to xdr_stream
1594 * @ptr: location to store opaque data
1595 * @size: size of storage buffer @ptr
1596 *
1597 * Return values:
1598 * On success, returns size of object stored in *@ptr
1599 * %-EBADMSG on XDR buffer overflow
1600 * %-EMSGSIZE on overflow of storage buffer @ptr
1601 */
1602ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1603{
1604 ssize_t ret;
1605 void *p;
1606
1607 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1608 if (ret <= 0)
1609 return ret;
1610 memcpy(ptr, p, ret);
1611 return ret;
1612}
1613EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1614
1615/**
1616 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1617 * @xdr: pointer to xdr_stream
1618 * @ptr: location to store pointer to opaque data
1619 * @maxlen: maximum acceptable object size
1620 * @gfp_flags: GFP mask to use
1621 *
1622 * Return values:
1623 * On success, returns size of object stored in *@ptr
1624 * %-EBADMSG on XDR buffer overflow
1625 * %-EMSGSIZE if the size of the object would exceed @maxlen
1626 * %-ENOMEM on memory allocation failure
1627 */
1628ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1629 size_t maxlen, gfp_t gfp_flags)
1630{
1631 ssize_t ret;
1632 void *p;
1633
1634 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1635 if (ret > 0) {
1636 *ptr = kmemdup(p, ret, gfp_flags);
1637 if (*ptr != NULL)
1638 return ret;
1639 ret = -ENOMEM;
1640 }
1641 *ptr = NULL;
1642 return ret;
1643}
1644EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1645
1646/**
1647 * xdr_stream_decode_string - Decode variable length string
1648 * @xdr: pointer to xdr_stream
1649 * @str: location to store string
1650 * @size: size of storage buffer @str
1651 *
1652 * Return values:
1653 * On success, returns length of NUL-terminated string stored in *@str
1654 * %-EBADMSG on XDR buffer overflow
1655 * %-EMSGSIZE on overflow of storage buffer @str
1656 */
1657ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1658{
1659 ssize_t ret;
1660 void *p;
1661
1662 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1663 if (ret > 0) {
1664 memcpy(str, p, ret);
1665 str[ret] = '\0';
1666 return strlen(str);
1667 }
1668 *str = '\0';
1669 return ret;
1670}
1671EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
1672
5c741d4f
TM
1673/**
1674 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1675 * @xdr: pointer to xdr_stream
1676 * @str: location to store pointer to string
1677 * @maxlen: maximum acceptable string length
1678 * @gfp_flags: GFP mask to use
1679 *
1680 * Return values:
1681 * On success, returns length of NUL-terminated string stored in *@ptr
1682 * %-EBADMSG on XDR buffer overflow
1683 * %-EMSGSIZE if the size of the string would exceed @maxlen
1684 * %-ENOMEM on memory allocation failure
1685 */
1686ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1687 size_t maxlen, gfp_t gfp_flags)
1688{
1689 void *p;
1690 ssize_t ret;
1691
1692 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1693 if (ret > 0) {
1694 char *s = kmalloc(ret + 1, gfp_flags);
1695 if (s != NULL) {
1696 memcpy(s, p, ret);
1697 s[ret] = '\0';
1698 *str = s;
1699 return strlen(s);
1700 }
1701 ret = -ENOMEM;
1702 }
1703 *str = NULL;
1704 return ret;
1705}
1706EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);