SUNRPC: Add xdr_set_scratch_page() and xdr_reset_scratch_buffer()
[linux-block.git] / net / sunrpc / xdr.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/net/sunrpc/xdr.c
4 *
5 * Generic XDR support.
6 *
7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
8 */
9
a246b010 10#include <linux/module.h>
5a0e3ad6 11#include <linux/slab.h>
1da177e4 12#include <linux/types.h>
1da177e4
LT
13#include <linux/string.h>
14#include <linux/kernel.h>
15#include <linux/pagemap.h>
16#include <linux/errno.h>
1da177e4
LT
17#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/msg_prot.h>
9d96acbc 19#include <linux/bvec.h>
5582863f 20#include <trace/events/sunrpc.h>
1da177e4 21
e6ac0acc
AS
22static void _copy_to_pages(struct page **, size_t, const char *, size_t);
23
24
1da177e4
LT
25/*
26 * XDR functions for basic NFS types
27 */
d8ed029d
AD
/**
 * xdr_encode_netobj - Encode a counted opaque (netobj) into an XDR buffer
 * @p: pointer to current position in XDR buffer
 * @obj: netobj (length + data) to encode
 *
 * Writes the 32-bit big-endian length, then the data bytes; the final
 * XDR quad is pre-zeroed so any trailing pad bytes are zero (RFC 1832).
 *
 * Returns the updated position in the XDR buffer.
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
1da177e4 39
d8ed029d
AD
/**
 * xdr_decode_netobj - Decode a counted opaque (netobj) without copying
 * @p: pointer to current position in XDR buffer
 * @obj: netobj to fill in; obj->data is left pointing into the buffer
 *
 * Returns the position past the object, or NULL if the encoded length
 * exceeds XDR_MAX_NETOBJ.
 */
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
1da177e4
LT
52
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		/* bytes needed to reach the next 32-bit boundary */
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
1da177e4
LT
82
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Writes the 32-bit length word, then defers to
 * xdr_encode_opaque_fixed() for the data and pad bytes.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
1da177e4 97
d8ed029d
AD
98__be32 *
99xdr_encode_string(__be32 *p, const char *string)
1da177e4
LT
100{
101 return xdr_encode_array(p, string, strlen(string));
102}
468039ee 103EXPORT_SYMBOL_GPL(xdr_encode_string);
1da177e4 104
/**
 * xdr_decode_string_inplace - Decode a string without copying it
 * @p: pointer to current position in XDR buffer
 * @sp: OUT: set to point at the string bytes inside the buffer
 * @lenp: OUT: string length in bytes
 * @maxlen: largest acceptable string length
 *
 * Returns the position past the string (length word plus padded data),
 * or NULL if the encoded length exceeds @maxlen.
 */
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
1da177e4 119
b4687da7
CL
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 * NOTE(review): only pages[0] is mapped, so this assumes
 * page_base + len falls within the first page — verify with callers.
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
b4687da7 136
9d96acbc
TM
/*
 * Number of pages spanned by the buffer's page data, accounting for
 * the offset of the first byte (page_base) within the first page.
 */
size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
144
/*
 * Lazily allocate a bio_vec array covering buf's pages, one full-page
 * entry per page.  A no-op if the buffer has no page data or the array
 * already exists.  Returns 0 on success or -ENOMEM.
 */
int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}
162
163void
164xdr_free_bvec(struct xdr_buf *buf)
165{
166 kfree(buf->bvec);
167 buf->bvec = NULL;
168}
169
cf500bac
CL
/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 * Splits the head kvec at @offset: bytes beyond it become the tail,
 * and the supplied page vector is inserted in between.
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	/* Page data needing no XDR pad frees the tail's spare pad word */
	if ((xdr->page_len & 3) == 0)
		tail->iov_len -= sizeof(__be32);

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
1da177e4 202
1da177e4
LT
203/*
204 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
2c53040f
BH
205 */
206
e6ac0acc
AS
/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 * they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Advance to the next page when an offset runs off the end */
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE){
			pgfrom_base = 0;
			pgfrom++;
		}

		/* Copy no more than remains in either the src or dst page */
		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* same page: ranges may overlap, memmove is safe */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}
268
/*
 * Move up to @len bytes from the start of the tail kvec into the page
 * vector at offset @pgto, then slide any remaining tail data down to
 * the front of the tail.
 */
static void
_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
{
	struct kvec *tail = buf->tail;

	if (len > tail->iov_len)
		len = tail->iov_len;

	_copy_to_pages(buf->pages,
		       buf->page_base + pgto,
		       (char *)tail->iov_base,
		       len);
	tail->iov_len -= len;

	if (tail->iov_len > 0)
		memmove((char *)tail->iov_base,
				tail->iov_base + len,
				tail->iov_len);
}
288
/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 * they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* Copy backwards, from the ends of both areas */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* same page: ranges may overlap, memmove is safe */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
353
43f0f081
AS
/*
 * Shift @len bytes of page data, starting at page offset @pgfrom, into
 * the tail kvec, first sliding any existing tail data out of the way.
 * Returns the number of bytes actually moved into the tail.
 */
static unsigned int
_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
{
	struct kvec *tail = buf->tail;
	unsigned int tailbuf_len;
	unsigned int result = 0;
	size_t copy;

	/* total room available to the tail */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		if (len > free_space)
			len = free_space;

		tail->iov_len += free_space;
		copy = len;

		if (tail->iov_len > len) {
			/* make room at the front of the tail */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - free_space);
			result += tail->iov_len - free_space;
		} else
			copy = tail->iov_len;

		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages,
				 buf->page_base + pgfrom,
				 copy);
		result += copy;
	}

	return result;
}
393
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		/* Copy at most up to the end of the current page */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* flush the final (possibly partial) page */
	flush_dcache_page(*pgto);
}
437
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		/* Copy at most up to the end of the current page */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
1da177e4 477
84ce182a
AS
/**
 * _zero_pages
 * @pages: array of pages
 * @pgbase: beginning page vector address
 * @len: length
 *
 * Zeroes @len bytes of page data starting at offset @pgbase.
 */
static void
_zero_pages(struct page **pages, size_t pgbase, size_t len)
{
	struct page **page;
	char *vpage;
	size_t zero;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		/* Zero at most up to the end of the current page */
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}
509
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 *
 * Returns the number of bytes moved/copied in the process.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			/* make room at the front of the tail */
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* make room at the front of the page data */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
594
/**
 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	unsigned int pglen = buf->page_len;
	unsigned int result;

	if (len > buf->page_len)
		len = buf-> page_len;

	/* move the last @len bytes of page data into the tail */
	result = _shift_data_right_tail(buf, pglen - len, len);
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
621
622void
623xdr_shift_buf(struct xdr_buf *buf, size_t len)
624{
625 xdr_shrink_bufhead(buf, len);
626}
468039ee 627EXPORT_SYMBOL_GPL(xdr_shift_buf);
1da177e4 628
4517d526
TM
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	/* total words minus words still to decode, converted to bytes */
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
638
cf1f08ca
AS
/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	/* caller is expected to have decoded past the head kvec */
	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);
651
1da177e4
LT
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	/* space available to head after pages and tail are accounted for */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_reset_scratch_buffer(xdr);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	/* Caller may have already encoded up to @p; catch up with it */
	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
1da177e4 692
2825a7f9
BF
/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
	/* scratch.iov_len bytes were written into the start of the next
	 * page instead of the end of the previous fragment */
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr_reset_scratch_buffer(xdr);
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);
719
22cb4385
TM
/*
 * Slow path for xdr_reserve_space(): the requested space straddles the
 * end of the current buffer.  Hands back @nbytes of contiguous space in
 * the next page, remembering the unused tail of the previous buffer in
 * the scratch iov so xdr_commit_encode() can shift the data back later.
 * Returns NULL (and emits a trace event) if the buffer is exhausted.
 */
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
762
1da177e4
LT
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	/* q < p catches pointer wrap-around */
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
1da177e4 793
403217f3
AS
794
/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 *
 * Returns the number of kvec entries filled in, or -EIO on reservation
 * failure.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		/* never reserve across a page boundary in one call */
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
838
3e19ce76
BF
/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	/* First trim the tail... */
	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		/* new position still lands in the tail */
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	/* ...then the page data... */
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		/* new position lands inside the page data */
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	/* ...and finally fall back into the head */
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
907
db3f58a9
BF
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	/* offset of xdr->end from the start of the whole buffer */
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);
936
1da177e4
LT
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 * Attaches @pages as the buffer's page data and points the tail just
 * past them, inserting an XDR pad word when @len is not 4-byte aligned.
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		/* pad the page data out to a quad boundary; the pad
		 * bytes come from the zero word written below */
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
1da177e4 971
/*
 * Point the decode stream at @iov, clamping the readable span to the
 * smaller of @len and the kvec's own length.
 */
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
			unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
982
/*
 * Position the decode stream at offset @base (up to @len bytes) within
 * the buffer's page data.  xdr->p/xdr->end are confined to the single
 * page containing @base.  Returns -EINVAL if @base is beyond page_len.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
1015
f7d61ee4
AS
/*
 * Position the stream within the page data, falling back to the tail
 * kvec when @base lies past the end of the page data.
 */
static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}
1022
6650239a
TM
/*
 * Advance the decode stream to the start of the page following the
 * current one (or to the tail once the page data is exhausted).
 */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	xdr_set_page(xdr, newbase, PAGE_SIZE);
}
1032
/*
 * Step to the next region of the xdr_buf (head -> pages -> tail).
 * Returns true if more data is available to decode.
 */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		xdr_set_page(xdr, 0, PAGE_SIZE);
	}
	return xdr->p != xdr->end;
}
1042
1da177e4
LT
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr_reset_scratch_buffer(xdr);
	xdr->nwords = XDR_QUADLEN(buf->len);
	/* start at the first non-empty region of the buffer */
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		xdr_set_iov(xdr, buf->head, buf->len);
	/* Caller may already have consumed up to @p; catch up with it */
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
1da177e4 1069
f7da7a12 1070/**
7ecce75f 1071 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
f7da7a12
BH
1072 * @xdr: pointer to xdr_stream struct
1073 * @buf: pointer to XDR buffer from which to decode data
1074 * @pages: list of pages to decode into
1075 * @len: length in bytes of buffer in pages
1076 */
1077void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
1078 struct page **pages, unsigned int len)
1079{
1080 memset(buf, 0, sizeof(*buf));
1081 buf->pages = pages;
1082 buf->page_len = len;
1083 buf->buflen = len;
1084 buf->len = len;
0ccc61b1 1085 xdr_init_decode(xdr, buf, NULL, NULL);
f7da7a12
BH
1086}
1087EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
1088
/*
 * Advance the stream by @nbytes (rounded up to a full XDR word) and
 * return a pointer to the data, or NULL if the current segment does not
 * contain that many decodable bytes.
 */
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	/* q < p catches pointer-arithmetic wrap-around for huge nbytes */
	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
ba8e452a 1101
6650239a
TM
/*
 * The requested object straddles the boundary between the current
 * segment and the next.  Copy both pieces into the caller's
 * pre-registered scratch buffer so it can be decoded contiguously.
 * Returns a pointer into the scratch buffer, or NULL on overflow.
 */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	/* First fragment: everything left in the current segment */
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	/* Second fragment: the remainder, taken from the next segment */
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
1127
1128/**
1129 * xdr_inline_decode - Retrieve XDR data to decode
1da177e4
LT
1130 * @xdr: pointer to xdr_stream struct
1131 * @nbytes: number of bytes of data to decode
1132 *
1133 * Check if the input buffer is long enough to enable us to decode
1134 * 'nbytes' more bytes of data starting at the current position.
1135 * If so return the current pointer, then update the current
1136 * pointer position.
1137 */
d8ed029d 1138__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1da177e4 1139{
6650239a 1140 __be32 *p;
1da177e4 1141
5582863f 1142 if (unlikely(nbytes == 0))
6650239a
TM
1143 return xdr->p;
1144 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
5582863f 1145 goto out_overflow;
6650239a
TM
1146 p = __xdr_inline_decode(xdr, nbytes);
1147 if (p != NULL)
1148 return p;
1149 return xdr_copy_to_scratch(xdr, nbytes);
5582863f
CL
1150out_overflow:
1151 trace_rpc_xdr_overflow(xdr, nbytes);
1152 return NULL;
1da177e4 1153}
468039ee 1154EXPORT_SYMBOL_GPL(xdr_inline_decode);
1da177e4 1155
/*
 * Shrink head[] so that it ends exactly at the current decode position,
 * pulling page (and tail) data leftwards accordingly.  This aligns the
 * start of the page data with the stream's current pointer.
 */
static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		offset = iov->iov_len - cur;
		copied = xdr_shrink_bufhead(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		/* Recompute remaining words: buf->len changed above */
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
}
1171
/*
 * Align the page data with the decode position and clamp @len to the
 * data actually available, truncating excess page data into the tail.
 * Returns the resulting byte count of page data (0 if stream is empty).
 */
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	/* Clamp to what remains in the stream */
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		offset = buf->page_len - len;
		copied = xdr_shrink_pagelen(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
bd00f84b 1198
1da177e4
LT
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	/* XDR pad bytes in the final word of the page data */
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
1da177e4 1240
e6ac0acc
AS
1241uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
1242{
1243 struct xdr_buf *buf = xdr->buf;
1244 unsigned int from, bytes;
1245 unsigned int shift = 0;
1246
1247 if ((offset + length) < offset ||
1248 (offset + length) > buf->page_len)
1249 length = buf->page_len - offset;
1250
1251 xdr_realign_pages(xdr);
1252 from = xdr_page_pos(xdr);
1253 bytes = xdr->nwords << 2;
1254 if (length < bytes)
1255 bytes = length;
1256
1257 /* Move page data to the left */
1258 if (from > offset) {
1259 shift = min_t(unsigned int, bytes, buf->page_len - from);
1260 _shift_data_left_pages(buf->pages,
1261 buf->page_base + offset,
1262 buf->page_base + from,
1263 shift);
1264 bytes -= shift;
1265
1266 /* Move tail data into the pages, if necessary */
1267 if (bytes > 0)
1268 _shift_data_left_tail(buf, offset + shift, bytes);
1269 }
1270
1271 xdr->nwords -= XDR_QUADLEN(length);
1272 xdr_set_page(xdr, from + length, PAGE_SIZE);
1273 return length;
1274}
1275EXPORT_SYMBOL_GPL(xdr_align_data);
1276
84ce182a
AS
1277uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
1278{
1279 struct xdr_buf *buf = xdr->buf;
1280 unsigned int bytes;
1281 unsigned int from;
1282 unsigned int truncated = 0;
1283
1284 if ((offset + length) < offset ||
1285 (offset + length) > buf->page_len)
1286 length = buf->page_len - offset;
1287
1288 xdr_realign_pages(xdr);
1289 from = xdr_page_pos(xdr);
1290 bytes = xdr->nwords << 2;
1291
1292 if (offset + length + bytes > buf->page_len) {
1293 unsigned int shift = (offset + length + bytes) - buf->page_len;
1294 unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
1295 truncated = shift - res;
1296 xdr->nwords -= XDR_QUADLEN(truncated);
1297 bytes -= shift;
1298 }
1299
1300 /* Now move the page data over and zero pages */
1301 if (bytes > 0)
1302 _shift_data_right_pages(buf->pages,
1303 buf->page_base + offset + length,
1304 buf->page_base + from,
1305 bytes);
1306 _zero_pages(buf->pages, buf->page_base + offset, length);
1307
1308 buf->len += length - (from - offset) - truncated;
1309 xdr_set_page(xdr, offset + length, PAGE_SIZE);
1310 return length;
1311}
1312EXPORT_SYMBOL_GPL(xdr_expand_hole);
1313
8b23ea7b
TM
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[]
 * buffer into the page list, truncating any excess into the XDR tail[],
 * then repositions the current pointer at the start of the first page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int aligned = xdr_align_pages(xdr, len);

	/* Nothing to do if no page data remains */
	if (aligned == 0)
		return;
	xdr_set_page_base(xdr, 0, aligned);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
8b23ea7b 1335
/* Zero-length kvec used to blank out the unused tail segment */
static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

/*
 * Initialize @buf so that all of its data lives in head[0], taken from
 * @iov; the page list and tail are left empty.
 */
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1da177e4 1347
de4aee2e
BF
/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base of length are out of bounds.
 */
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
		       unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Carve a slice of head[0], or skip past it entirely */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	/* Carve the page data */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	/* Carve a slice of tail[0] */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	/* Any leftover base or len means the range fell outside @buf */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
1da177e4 1409
0a8e7b7d
CL
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	/* Trim from the end of the buffer: tail, then pages, then head */
	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	/* Reduce total length by however much was actually trimmed */
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
1450
/*
 * Gather @len bytes from @subbuf into the flat buffer @obj, reading
 * head[0], then the page list, then tail[0].  @subbuf is assumed to be
 * a subsegment of exactly @len bytes (see xdr_buf_subsegment()).
 */
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
1467
bd8100e7 1468/* obj is assumed to point to allocated memory of size at least len: */
4e3e43ad 1469int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
bd8100e7
AG
1470{
1471 struct xdr_buf subbuf;
bd8100e7
AG
1472 int status;
1473
1474 status = xdr_buf_subsegment(buf, &subbuf, base, len);
4e3e43ad
TM
1475 if (status != 0)
1476 return status;
1477 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1478 return 0;
1479}
468039ee 1480EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
4e3e43ad
TM
1481
/*
 * Scatter @len bytes from the flat buffer @obj into @subbuf, filling
 * head[0], then the page list, then tail[0].  @subbuf is assumed to be
 * a subsegment of exactly @len bytes (see xdr_buf_subsegment()).
 */
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
1498
1499/* obj is assumed to point to allocated memory of size at least len: */
1500int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1501{
1502 struct xdr_buf subbuf;
1503 int status;
1504
1505 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1506 if (status != 0)
1507 return status;
1508 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1509 return 0;
bd8100e7 1510}
c43abaed 1511EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
bd8100e7
AG
1512
1513int
1e78957e 1514xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1da177e4 1515{
d8ed029d 1516 __be32 raw;
1da177e4
LT
1517 int status;
1518
1519 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1520 if (status)
1521 return status;
98866b5a 1522 *obj = be32_to_cpu(raw);
1da177e4
LT
1523 return 0;
1524}
468039ee 1525EXPORT_SYMBOL_GPL(xdr_decode_word);
1da177e4 1526
bd8100e7 1527int
1e78957e 1528xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
bd8100e7 1529{
9f162d2a 1530 __be32 raw = cpu_to_be32(obj);
bd8100e7
AG
1531
1532 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1533}
468039ee 1534EXPORT_SYMBOL_GPL(xdr_encode_word);
bd8100e7 1535
bd8100e7
AG
/* Returns 0 on success, or else a negative error code.
 *
 * Walks an XDR counted array of fixed-size elements that may span the
 * head kvec, the page list and the tail kvec of @buf, calling
 * desc->xcode() on each element.  Elements that straddle a segment or
 * page boundary are staged piecewise in the temporary @elem buffer,
 * with @copied tracking how much of the partial element is filled.
 */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	/* The array length word precedes the elements at @base */
	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	/* Total payload bytes still to process */
	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* Whole elements that fit in the head */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/* Partial element at the end of the head: stage in @elem */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/* Complete a partial element carried over from the
			 * previous segment/page, or start one when a whole
			 * element does not fit in this page. */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* Whole elements within this page */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* Partial element at the end of this page */
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* More data: map the next page */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* Finish any element left partially staged in @elem */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
1730
1731int
1732xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1733 struct xdr_array2_desc *desc)
1734{
1735 if (base >= buf->len)
1736 return -EINVAL;
1737
1738 return xdr_xcode_array2(buf, base, desc, 0);
1739}
468039ee 1740EXPORT_SYMBOL_GPL(xdr_decode_array2);
bd8100e7
AG
1741
1742int
1743xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1744 struct xdr_array2_desc *desc)
1745{
1746 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1747 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1748 return -EINVAL;
1749
1750 return xdr_xcode_array2(buf, base, desc, 1);
1751}
468039ee 1752EXPORT_SYMBOL_GPL(xdr_encode_array2);
37a4e6cb
OK
1753
/*
 * Apply @actor to @len bytes of @buf starting at @offset, presenting the
 * data one scatterlist entry at a time (at most a page per call).  Stops
 * early if @actor returns non-zero.  Returns 0 on success, the first
 * non-zero return from @actor, or -EINVAL if the range extends past the
 * end of @buf.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	/* head[0] */
	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	/* page list, one page per actor call */
	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	/* tail[0] */
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	/* Leftover length means the range extended past the buffer */
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
37a4e6cb 1820
0e779aa7
TM
1821/**
1822 * xdr_stream_decode_opaque - Decode variable length opaque
1823 * @xdr: pointer to xdr_stream
1824 * @ptr: location to store opaque data
1825 * @size: size of storage buffer @ptr
1826 *
1827 * Return values:
1828 * On success, returns size of object stored in *@ptr
1829 * %-EBADMSG on XDR buffer overflow
1830 * %-EMSGSIZE on overflow of storage buffer @ptr
1831 */
1832ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1833{
1834 ssize_t ret;
1835 void *p;
1836
1837 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1838 if (ret <= 0)
1839 return ret;
1840 memcpy(ptr, p, ret);
1841 return ret;
1842}
1843EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1844
1845/**
1846 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1847 * @xdr: pointer to xdr_stream
1848 * @ptr: location to store pointer to opaque data
1849 * @maxlen: maximum acceptable object size
1850 * @gfp_flags: GFP mask to use
1851 *
1852 * Return values:
1853 * On success, returns size of object stored in *@ptr
1854 * %-EBADMSG on XDR buffer overflow
1855 * %-EMSGSIZE if the size of the object would exceed @maxlen
1856 * %-ENOMEM on memory allocation failure
1857 */
1858ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1859 size_t maxlen, gfp_t gfp_flags)
1860{
1861 ssize_t ret;
1862 void *p;
1863
1864 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1865 if (ret > 0) {
1866 *ptr = kmemdup(p, ret, gfp_flags);
1867 if (*ptr != NULL)
1868 return ret;
1869 ret = -ENOMEM;
1870 }
1871 *ptr = NULL;
1872 return ret;
1873}
1874EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1875
1876/**
1877 * xdr_stream_decode_string - Decode variable length string
1878 * @xdr: pointer to xdr_stream
1879 * @str: location to store string
1880 * @size: size of storage buffer @str
1881 *
1882 * Return values:
1883 * On success, returns length of NUL-terminated string stored in *@str
1884 * %-EBADMSG on XDR buffer overflow
1885 * %-EMSGSIZE on overflow of storage buffer @str
1886 */
1887ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1888{
1889 ssize_t ret;
1890 void *p;
1891
1892 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1893 if (ret > 0) {
1894 memcpy(str, p, ret);
1895 str[ret] = '\0';
1896 return strlen(str);
1897 }
1898 *str = '\0';
1899 return ret;
1900}
1901EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
1902
5c741d4f
TM
1903/**
1904 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1905 * @xdr: pointer to xdr_stream
1906 * @str: location to store pointer to string
1907 * @maxlen: maximum acceptable string length
1908 * @gfp_flags: GFP mask to use
1909 *
1910 * Return values:
1911 * On success, returns length of NUL-terminated string stored in *@ptr
1912 * %-EBADMSG on XDR buffer overflow
1913 * %-EMSGSIZE if the size of the string would exceed @maxlen
1914 * %-ENOMEM on memory allocation failure
1915 */
1916ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1917 size_t maxlen, gfp_t gfp_flags)
1918{
1919 void *p;
1920 ssize_t ret;
1921
1922 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1923 if (ret > 0) {
1924 char *s = kmalloc(ret + 1, gfp_flags);
1925 if (s != NULL) {
1926 memcpy(s, p, ret);
1927 s[ret] = '\0';
1928 *str = s;
1929 return strlen(s);
1930 }
1931 ret = -ENOMEM;
1932 }
1933 *str = NULL;
1934 return ret;
1935}
1936EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);