SUNRPC: Simplify the end-of-buffer calculation in xdr_read_pages
[linux-2.6-block.git] / net / sunrpc / xdr.c
CommitLineData
1da177e4
LT
1/*
2 * linux/net/sunrpc/xdr.c
3 *
4 * Generic XDR support.
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */
8
a246b010 9#include <linux/module.h>
5a0e3ad6 10#include <linux/slab.h>
1da177e4 11#include <linux/types.h>
1da177e4
LT
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/pagemap.h>
15#include <linux/errno.h>
1da177e4
LT
16#include <linux/sunrpc/xdr.h>
17#include <linux/sunrpc/msg_prot.h>
18
19/*
20 * XDR functions for basic NFS types
21 */
d8ed029d
AD
22__be32 *
23xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
1da177e4
LT
24{
25 unsigned int quadlen = XDR_QUADLEN(obj->len);
26
27 p[quadlen] = 0; /* zero trailing bytes */
9f162d2a 28 *p++ = cpu_to_be32(obj->len);
1da177e4
LT
29 memcpy(p, obj->data, obj->len);
30 return p + XDR_QUADLEN(obj->len);
31}
468039ee 32EXPORT_SYMBOL_GPL(xdr_encode_netobj);
1da177e4 33
d8ed029d
AD
34__be32 *
35xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
1da177e4
LT
36{
37 unsigned int len;
38
98866b5a 39 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
1da177e4
LT
40 return NULL;
41 obj->len = len;
42 obj->data = (u8 *) p;
43 return p + XDR_QUADLEN(len);
44}
468039ee 45EXPORT_SYMBOL_GPL(xdr_decode_netobj);
1da177e4
LT
46
47/**
48 * xdr_encode_opaque_fixed - Encode fixed length opaque data
4dc3b16b
PP
49 * @p: pointer to current position in XDR buffer.
50 * @ptr: pointer to data to encode (or NULL)
51 * @nbytes: size of data.
1da177e4
LT
52 *
53 * Copy the array of data of length nbytes at ptr to the XDR buffer
54 * at position p, then align to the next 32-bit boundary by padding
55 * with zero bytes (see RFC1832).
56 * Note: if ptr is NULL, only the padding is performed.
57 *
58 * Returns the updated current XDR buffer position
59 *
60 */
d8ed029d 61__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4
LT
62{
63 if (likely(nbytes != 0)) {
64 unsigned int quadlen = XDR_QUADLEN(nbytes);
65 unsigned int padding = (quadlen << 2) - nbytes;
66
67 if (ptr != NULL)
68 memcpy(p, ptr, nbytes);
69 if (padding != 0)
70 memset((char *)p + nbytes, 0, padding);
71 p += quadlen;
72 }
73 return p;
74}
468039ee 75EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
1da177e4
LT
76
77/**
78 * xdr_encode_opaque - Encode variable length opaque data
4dc3b16b
PP
79 * @p: pointer to current position in XDR buffer.
80 * @ptr: pointer to data to encode (or NULL)
81 * @nbytes: size of data.
1da177e4
LT
82 *
83 * Returns the updated current XDR buffer position
84 */
d8ed029d 85__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
1da177e4 86{
9f162d2a 87 *p++ = cpu_to_be32(nbytes);
1da177e4
LT
88 return xdr_encode_opaque_fixed(p, ptr, nbytes);
89}
468039ee 90EXPORT_SYMBOL_GPL(xdr_encode_opaque);
1da177e4 91
d8ed029d
AD
92__be32 *
93xdr_encode_string(__be32 *p, const char *string)
1da177e4
LT
94{
95 return xdr_encode_array(p, string, strlen(string));
96}
468039ee 97EXPORT_SYMBOL_GPL(xdr_encode_string);
1da177e4 98
d8ed029d 99__be32 *
e5cff482
CL
100xdr_decode_string_inplace(__be32 *p, char **sp,
101 unsigned int *lenp, unsigned int maxlen)
1da177e4 102{
e5cff482 103 u32 len;
1da177e4 104
98866b5a 105 len = be32_to_cpu(*p++);
e5cff482 106 if (len > maxlen)
1da177e4
LT
107 return NULL;
108 *lenp = len;
109 *sp = (char *) p;
110 return p + XDR_QUADLEN(len);
111}
468039ee 112EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
1da177e4 113
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 * Overwrites the byte immediately following the string with '\0'.
 * NOTE(review): this maps only pages[0], so it assumes the string and
 * the terminating byte (page_base + len) lie entirely within the first
 * page — confirm against callers.
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
b4687da7 130
/*
 * xdr_encode_pages - attach a page array to an xdr_buf for sending.
 * The tail kvec is positioned in the head buffer, immediately after
 * the (quad-aligned) head data, and holds the XDR pad bytes for a
 * page_len that is not a multiple of 4.
 */
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* Place the (initially empty) tail right after the head data. */
	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		/* The pad bytes share the quad word with the last page data. */
		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
1da177e4
LT
158
/*
 * xdr_inline_pages - insert a page array between head and tail.
 * The head is truncated at @offset; whatever followed it becomes the
 * tail, so total head+tail coverage of the original kvec is preserved.
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* Remainder of the old head becomes the tail. */
	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
1da177e4 180
1da177e4
LT
181/*
182 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
183 *
184 * _shift_data_right_pages
185 * @pages: vector of pages containing both the source and dest memory area.
186 * @pgto_base: page vector address of destination
187 * @pgfrom_base: page vector address of source
188 * @len: number of bytes to copy
189 *
190 * Note: the addresses pgto_base and pgfrom_base are both calculated in
191 * the same way:
192 * if a memory area starts at byte 'base' in page 'pages[i]',
193 * then its address is given as (i << PAGE_CACHE_SHIFT) + base
194 * Also note: pgfrom_base must be < pgto_base, but the memory areas
195 * they point to may overlap.
196 */
197static void
198_shift_data_right_pages(struct page **pages, size_t pgto_base,
199 size_t pgfrom_base, size_t len)
200{
201 struct page **pgfrom, **pgto;
202 char *vfrom, *vto;
203 size_t copy;
204
205 BUG_ON(pgto_base <= pgfrom_base);
206
207 pgto_base += len;
208 pgfrom_base += len;
209
210 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
211 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
212
213 pgto_base &= ~PAGE_CACHE_MASK;
214 pgfrom_base &= ~PAGE_CACHE_MASK;
215
216 do {
217 /* Are any pointers crossing a page boundary? */
218 if (pgto_base == 0) {
1da177e4
LT
219 pgto_base = PAGE_CACHE_SIZE;
220 pgto--;
221 }
222 if (pgfrom_base == 0) {
223 pgfrom_base = PAGE_CACHE_SIZE;
224 pgfrom--;
225 }
226
227 copy = len;
228 if (copy > pgto_base)
229 copy = pgto_base;
230 if (copy > pgfrom_base)
231 copy = pgfrom_base;
232 pgto_base -= copy;
233 pgfrom_base -= copy;
234
b8541786
CW
235 vto = kmap_atomic(*pgto);
236 vfrom = kmap_atomic(*pgfrom);
1da177e4 237 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
bce3481c 238 flush_dcache_page(*pgto);
b8541786
CW
239 kunmap_atomic(vfrom);
240 kunmap_atomic(vto);
1da177e4
LT
241
242 } while ((len -= copy) != 0);
1da177e4
LT
243}
244
245/*
246 * _copy_to_pages
247 * @pages: array of pages
248 * @pgbase: page vector address of destination
249 * @p: pointer to source data
250 * @len: length
251 *
252 * Copies data from an arbitrary memory location into an array of pages
253 * The copy is assumed to be non-overlapping.
254 */
255static void
256_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
257{
258 struct page **pgto;
259 char *vto;
260 size_t copy;
261
262 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
263 pgbase &= ~PAGE_CACHE_MASK;
264
daeba89d 265 for (;;) {
1da177e4
LT
266 copy = PAGE_CACHE_SIZE - pgbase;
267 if (copy > len)
268 copy = len;
269
b8541786 270 vto = kmap_atomic(*pgto);
1da177e4 271 memcpy(vto + pgbase, p, copy);
b8541786 272 kunmap_atomic(vto);
1da177e4 273
daeba89d
TM
274 len -= copy;
275 if (len == 0)
276 break;
277
1da177e4
LT
278 pgbase += copy;
279 if (pgbase == PAGE_CACHE_SIZE) {
280 flush_dcache_page(*pgto);
281 pgbase = 0;
282 pgto++;
283 }
284 p += copy;
daeba89d 285 }
1da177e4
LT
286 flush_dcache_page(*pgto);
287}
288
289/*
290 * _copy_from_pages
291 * @p: pointer to destination
292 * @pages: array of pages
293 * @pgbase: offset of source data
294 * @len: length
295 *
296 * Copies data into an arbitrary memory location from an array of pages
297 * The copy is assumed to be non-overlapping.
298 */
bf118a34 299void
1da177e4
LT
300_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
301{
302 struct page **pgfrom;
303 char *vfrom;
304 size_t copy;
305
306 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
307 pgbase &= ~PAGE_CACHE_MASK;
308
309 do {
310 copy = PAGE_CACHE_SIZE - pgbase;
311 if (copy > len)
312 copy = len;
313
b8541786 314 vfrom = kmap_atomic(*pgfrom);
1da177e4 315 memcpy(p, vfrom + pgbase, copy);
b8541786 316 kunmap_atomic(vfrom);
1da177e4
LT
317
318 pgbase += copy;
319 if (pgbase == PAGE_CACHE_SIZE) {
320 pgbase = 0;
321 pgfrom++;
322 }
323 p += copy;
324
325 } while ((len -= copy) != 0);
326}
bf118a34 327EXPORT_SYMBOL_GPL(_copy_from_pages);
1da177e4
LT
328
329/*
330 * xdr_shrink_bufhead
331 * @buf: xdr_buf
332 * @len: bytes to remove from buf->head[0]
333 *
cca5172a 334 * Shrinks XDR buffer's header kvec buf->head[0] by
1da177e4
LT
335 * 'len' bytes. The extra data is not lost, but is instead
336 * moved into the inlined pages and/or the tail.
337 */
338static void
339xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
340{
341 struct kvec *head, *tail;
342 size_t copy, offs;
343 unsigned int pglen = buf->page_len;
344
345 tail = buf->tail;
346 head = buf->head;
347 BUG_ON (len > head->iov_len);
348
349 /* Shift the tail first */
350 if (tail->iov_len != 0) {
351 if (tail->iov_len > len) {
352 copy = tail->iov_len - len;
353 memmove((char *)tail->iov_base + len,
354 tail->iov_base, copy);
355 }
356 /* Copy from the inlined pages into the tail */
357 copy = len;
358 if (copy > pglen)
359 copy = pglen;
360 offs = len - copy;
361 if (offs >= tail->iov_len)
362 copy = 0;
363 else if (copy > tail->iov_len - offs)
364 copy = tail->iov_len - offs;
365 if (copy != 0)
366 _copy_from_pages((char *)tail->iov_base + offs,
367 buf->pages,
368 buf->page_base + pglen + offs - len,
369 copy);
370 /* Do we also need to copy data from the head into the tail ? */
371 if (len > pglen) {
372 offs = copy = len - pglen;
373 if (copy > tail->iov_len)
374 copy = tail->iov_len;
375 memcpy(tail->iov_base,
376 (char *)head->iov_base +
377 head->iov_len - offs,
378 copy);
379 }
380 }
381 /* Now handle pages */
382 if (pglen != 0) {
383 if (pglen > len)
384 _shift_data_right_pages(buf->pages,
385 buf->page_base + len,
386 buf->page_base,
387 pglen - len);
388 copy = len;
389 if (len > pglen)
390 copy = pglen;
391 _copy_to_pages(buf->pages, buf->page_base,
392 (char *)head->iov_base + head->iov_len - len,
393 copy);
394 }
395 head->iov_len -= len;
396 buf->buflen -= len;
397 /* Have we truncated the message? */
398 if (buf->len > buf->buflen)
399 buf->len = buf->buflen;
400}
401
402/*
403 * xdr_shrink_pagelen
404 * @buf: xdr_buf
405 * @len: bytes to remove from buf->pages
406 *
cca5172a 407 * Shrinks XDR buffer's page array buf->pages by
1da177e4
LT
408 * 'len' bytes. The extra data is not lost, but is instead
409 * moved into the tail.
410 */
411static void
412xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
413{
414 struct kvec *tail;
415 size_t copy;
1da177e4 416 unsigned int pglen = buf->page_len;
cf187c2d 417 unsigned int tailbuf_len;
1da177e4
LT
418
419 tail = buf->tail;
420 BUG_ON (len > pglen);
421
cf187c2d
TM
422 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
423
1da177e4 424 /* Shift the tail first */
cf187c2d
TM
425 if (tailbuf_len != 0) {
426 unsigned int free_space = tailbuf_len - tail->iov_len;
427
428 if (len < free_space)
429 free_space = len;
430 tail->iov_len += free_space;
431
42d6d8ab 432 copy = len;
1da177e4 433 if (tail->iov_len > len) {
0fe62a35 434 char *p = (char *)tail->iov_base + len;
2e29ebb8 435 memmove(p, tail->iov_base, tail->iov_len - len);
42d6d8ab 436 } else
1da177e4 437 copy = tail->iov_len;
42d6d8ab 438 /* Copy from the inlined pages into the tail */
1da177e4
LT
439 _copy_from_pages((char *)tail->iov_base,
440 buf->pages, buf->page_base + pglen - len,
441 copy);
442 }
443 buf->page_len -= len;
444 buf->buflen -= len;
445 /* Have we truncated the message? */
446 if (buf->len > buf->buflen)
447 buf->len = buf->buflen;
448}
449
450void
451xdr_shift_buf(struct xdr_buf *buf, size_t len)
452{
453 xdr_shrink_bufhead(buf, len);
454}
468039ee 455EXPORT_SYMBOL_GPL(xdr_shift_buf);
1da177e4 456
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 *
 * Position is derived from nwords: total quad-words in the buffer minus
 * the quad-words not yet consumed, converted back to bytes.
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
466
1da177e4
LT
467/**
468 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
469 * @xdr: pointer to xdr_stream struct
470 * @buf: pointer to XDR buffer in which to encode data
471 * @p: current pointer inside XDR buffer
472 *
473 * Note: at the moment the RPC client only passes the length of our
474 * scratch buffer in the xdr_buf's header kvec. Previously this
475 * meant we needed to call xdr_adjust_iovec() after encoding the
476 * data. With the new scheme, the xdr_stream manages the details
477 * of the buffer length, and takes care of adjusting the kvec
478 * length for us.
479 */
d8ed029d 480void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
1da177e4
LT
481{
482 struct kvec *iov = buf->head;
334ccfd5 483 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
1da177e4 484
334ccfd5 485 BUG_ON(scratch_len < 0);
1da177e4
LT
486 xdr->buf = buf;
487 xdr->iov = iov;
d8ed029d
AD
488 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
489 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
334ccfd5
TM
490 BUG_ON(iov->iov_len > scratch_len);
491
492 if (p != xdr->p && p != NULL) {
493 size_t len;
494
495 BUG_ON(p < xdr->p || p > xdr->end);
496 len = (char *)p - (char *)xdr->p;
497 xdr->p = p;
498 buf->len += len;
499 iov->iov_len += len;
500 }
1da177e4 501}
468039ee 502EXPORT_SYMBOL_GPL(xdr_init_encode);
1da177e4
LT
503
504/**
505 * xdr_reserve_space - Reserve buffer space for sending
506 * @xdr: pointer to xdr_stream
507 * @nbytes: number of bytes to reserve
508 *
509 * Checks that we have enough buffer space to encode 'nbytes' more
510 * bytes of data. If so, update the total xdr_buf length, and
511 * adjust the length of the current kvec.
512 */
d8ed029d 513__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
1da177e4 514{
d8ed029d
AD
515 __be32 *p = xdr->p;
516 __be32 *q;
1da177e4
LT
517
518 /* align nbytes on the next 32-bit boundary */
519 nbytes += 3;
520 nbytes &= ~3;
521 q = p + (nbytes >> 2);
522 if (unlikely(q > xdr->end || q < p))
523 return NULL;
524 xdr->p = q;
525 xdr->iov->iov_len += nbytes;
526 xdr->buf->len += nbytes;
527 return p;
528}
468039ee 529EXPORT_SYMBOL_GPL(xdr_reserve_space);
1da177e4
LT
530
531/**
532 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
533 * @xdr: pointer to xdr_stream
534 * @pages: list of pages
535 * @base: offset of first byte
536 * @len: length of data in bytes
537 *
538 */
539void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
540 unsigned int len)
541{
542 struct xdr_buf *buf = xdr->buf;
543 struct kvec *iov = buf->tail;
544 buf->pages = pages;
545 buf->page_base = base;
546 buf->page_len = len;
547
548 iov->iov_base = (char *)xdr->p;
549 iov->iov_len = 0;
550 xdr->iov = iov;
551
552 if (len & 3) {
553 unsigned int pad = 4 - (len & 3);
554
555 BUG_ON(xdr->p >= xdr->end);
556 iov->iov_base = (char *)xdr->p + (len & 3);
557 iov->iov_len += pad;
558 len += pad;
559 *xdr->p++ = 0;
560 }
561 buf->buflen += len;
562 buf->len += len;
563}
468039ee 564EXPORT_SYMBOL_GPL(xdr_write_pages);
1da177e4 565
6650239a 566static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
1537693c 567 unsigned int len)
6650239a
TM
568{
569 if (len > iov->iov_len)
570 len = iov->iov_len;
1537693c 571 xdr->p = (__be32*)iov->iov_base;
6650239a
TM
572 xdr->end = (__be32*)(iov->iov_base + len);
573 xdr->iov = iov;
574 xdr->page_ptr = NULL;
575}
576
/*
 * Position the decode stream at byte @base of the page data, with a
 * window of at most @len bytes, clamped to the end of the current page.
 * Returns -EINVAL if @base lies beyond the page data.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* The window never extends past the current page. */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
609
/*
 * Advance the decode stream to the next page of page data, or fall
 * through to the tail kvec when the pages are exhausted.
 */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	/* Offset of the next page, relative to the start of the page data. */
	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
620
/*
 * Move the decode stream from the current buffer segment (head, page,
 * or tail) to the next one.  Returns true if more data is available.
 */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		/* Head exhausted: try pages, else the tail. */
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
631
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	/* Total message length, in quad words. */
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	/* Honour a caller-supplied starting position within the window. */
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
1da177e4 654
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
		struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages =  pages;
	buf->page_len =  len;
	buf->buflen =  len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
673
/*
 * Consume @nbytes (rounded up to quad words) from the current window.
 * Returns the old position, or NULL if the request exceeds either the
 * remaining message (nwords) or the current window (end); q < p guards
 * against pointer overflow.
 */
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
1da177e4 687/**
6650239a
TM
688 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
689 * @xdr: pointer to xdr_stream struct
690 * @buf: pointer to an empty buffer
691 * @buflen: size of 'buf'
692 *
693 * The scratch buffer is used when decoding from an array of pages.
694 * If an xdr_inline_decode() call spans across page boundaries, then
695 * we copy the data into the scratch buffer in order to allow linear
696 * access.
697 */
698void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
699{
700 xdr->scratch.iov_base = buf;
701 xdr->scratch.iov_len = buflen;
702}
703EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
704
/*
 * Linearize an inline-decode request that spans a buffer boundary:
 * copy the remainder of the current window into the scratch buffer,
 * advance to the next buffer segment, and copy the rest from there.
 * Returns a pointer to the scratch buffer, or NULL on failure.
 */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	/* Bytes still available in the current window. */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
724
725/**
726 * xdr_inline_decode - Retrieve XDR data to decode
1da177e4
LT
727 * @xdr: pointer to xdr_stream struct
728 * @nbytes: number of bytes of data to decode
729 *
730 * Check if the input buffer is long enough to enable us to decode
731 * 'nbytes' more bytes of data starting at the current position.
732 * If so return the current pointer, then update the current
733 * pointer position.
734 */
d8ed029d 735__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1da177e4 736{
6650239a 737 __be32 *p;
1da177e4 738
6650239a
TM
739 if (nbytes == 0)
740 return xdr->p;
741 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1da177e4 742 return NULL;
6650239a
TM
743 p = __xdr_inline_decode(xdr, nbytes);
744 if (p != NULL)
745 return p;
746 return xdr_copy_to_scratch(xdr, nbytes);
1da177e4 747}
468039ee 748EXPORT_SYMBOL_GPL(xdr_inline_decode);
1da177e4
LT
749
750/**
751 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
752 * @xdr: pointer to xdr_stream struct
753 * @len: number of bytes of page data
754 *
755 * Moves data beyond the current pointer position from the XDR head[] buffer
756 * into the page list. Any data that lies beyond current position + "len"
8b23ea7b 757 * bytes is moved into the XDR tail[].
c337d365
TM
758 *
759 * Returns the number of XDR encoded bytes now contained in the pages
1da177e4 760 */
c337d365 761unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1da177e4
LT
762{
763 struct xdr_buf *buf = xdr->buf;
764 struct kvec *iov;
bfeea1dc 765 unsigned int nwords = XDR_QUADLEN(len);
b760b313 766 unsigned int cur = xdr_stream_pos(xdr);
1da177e4 767 unsigned int end;
bd00f84b 768 unsigned int padding;
1da177e4 769
bfeea1dc 770 if (xdr->nwords == 0)
c337d365 771 return 0;
bfeea1dc
TM
772 if (nwords > xdr->nwords) {
773 nwords = xdr->nwords;
774 len = nwords << 2;
775 }
1da177e4
LT
776 /* Realign pages to current pointer position */
777 iov = buf->head;
b760b313
TM
778 if (iov->iov_len > cur)
779 xdr_shrink_bufhead(buf, iov->iov_len - cur);
1da177e4
LT
780
781 /* Truncate page data and move it into the tail */
782 if (buf->page_len > len)
783 xdr_shrink_pagelen(buf, buf->page_len - len);
bd00f84b
TM
784 xdr->nwords = XDR_QUADLEN(buf->len - cur);
785
bfeea1dc 786 padding = (nwords << 2) - len;
1da177e4
LT
787 xdr->iov = iov = buf->tail;
788 /* Compute remaining message length. */
bd00f84b
TM
789 end = ((xdr->nwords - nwords) << 2) + padding;
790 if (end > iov->iov_len)
791 end = iov->iov_len;
792
1da177e4
LT
793 /*
794 * Position current pointer at beginning of tail, and
795 * set remaining message length.
796 */
d8ed029d
AD
797 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
798 xdr->end = (__be32 *)((char *)iov->iov_base + end);
76cacaab 799 xdr->page_ptr = NULL;
bfeea1dc 800 xdr->nwords = XDR_QUADLEN(end - padding);
c337d365 801 return len;
1da177e4 802}
468039ee 803EXPORT_SYMBOL_GPL(xdr_read_pages);
1da177e4 804
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr_set_page_base(xdr, 0, len);
	/* xdr_read_pages left nwords pointing past the pages: add them back. */
	xdr->nwords += XDR_QUADLEN(xdr->buf->page_len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
8b23ea7b 826
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

/* Build a head-only xdr_buf around a single kvec (no pages, empty tail). */
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1da177e4 838
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Carve the head portion, if any. */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	/* Carve the page portion, if any. */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	/* Carve the tail portion, if any. */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	/* Anything left over means the requested range was out of bounds. */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
1da177e4 887
/*
 * Copy @len bytes out of @subbuf into flat memory at @obj, draining the
 * head, then the pages, then the tail, in order.
 */
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
904
bd8100e7 905/* obj is assumed to point to allocated memory of size at least len: */
4e3e43ad 906int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
bd8100e7
AG
907{
908 struct xdr_buf subbuf;
bd8100e7
AG
909 int status;
910
911 status = xdr_buf_subsegment(buf, &subbuf, base, len);
4e3e43ad
TM
912 if (status != 0)
913 return status;
914 __read_bytes_from_xdr_buf(&subbuf, obj, len);
915 return 0;
916}
468039ee 917EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
4e3e43ad
TM
918
919static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
920{
921 unsigned int this_len;
922
923 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
924 memcpy(subbuf->head[0].iov_base, obj, this_len);
bd8100e7
AG
925 len -= this_len;
926 obj += this_len;
4e3e43ad 927 this_len = min_t(unsigned int, len, subbuf->page_len);
bd8100e7 928 if (this_len)
4e3e43ad 929 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
bd8100e7
AG
930 len -= this_len;
931 obj += this_len;
4e3e43ad
TM
932 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
933 memcpy(subbuf->tail[0].iov_base, obj, this_len);
934}
935
936/* obj is assumed to point to allocated memory of size at least len: */
937int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
938{
939 struct xdr_buf subbuf;
940 int status;
941
942 status = xdr_buf_subsegment(buf, &subbuf, base, len);
943 if (status != 0)
944 return status;
945 __write_bytes_to_xdr_buf(&subbuf, obj, len);
946 return 0;
bd8100e7 947}
c43abaed 948EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
bd8100e7
AG
949
950int
1e78957e 951xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1da177e4 952{
d8ed029d 953 __be32 raw;
1da177e4
LT
954 int status;
955
956 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
957 if (status)
958 return status;
98866b5a 959 *obj = be32_to_cpu(raw);
1da177e4
LT
960 return 0;
961}
468039ee 962EXPORT_SYMBOL_GPL(xdr_decode_word);
1da177e4 963
bd8100e7 964int
1e78957e 965xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
bd8100e7 966{
9f162d2a 967 __be32 raw = cpu_to_be32(obj);
bd8100e7
AG
968
969 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
970}
468039ee 971EXPORT_SYMBOL_GPL(xdr_encode_word);
bd8100e7 972
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	/* +4 skips the length word just decoded. */
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
bd8100e7
AG
1010
1011/* Returns 0 on success, or else a negative error code. */
1012static int
1013xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1014 struct xdr_array2_desc *desc, int encode)
1015{
1016 char *elem = NULL, *c;
1017 unsigned int copied = 0, todo, avail_here;
1018 struct page **ppages = NULL;
1019 int err;
1020
1021 if (encode) {
1022 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1023 return -EINVAL;
1024 } else {
1025 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
58fcb8df 1026 desc->array_len > desc->array_maxlen ||
bd8100e7
AG
1027 (unsigned long) base + 4 + desc->array_len *
1028 desc->elem_size > buf->len)
1029 return -EINVAL;
1030 }
1031 base += 4;
1032
1033 if (!desc->xcode)
1034 return 0;
1035
1036 todo = desc->array_len * desc->elem_size;
1037
1038 /* process head */
1039 if (todo && base < buf->head->iov_len) {
1040 c = buf->head->iov_base + base;
1041 avail_here = min_t(unsigned int, todo,
1042 buf->head->iov_len - base);
1043 todo -= avail_here;
1044
1045 while (avail_here >= desc->elem_size) {
1046 err = desc->xcode(desc, c);
1047 if (err)
1048 goto out;
1049 c += desc->elem_size;
1050 avail_here -= desc->elem_size;
1051 }
1052 if (avail_here) {
1053 if (!elem) {
1054 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1055 err = -ENOMEM;
1056 if (!elem)
1057 goto out;
1058 }
1059 if (encode) {
1060 err = desc->xcode(desc, elem);
1061 if (err)
1062 goto out;
1063 memcpy(c, elem, avail_here);
1064 } else
1065 memcpy(elem, c, avail_here);
1066 copied = avail_here;
1067 }
1068 base = buf->head->iov_len; /* align to start of pages */
1069 }
1070
1071 /* process pages array */
1072 base -= buf->head->iov_len;
1073 if (todo && base < buf->page_len) {
1074 unsigned int avail_page;
1075
1076 avail_here = min(todo, buf->page_len - base);
1077 todo -= avail_here;
1078
1079 base += buf->page_base;
1080 ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
1081 base &= ~PAGE_CACHE_MASK;
1082 avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
1083 avail_here);
1084 c = kmap(*ppages) + base;
1085
1086 while (avail_here) {
1087 avail_here -= avail_page;
1088 if (copied || avail_page < desc->elem_size) {
1089 unsigned int l = min(avail_page,
1090 desc->elem_size - copied);
1091 if (!elem) {
1092 elem = kmalloc(desc->elem_size,
1093 GFP_KERNEL);
1094 err = -ENOMEM;
1095 if (!elem)
1096 goto out;
1097 }
1098 if (encode) {
1099 if (!copied) {
1100 err = desc->xcode(desc, elem);
1101 if (err)
1102 goto out;
1103 }
1104 memcpy(c, elem + copied, l);
1105 copied += l;
1106 if (copied == desc->elem_size)
1107 copied = 0;
1108 } else {
1109 memcpy(elem + copied, c, l);
1110 copied += l;
1111 if (copied == desc->elem_size) {
1112 err = desc->xcode(desc, elem);
1113 if (err)
1114 goto out;
1115 copied = 0;
1116 }
1117 }
1118 avail_page -= l;
1119 c += l;
1120 }
1121 while (avail_page >= desc->elem_size) {
1122 err = desc->xcode(desc, c);
1123 if (err)
1124 goto out;
1125 c += desc->elem_size;
1126 avail_page -= desc->elem_size;
1127 }
1128 if (avail_page) {
1129 unsigned int l = min(avail_page,
1130 desc->elem_size - copied);
1131 if (!elem) {
1132 elem = kmalloc(desc->elem_size,
1133 GFP_KERNEL);
1134 err = -ENOMEM;
1135 if (!elem)
1136 goto out;
1137 }
1138 if (encode) {
1139 if (!copied) {
1140 err = desc->xcode(desc, elem);
1141 if (err)
1142 goto out;
1143 }
1144 memcpy(c, elem + copied, l);
1145 copied += l;
1146 if (copied == desc->elem_size)
1147 copied = 0;
1148 } else {
1149 memcpy(elem + copied, c, l);
1150 copied += l;
1151 if (copied == desc->elem_size) {
1152 err = desc->xcode(desc, elem);
1153 if (err)
1154 goto out;
1155 copied = 0;
1156 }
1157 }
1158 }
1159 if (avail_here) {
1160 kunmap(*ppages);
1161 ppages++;
1162 c = kmap(*ppages);
1163 }
1164
1165 avail_page = min(avail_here,
1166 (unsigned int) PAGE_CACHE_SIZE);
1167 }
1168 base = buf->page_len; /* align to start of tail */
1169 }
1170
1171 /* process tail */
1172 base -= buf->page_len;
1173 if (todo) {
1174 c = buf->tail->iov_base + base;
1175 if (copied) {
1176 unsigned int l = desc->elem_size - copied;
1177
1178 if (encode)
1179 memcpy(c, elem + copied, l);
1180 else {
1181 memcpy(elem + copied, c, l);
1182 err = desc->xcode(desc, elem);
1183 if (err)
1184 goto out;
1185 }
1186 todo -= l;
1187 c += l;
1188 }
1189 while (todo) {
1190 err = desc->xcode(desc, c);
1191 if (err)
1192 goto out;
1193 c += desc->elem_size;
1194 todo -= desc->elem_size;
1195 }
1196 }
1197 err = 0;
1198
1199out:
a51482bd 1200 kfree(elem);
bd8100e7
AG
1201 if (ppages)
1202 kunmap(*ppages);
1203 return err;
1204}
1205
1206int
1207xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1208 struct xdr_array2_desc *desc)
1209{
1210 if (base >= buf->len)
1211 return -EINVAL;
1212
1213 return xdr_xcode_array2(buf, base, desc, 0);
1214}
468039ee 1215EXPORT_SYMBOL_GPL(xdr_decode_array2);
bd8100e7
AG
1216
1217int
1218xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1219 struct xdr_array2_desc *desc)
1220{
1221 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1222 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1223 return -EINVAL;
1224
1225 return xdr_xcode_array2(buf, base, desc, 1);
1226}
468039ee 1227EXPORT_SYMBOL_GPL(xdr_encode_array2);
37a4e6cb
OK
1228
1229int
1230xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
cca5172a 1231 int (*actor)(struct scatterlist *, void *), void *data)
37a4e6cb
OK
1232{
1233 int i, ret = 0;
95c96174 1234 unsigned int page_len, thislen, page_offset;
37a4e6cb
OK
1235 struct scatterlist sg[1];
1236
68e3f5dd
HX
1237 sg_init_table(sg, 1);
1238
37a4e6cb
OK
1239 if (offset >= buf->head[0].iov_len) {
1240 offset -= buf->head[0].iov_len;
1241 } else {
1242 thislen = buf->head[0].iov_len - offset;
1243 if (thislen > len)
1244 thislen = len;
1245 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1246 ret = actor(sg, data);
1247 if (ret)
1248 goto out;
1249 offset = 0;
1250 len -= thislen;
1251 }
1252 if (len == 0)
1253 goto out;
1254
1255 if (offset >= buf->page_len) {
1256 offset -= buf->page_len;
1257 } else {
1258 page_len = buf->page_len - offset;
1259 if (page_len > len)
1260 page_len = len;
1261 len -= page_len;
1262 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
1263 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
1264 thislen = PAGE_CACHE_SIZE - page_offset;
1265 do {
1266 if (thislen > page_len)
1267 thislen = page_len;
642f1490 1268 sg_set_page(sg, buf->pages[i], thislen, page_offset);
37a4e6cb
OK
1269 ret = actor(sg, data);
1270 if (ret)
1271 goto out;
1272 page_len -= thislen;
1273 i++;
1274 page_offset = 0;
1275 thislen = PAGE_CACHE_SIZE;
1276 } while (page_len != 0);
1277 offset = 0;
1278 }
1279 if (len == 0)
1280 goto out;
1281 if (offset < buf->tail[0].iov_len) {
1282 thislen = buf->tail[0].iov_len - offset;
1283 if (thislen > len)
1284 thislen = len;
1285 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1286 ret = actor(sg, data);
1287 len -= thislen;
1288 }
1289 if (len != 0)
1290 ret = -EINVAL;
1291out:
1292 return ret;
1293}
468039ee 1294EXPORT_SYMBOL_GPL(xdr_process_buf);
37a4e6cb 1295