SUNRPC: Remove open coded stream position calculation in xdr_read_pages
[linux-2.6-block.git] / net / sunrpc / xdr.c
CommitLineData
1da177e4
LT
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
8
a246b010 9#include <linux/module.h>
5a0e3ad6 10#include <linux/slab.h>
1da177e4 11#include <linux/types.h>
1da177e4
LT
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/pagemap.h>
15#include <linux/errno.h>
1da177e4
LT
16#include <linux/sunrpc/xdr.h>
17#include <linux/sunrpc/msg_prot.h>
18
19/*
20 * XDR functions for basic NFS types
21 */
d8ed029d
AD
22__be32 *
23xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
1da177e4
LT
24{
25 unsigned int quadlen = XDR_QUADLEN(obj->len);
26
27 p[quadlen] = 0; /* zero trailing bytes */
9f162d2a 28 *p++ = cpu_to_be32(obj->len);
1da177e4
LT
29 memcpy(p, obj->data, obj->len);
30 return p + XDR_QUADLEN(obj->len);
31}
468039ee 32EXPORT_SYMBOL_GPL(xdr_encode_netobj);
1da177e4 33
d8ed029d
AD
/*
 * Decode a netobj: a 32-bit length followed by that many opaque bytes.
 * obj->data is pointed into the buffer in place (no copy).
 * Returns a pointer past the object, or NULL if the length exceeds
 * XDR_MAX_NETOBJ.
 */
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	/* Advance past the data, rounded up to a 4-byte boundary */
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
1da177e4
LT
46
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		/* bytes needed to reach the next 4-byte boundary */
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
1da177e4
LT
76
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Emits the 32-bit length word, then the data padded to a 4-byte
 * boundary (see xdr_encode_opaque_fixed).
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
1da177e4 91
d8ed029d
AD
/* Encode a NUL-terminated C string as an XDR opaque (length + bytes). */
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
1da177e4 98
d8ed029d 99__be32 *
e5cff482
CL
100xdr_decode_string_inplace(__be32 *p, char **sp,
101 unsigned int *lenp, unsigned int maxlen)
1da177e4 102{
e5cff482 103 u32 len;
1da177e4 104
98866b5a 105 len = be32_to_cpu(*p++);
e5cff482 106 if (len > maxlen)
1da177e4
LT
107 return NULL;
108 *lenp = len;
109 *sp = (char *) p;
110 return p + XDR_QUADLEN(len);
111}
468039ee 112EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
1da177e4 113
b4687da7
CL
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 * NOTE(review): only pages[0] is mapped, so this assumes
 * page_base + len falls within the first page — confirm at callers.
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
b4687da7 130
1da177e4
LT
/*
 * Attach a page array to @xdr as its data section, and point the tail
 * at the word following head[0] to hold any XDR pad bytes needed to
 * round @len up to a 4-byte boundary.
 */
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* Tail starts at the first word past the head data */
	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		/* Zero the pad word; tail covers only the pad bytes */
		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
1da177e4
LT
158
/*
 * Splice a page array into @xdr at byte @offset of head[0]: the head is
 * truncated to @offset and the remainder of the old head becomes the tail.
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
1da177e4 180
1da177e4
LT
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* Copy backwards (from the last byte) so overlap is safe */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		/* Copy at most up to the start of either page */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
244
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		/* Copy no further than the end of the current page */
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* Flush the final (possibly partial) page */
	flush_dcache_page(*pgto);
}
288
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		/* Copy no further than the end of the current page */
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
1da177e4
LT
328
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			/* Make room at the start of the tail */
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* Shift remaining page data right to make room ... */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		/* ... then move the head overflow into the pages */
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
401
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Space in the buffer reserved for the tail section */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		/* Grow the tail by up to 'len' bytes of free space */
		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* Shift existing tail data out of the way */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
449
/* Shift the entire xdr_buf contents 'len' bytes out of the head. */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
1da177e4 456
4517d526
TM
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 *
 * Computed as total message words minus the words not yet consumed,
 * converted back to bytes.
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
466
1da177e4
LT
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	/* Bytes of buflen available for the head (scratch) kvec */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	/* Caller may already have advanced past xdr->p; account for it */
	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
1da177e4
LT
503
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 * Returns a pointer to the reserved space, or NULL on overflow.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	/* q < p catches pointer wrap-around */
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
1da177e4
LT
530
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 * The tail is pointed at the current encode position so any further
 * encoding lands after the page data; a pad word is emitted when @len
 * is not a multiple of 4.
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		/* Tail begins at the pad bytes within the zeroed word */
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
1da177e4 565
/* Point the decode stream at a kvec, clamping @len to the kvec size. */
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
576
/*
 * Point the decode stream at offset @base of the buffer's page data,
 * reading at most @len bytes.  xdr->end is clamped to the page holding
 * @base.  Returns 0, or -EINVAL if @base is beyond the page data.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* Never point xdr->end past the end of this page */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
609
/* Advance the decode stream to the next page, or to the tail if the
 * page data is exhausted. */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
620
/* Move on to the next buffer section (head -> pages -> tail).
 * Returns true if more data is available at the new position. */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
631
1da177e4
LT
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer (may be NULL)
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	/* Words remaining in the whole message */
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	/* Honour a caller-supplied start position inside the head */
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
1da177e4 654
f7da7a12
BH
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages =  pages;
	buf->page_len =  len;
	buf->buflen =  len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
673
/* Consume @nbytes (rounded up to words) from the current buffer section.
 * Returns the old position, or NULL if the request overruns either the
 * section end or the remaining message (q < p catches wrap-around). */
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
ba8e452a 686
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
704
/* Linearize @nbytes that straddle a buffer-section boundary by copying
 * the remainder of the current section plus the start of the next into
 * the scratch buffer.  Returns the scratch buffer, or NULL if it is too
 * small or the stream runs out of data. */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	/* Bytes left before the end of the current section */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
724
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	/* Request spans a section boundary; fall back to the scratch buf */
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
1da177e4
LT
749
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int end;
	int padding;

	if (xdr->nwords == 0)
		return 0;
	/* Clamp the request to what is left of the message */
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur)
		xdr_shrink_bufhead(buf, iov->iov_len - cur);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (end > shift + padding)
		end -= shift;
	else
		end = padding;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
1da177e4 805
8b23ea7b
TM
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr_set_page_base(xdr, 0, len);
	/* xdr_read_pages left nwords pointing at the tail; add the pages back */
	xdr->nwords += XDR_QUADLEN(xdr->buf->page_len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
8b23ea7b 827
1da177e4
LT
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

/* Build a single-kvec xdr_buf: head is @iov, no pages, empty tail. */
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1da177e4 839
1da177e4
LT
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base of length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Carve the head portion (if the range starts inside the head) */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	/* Carve the page portion */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	/* Carve the tail portion */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	/* Anything left over means the requested range was out of bounds */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
1da177e4 888
/* Copy @len bytes out of @subbuf (head, then pages, then tail) into @obj.
 * @subbuf is assumed to already be trimmed to exactly the wanted range. */
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
905
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	/* Bounds-check and locate the range, then copy it out */
	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
4e3e43ad
TM
919
/* Copy @len bytes from @obj into @subbuf (head, then pages, then tail).
 * Mirror image of __read_bytes_from_xdr_buf(). */
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
936
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	/* Bounds-check and locate the range, then copy into it */
	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
bd8100e7
AG
950
/* Read one 32-bit XDR word at byte offset @base and return it host-order
 * in *obj.  Returns 0 or a negative error from read_bytes_from_xdr_buf. */
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);
1da177e4 964
/* Write the 32-bit value @obj in XDR (big-endian) order at byte offset
 * @base.  Returns 0 or a negative error from write_bytes_to_xdr_buf. */
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
bd8100e7 973
1da177e4
LT
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	/* First word is the object length */
	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
bd8100e7
AG
1011
1012 /* Returns 0 on success, or else a negative error code. */
/*
 * xdr_xcode_array2 - encode or decode a counted array within an xdr_buf
 *
 * Walks the buffer's three regions (head kvec, page array, tail kvec) in
 * order, invoking desc->xcode once per fixed-size element.  @base is the
 * offset of the array's 32-bit element-count word; @encode selects
 * direction (non-zero: write elements into the buffer, zero: read them
 * out).  Elements that straddle a region or page boundary are staged in
 * a kmalloc'd bounce buffer `elem`; `copied` tracks how many bytes of
 * the current partial element have been transferred so far.  Pages are
 * mapped one at a time with kmap() and unmapped before moving on.
 */
1013 static int
1014 xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1015 		 struct xdr_array2_desc *desc, int encode)
1016 {
1017 	char *elem = NULL, *c;
1018 	unsigned int copied = 0, todo, avail_here;
1019 	struct page **ppages = NULL;
1020 	int err;
1021 
	/* Encode writes the caller-supplied element count; decode reads it
	 * and sanity-checks it against array_maxlen and the buffer length. */
1022 	if (encode) {
1023 		if (xdr_encode_word(buf, base, desc->array_len) != 0)
1024 			return -EINVAL;
1025 	} else {
1026 		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
58fcb8df 1027 		    desc->array_len > desc->array_maxlen ||
bd8100e7
AG
1028 		    (unsigned long) base + 4 + desc->array_len *
1029 				desc->elem_size > buf->len)
1030 			return -EINVAL;
1031 	}
	/* Skip past the count word; base is now the offset of element 0. */
1032 	base += 4;
1033 
	/* No per-element callback: the caller only wanted the length. */
1034 	if (!desc->xcode)
1035 		return 0;
1036 
	/* Total number of element bytes still to process. */
1037 	todo = desc->array_len * desc->elem_size;
1038 
1039 	/* process head */
1040 	if (todo && base < buf->head->iov_len) {
1041 		c = buf->head->iov_base + base;
1042 		avail_here = min_t(unsigned int, todo,
1043 					buf->head->iov_len - base);
1044 		todo -= avail_here;
1045 
		/* xcode whole elements in place while they fit. */
1046 		while (avail_here >= desc->elem_size) {
1047 			err = desc->xcode(desc, c);
1048 			if (err)
1049 				goto out;
1050 			c += desc->elem_size;
1051 			avail_here -= desc->elem_size;
1052 		}
		/* Partial element at the end of the head: stage it in the
		 * bounce buffer; `copied` carries the byte count forward. */
1053 		if (avail_here) {
1054 			if (!elem) {
1055 				elem = kmalloc(desc->elem_size, GFP_KERNEL);
1056 				err = -ENOMEM;
1057 				if (!elem)
1058 					goto out;
1059 			}
1060 			if (encode) {
1061 				err = desc->xcode(desc, elem);
1062 				if (err)
1063 					goto out;
1064 				memcpy(c, elem, avail_here);
1065 			} else
1066 				memcpy(elem, c, avail_here);
1067 			copied = avail_here;
1068 		}
1069 		base = buf->head->iov_len;  /* align to start of pages */
1070 	}
1071 
1072 	/* process pages array */
	/* base becomes an offset relative to the page region. */
1073 	base -= buf->head->iov_len;
1074 	if (todo && base < buf->page_len) {
1075 		unsigned int avail_page;
1076 
1077 		avail_here = min(todo, buf->page_len - base);
1078 		todo -= avail_here;
1079 
1080 		base += buf->page_base;
1081 		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
1082 		base &= ~PAGE_CACHE_MASK;
1083 		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
1084 					avail_here);
1085 		c = kmap(*ppages) + base;
1086 
		/* Per-page loop: avail_page is what is usable in the page
		 * currently mapped; avail_here counts what remains in the
		 * whole page region (already debited for this page). */
1087 		while (avail_here) {
1088 			avail_here -= avail_page;
			/* Finish a partial element carried in from the
			 * previous region/page, or start one if the page
			 * holds less than a full element. */
1089 			if (copied || avail_page < desc->elem_size) {
1090 				unsigned int l = min(avail_page,
1091 					desc->elem_size - copied);
1092 				if (!elem) {
1093 					elem = kmalloc(desc->elem_size,
1094 						       GFP_KERNEL);
1095 					err = -ENOMEM;
1096 					if (!elem)
1097 						goto out;
1098 				}
1099 				if (encode) {
1100 					if (!copied) {
1101 						err = desc->xcode(desc, elem);
1102 						if (err)
1103 							goto out;
1104 					}
1105 					memcpy(c, elem + copied, l);
1106 					copied += l;
1107 					if (copied == desc->elem_size)
1108 						copied = 0;
1109 				} else {
1110 					memcpy(elem + copied, c, l);
1111 					copied += l;
1112 					if (copied == desc->elem_size) {
1113 						err = desc->xcode(desc, elem);
1114 						if (err)
1115 							goto out;
1116 						copied = 0;
1117 					}
1118 				}
1119 				avail_page -= l;
1120 				c += l;
1121 			}
			/* xcode whole elements directly in the mapping. */
1122 			while (avail_page >= desc->elem_size) {
1123 				err = desc->xcode(desc, c);
1124 				if (err)
1125 					goto out;
1126 				c += desc->elem_size;
1127 				avail_page -= desc->elem_size;
1128 			}
			/* Trailing fragment of this page: stage it. */
1129 			if (avail_page) {
1130 				unsigned int l = min(avail_page,
1131 					desc->elem_size - copied);
1132 				if (!elem) {
1133 					elem = kmalloc(desc->elem_size,
1134 						       GFP_KERNEL);
1135 					err = -ENOMEM;
1136 					if (!elem)
1137 						goto out;
1138 				}
1139 				if (encode) {
1140 					if (!copied) {
1141 						err = desc->xcode(desc, elem);
1142 						if (err)
1143 							goto out;
1144 					}
1145 					memcpy(c, elem + copied, l);
1146 					copied += l;
1147 					if (copied == desc->elem_size)
1148 						copied = 0;
1149 				} else {
1150 					memcpy(elem + copied, c, l);
1151 					copied += l;
1152 					if (copied == desc->elem_size) {
1153 						err = desc->xcode(desc, elem);
1154 						if (err)
1155 							goto out;
1156 						copied = 0;
1157 					}
1158 				}
1159 			}
			/* More pages to go: swap the kmap to the next one.
			 * (ppages stays set so `out:` can kunmap on error.) */
1160 			if (avail_here) {
1161 				kunmap(*ppages);
1162 				ppages++;
1163 				c = kmap(*ppages);
1164 			}
1165 
1166 			avail_page = min(avail_here,
1167 				 (unsigned int) PAGE_CACHE_SIZE);
1168 		}
1169 		base = buf->page_len;  /* align to start of tail */
1170 	}
1171 
1172 	/* process tail */
	/* base becomes an offset relative to the tail kvec. */
1173 	base -= buf->page_len;
1174 	if (todo) {
1175 		c = buf->tail->iov_base + base;
		/* Complete the element left partially staged by the pages. */
1176 		if (copied) {
1177 			unsigned int l = desc->elem_size - copied;
1178 
1179 			if (encode)
1180 				memcpy(c, elem + copied, l);
1181 			else {
1182 				memcpy(elem + copied, c, l);
1183 				err = desc->xcode(desc, elem);
1184 				if (err)
1185 					goto out;
1186 			}
1187 			todo -= l;
1188 			c += l;
1189 		}
1190 		while (todo) {
1191 			err = desc->xcode(desc, c);
1192 			if (err)
1193 				goto out;
1194 			c += desc->elem_size;
1195 			todo -= desc->elem_size;
1196 		}
1197 	}
1198 	err = 0;
1199 
	/* Common exit: free the bounce buffer and drop any live kmap. */
1200 out:
a51482bd 1201 	kfree(elem);
bd8100e7
AG
1202 	if (ppages)
1203 		kunmap(*ppages);
1204 	return err;
1205 }
1206
1207int
1208xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1209 struct xdr_array2_desc *desc)
1210{
1211 if (base >= buf->len)
1212 return -EINVAL;
1213
1214 return xdr_xcode_array2(buf, base, desc, 0);
1215}
468039ee 1216EXPORT_SYMBOL_GPL(xdr_decode_array2);
bd8100e7
AG
1217
1218int
1219xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1220 struct xdr_array2_desc *desc)
1221{
1222 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1223 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1224 return -EINVAL;
1225
1226 return xdr_xcode_array2(buf, base, desc, 1);
1227}
468039ee 1228EXPORT_SYMBOL_GPL(xdr_encode_array2);
37a4e6cb
OK
1229
1230int
1231xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
cca5172a 1232 int (*actor)(struct scatterlist *, void *), void *data)
37a4e6cb
OK
1233{
1234 int i, ret = 0;
95c96174 1235 unsigned int page_len, thislen, page_offset;
37a4e6cb
OK
1236 struct scatterlist sg[1];
1237
68e3f5dd
HX
1238 sg_init_table(sg, 1);
1239
37a4e6cb
OK
1240 if (offset >= buf->head[0].iov_len) {
1241 offset -= buf->head[0].iov_len;
1242 } else {
1243 thislen = buf->head[0].iov_len - offset;
1244 if (thislen > len)
1245 thislen = len;
1246 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1247 ret = actor(sg, data);
1248 if (ret)
1249 goto out;
1250 offset = 0;
1251 len -= thislen;
1252 }
1253 if (len == 0)
1254 goto out;
1255
1256 if (offset >= buf->page_len) {
1257 offset -= buf->page_len;
1258 } else {
1259 page_len = buf->page_len - offset;
1260 if (page_len > len)
1261 page_len = len;
1262 len -= page_len;
1263 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
1264 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
1265 thislen = PAGE_CACHE_SIZE - page_offset;
1266 do {
1267 if (thislen > page_len)
1268 thislen = page_len;
642f1490 1269 sg_set_page(sg, buf->pages[i], thislen, page_offset);
37a4e6cb
OK
1270 ret = actor(sg, data);
1271 if (ret)
1272 goto out;
1273 page_len -= thislen;
1274 i++;
1275 page_offset = 0;
1276 thislen = PAGE_CACHE_SIZE;
1277 } while (page_len != 0);
1278 offset = 0;
1279 }
1280 if (len == 0)
1281 goto out;
1282 if (offset < buf->tail[0].iov_len) {
1283 thislen = buf->tail[0].iov_len - offset;
1284 if (thislen > len)
1285 thislen = len;
1286 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1287 ret = actor(sg, data);
1288 len -= thislen;
1289 }
1290 if (len != 0)
1291 ret = -EINVAL;
1292out:
1293 return ret;
1294}
468039ee 1295EXPORT_SYMBOL_GPL(xdr_process_buf);
37a4e6cb 1296