sunrpc: clean up xdr_shrink_pagelen use of temporary pointer
[linux-2.6-block.git] net/sunrpc/xdr.c
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
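
/*
 * Illustrative example (added commentary, not in the original file):
 * a 3-byte netobj { 0xaa, 0xbb, 0xcc } encodes as two quadwords,
 * 00 00 00 03 aa bb cc 00, because p[quadlen] = 0 zeroes the last
 * data word before the payload is copied over it.
 */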

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
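
/*
 * Illustrative example (added commentary, not in the original file):
 * a 5-byte buffer needs XDR_QUADLEN(5) = 2 quadwords, so quadlen << 2
 * is 8 and three zero pad bytes follow the data:
 *
 *	p = xdr_encode_opaque_fixed(p, "hello", 5);
 *	wire bytes: 'h' 'e' 'l' 'l' 'o' 00 00 00
 */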

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
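
/*
 * Note (added commentary, not in the original file): a struct xdr_buf
 * carries an RPC message in three pieces -- a head kvec, an array of
 * pages, and a tail kvec.  Page data need not be a multiple of four
 * bytes long, so xdr_encode_pages() above parks the XDR pad bytes in
 * the tail, which points just past the head's contents.
 */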

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 * the same way:
 *	if a memory area starts at byte 'base' in page 'pages[i]',
 *	then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
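
/*
 * Added commentary (not in the original file): because the source and
 * destination ranges may overlap, the loop above walks backwards --
 * both offsets are first advanced past the end of their ranges, then
 * each chunk is copied after stepping the offsets back.  memmove()
 * handles any remaining overlap within a single page.
 */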

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
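
/*
 * Added commentary (not in the original file): data moves "rightwards"
 * through the buffer in stages -- existing tail contents shift to make
 * room, up to 'len' trailing bytes of page data drop into the tail,
 * the remaining page data shifts right, and finally the last 'len'
 * bytes of the head are copied into the start of the page array.
 */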

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
			buf->pages, buf->page_base + pglen - len,
			copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
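
/*
 * Added commentary (not in the original file): the temporary pointer
 * 'p' in the tail-shift branch above is the cleanup referred to in
 * this commit's subject line, per the blame marker on that line; it
 * is now declared and initialized inside the only block that uses it.
 */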

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 * scratch buffer in the xdr_buf's header kvec. Previously this
 * meant we needed to call xdr_adjust_iovec() after encoding the
 * data. With the new scheme, the xdr_stream manages the details
 * of the buffer length, and takes care of adjusting the kvec
 * length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
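
/*
 * Minimal encode sketch (added commentary, not in the original file;
 * 'req' and the two arguments are hypothetical):
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 8);
 *	if (p != NULL) {
 *		*p++ = cpu_to_be32(arg1);
 *		*p++ = cpu_to_be32(arg2);
 *	}
 */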

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
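
/*
 * Added commentary (not in the original file): (nbytes + 3) & ~3
 * rounds the reservation up to a whole number of 32-bit XDR words,
 * and the 'q < p' test catches pointer wraparound on a huge nbytes.
 */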

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
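
/*
 * Minimal decode sketch (added commentary, not in the original file;
 * 'val1' and 'val2' are hypothetical):
 *
 *	p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;	(short or corrupt reply)
 *	val1 = be32_to_cpup(p++);
 *	val2 = be32_to_cpup(p);
 */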

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
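
/*
 * Added commentary (not in the original file): the three stanzas above
 * walk head, pages, and tail in order; 'base' is consumed as each
 * region is skipped and 'len' as each region contributes bytes, so
 * both must reach zero for the subsegment to fit.
 */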

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;	/* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;	/* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
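
/*
 * Added commentary (not in the original file): xdr_xcode_array2()
 * streams a fixed-size-element array through desc->xcode() in three
 * phases (head, pages, tail).  The 'elem' bounce buffer and 'copied'
 * counter reassemble elements that straddle a region or page boundary.
 */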

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
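
/*
 * Added commentary (not in the original file): xdr_process_buf() maps
 * each region of the buffer into a one-entry scatterlist and hands it
 * to 'actor', which lets callers such as the RPCSEC_GSS checksum code
 * digest an xdr_buf without first flattening it into one buffer.
 */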