drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

/* Mask host-to-guest interrupts while the guest drains the ring buffer. */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

/* Re-enable host-to-guest interrupts; return the bytes still left to read. */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}
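
/*
 * Worked example (illustrative): if read_index == write_index == 4000,
 * the ring is empty. A writer that starts at old_write == 4000 and then
 * advances write_index sees old_write == read_index here and returns
 * true: the ring just transitioned from empty to non-empty, and the
 * host, which has cleared interrupt_mask per guarantee 2 above, must be
 * signaled. A subsequent write finds old_write != read_index and stays
 * silent.
 */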

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate that
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * Here is the reason for having this barrier:
         * if the read of pending_send_sz (in this function) were reordered
         * ahead of the commit of the new read index (in the calling
         * function), we could have a problem. The host could set
         * pending_send_sz after we have sampled it and then go to sleep
         * before we commit the read index; we would then miss sending the
         * interrupt. The full memory barrier addresses this.
         */
        mb();

        pending_sz = rbi->ring_buffer->pending_send_sz;
        write_loc = rbi->ring_buffer->write_index;
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        if (cur_write_sz >= pending_sz)
                return true;

        return false;
}
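
/*
 * Worked example of the free-space computation above: with
 * ring_datasize == 4096, write_loc == 3000 and read_loc == 1000,
 * cur_write_sz = 4096 - (3000 - 1000) = 2096 bytes of writable space.
 * If the host is blocked waiting for pending_send_sz == 1500 bytes,
 * 2096 >= 1500 and the read we just completed must signal the host.
 */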

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                     u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet header when reading
 * the payload.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                 u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                    u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Pack the ring indices into a u64; only the write index is recorded
 * (in the upper 32 bits), the lower 32 bits are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
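
/*
 * Example of the encoding above: with write_index == 0x20 (32 bytes),
 * the u64 trailer appended after each packet is 0x0000002000000000.
 * On read, this trailer is consumed along with the packet, but its
 * value is not interpreted by this file.
 */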

/*
 * Helper routine to copy data out of the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around of the
 * source (ring) side only!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
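
/*
 * Worked wrap-around example: with ring_buffer_size == 4096,
 * start_read_offset == 4000 and destlen == 200, frag_len == 96, so the
 * first memcpy takes bytes 4000..4095 and the second takes bytes 0..103
 * from the start of the ring; the returned offset is
 * (4000 + 200) % 4096 == 104. hv_copyto_ringbuffer() below is the exact
 * mirror image for the write side.
 */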

/*
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around of the destination
 * (ring) side only!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        void                            *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                            struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                        &bytes_avail_toread,
                                        &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                   void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
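
/*
 * Layout example: with a 16-page (65536-byte) buffer and 4096-byte
 * pages, the first page holds the struct hv_ring_buffer control header
 * (read_index, write_index, interrupt_mask, pending_send_sz, the
 * feature bits) and ring_datasize is 65536 - 4096 = 61440 bytes of
 * actual data area.
 */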

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        if (lock)
                spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);

        /*
         * If there is exactly enough room for the packet, treat the ring
         * as full: were we to fill it completely, read index would equal
         * write index and the ring buffer would look empty on the next
         * pass.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                if (lock)
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
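
        /*
         * Numeric example of the full-vs-empty ambiguity guarded against
         * above: with 100 bytes free and a 100-byte write (including the
         * 8-byte index trailer), write_index would land exactly on
         * read_index, which is indistinguishable from an empty ring;
         * hence a write requires strictly more free space than it
         * consumes.
         */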

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                     next_write_location,
                                                     kv_list[i].iov_base,
                                                     kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                             next_write_location,
                                             &prev_indices,
                                             sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
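
/*
 * Hedged usage sketch of the *signal flag, in the style of the
 * vmbus_sendpacket() flow. The name example_write_and_signal() is
 * illustrative, and the assumption that the channel signaling primitive
 * (shown here as vmbus_setevent(), per channel.c) is visible from this
 * file is exactly that: an assumption, not a definitive reference.
 */
static inline int example_write_and_signal(struct vmbus_channel *channel,
                                           struct kvec *kv_list, u32 kv_count)
{
        bool signal = false;
        int ret;

        ret = hv_ringbuffer_write(&channel->outbound, kv_list, kv_count,
                                  &signal, true);

        /* Interrupt the host only on the empty -> non-empty transition. */
        if (ret == 0 && signal)
                vmbus_setevent(channel);

        return ret;
}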

int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool *signal, bool raw)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;

        if (buflen == 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        hv_get_ringbuffer_availbytes(inring_info,
                                &bytes_avail_toread,
                                &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * Not an error: when not even a header is available we
                 * return 0, and drivers are expected to check
                 * buffer_actual_len.
                 */
                return ret;
        }

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;
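
        /*
         * Worked example: offset8 and len8 count 8-byte units. With
         * desc.offset8 == 2 and desc.len8 == 8, a non-raw read has
         * offset == 16 and packetlen == (8 << 3) - 16 == 48 payload
         * bytes; a raw read returns all 64 descriptor-plus-payload
         * bytes with offset == 0.
         */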

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                buffer,
                                                packetlen,
                                                next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                &prev_indices,
                                                sizeof(u64),
                                                next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        *signal = hv_need_to_signal_on_read(inring_info);

        return ret;
}
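
/*
 * Hedged read-side sketch: a minimal drain loop in the style of the
 * VMBus channel callbacks. The name example_drain(), the fixed 256-byte
 * buffer, and the dropped handling of the signal flag are illustrative
 * assumptions only.
 */
static inline void example_drain(struct hv_ring_buffer_info *in)
{
        u8 buf[256];
        u32 len = 0;
        u64 req;
        bool signal = false;

        /* A zero buffer_actual_len (not an error code) marks an empty ring. */
        while (hv_ringbuffer_read(in, buf, sizeof(buf), &len, &req,
                                  &signal, false) == 0 && len)
                ; /* consume 'len' bytes of payload from buf here */
}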