/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"
/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;

	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	int			sg_dma_len;
};
/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
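
/*
 * Find the rds_ib_device that has the given IP address bound to it,
 * taking a reference on the device before returning it.  Returns NULL
 * if no device owns the address.
 */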
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		rcu_read_lock();
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
		rcu_read_unlock();
	}

	return NULL;
}
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}
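
/*
 * Move an IP address binding to this device: drop it from whichever
 * device currently owns it (if any) and add it to rds_ibdev.
 */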
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}
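
/*
 * Connections that are not yet associated with a device live on the
 * global ib_nodev_conns list.  rds_ib_add_conn() moves a connection
 * from that list onto the device's conn_list; rds_ib_remove_conn()
 * moves it back.
 */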
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* irqs are already disabled by the outer lock */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}
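
/*
 * Allocate and initialize an FMR pool for this device, sizing its
 * limits from the device's max_fmrs and the RDS fmr_message_size.
 */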
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}
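
/*
 * Grab an unused, already unmapped MR from the pool's clean list, if
 * one is available.
 */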
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}
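
/*
 * Get an MR for use: reuse a clean one if possible, otherwise allocate
 * a new FMR from the device, flushing dirty MRs if we are at the pool
 * limit.  Returns an ERR_PTR on failure.
 */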
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it.
		 */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
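
/*
 * Map a scatterlist into an FMR.  Every entry other than the first
 * must start on a page boundary, and every entry other than the last
 * must end on one; the pages are collected into a flat array and
 * handed to ib_map_phys_fmr().
 */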
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
	       struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);

out:
	kfree(dma_pages);

	return ret;
}
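
/*
 * Sync the pages behind an MR for CPU or device access, depending on
 * the DMA direction the caller passes in.
 */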
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
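
/*
 * Tear down the DMA mapping behind an MR and unpin its pages, marking
 * them dirty.  rds_ib_teardown_mr() below additionally updates the
 * pool's free_pinned accounting.
 */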
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}
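
/*
 * Decide how many MRs a flush should try to free outright: all of them
 * when free_all is set, none otherwise (MRs over their remap limit are
 * freed regardless).
 */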
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}
/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}
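
/*
 * Called when the caller is done with an MR.  The MR is not torn down
 * immediately; it is parked on the free or drop list and unmapped by a
 * later pool flush, which may be triggered here if too much memory is
 * pinned or the caller asked for an invalidate.
 */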
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}
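
/*
 * Entry point for registering a userspace scatterlist: find the device
 * for the socket's bound address, allocate an FMR, map the pages and
 * hand the resulting rkey back through key_ret.
 */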
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	/* The MR now holds the device reference taken above */
	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}