/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>

f8de50eb
KA
19/* iova structure */
20struct iova {
21 struct rb_node node;
9257b4a2
OP
22 unsigned long pfn_hi; /* Highest allocated pfn */
23 unsigned long pfn_lo; /* Lowest allocated pfn */
24};

/* Opaque cache types; defined in the IOVA allocator implementation. */
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32		/* magazines per bin */

32struct iova_rcache {
33 spinlock_t lock;
34 unsigned long depot_size;
35 struct iova_magazine *depot[MAX_GLOBAL_MAGS];
36 struct iova_cpu_rcache __percpu *cpu_rcaches;
f8de50eb
KA
37};
38
39/* holds all the iova translations for a domain */
40struct iova_domain {
f8de50eb
KA
41 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
42 struct rb_root rbroot; /* iova domain rbtree root */
43 struct rb_node *cached32_node; /* Save last alloced node */
0fb5fe87 44 unsigned long granule; /* pfn granularity for this domain */
1b722500 45 unsigned long start_pfn; /* Lower limit for this domain */
f661197e 46 unsigned long dma_32bit_pfn;
9257b4a2 47 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
f8de50eb
KA
48};
49
a156ef99
JL
50static inline unsigned long iova_size(struct iova *iova)
51{
52 return iova->pfn_hi - iova->pfn_lo + 1;
53}
54
0fb5fe87
RM
55static inline unsigned long iova_shift(struct iova_domain *iovad)
56{
57 return __ffs(iovad->granule);
58}
59
60static inline unsigned long iova_mask(struct iova_domain *iovad)
61{
62 return iovad->granule - 1;
63}
64
65static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
66{
67 return iova & iova_mask(iovad);
68}
69
70static inline size_t iova_align(struct iova_domain *iovad, size_t size)
71{
72 return ALIGN(size, iovad->granule);
73}
74
75static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
76{
77 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
78}
79
80static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
81{
82 return iova >> iova_shift(iovad);
83}
84
ae1ff3d6
SA
85int iova_cache_get(void);
86void iova_cache_put(void);
85b45456 87
f8de50eb
KA
88struct iova *alloc_iova_mem(void);
89void free_iova_mem(struct iova *iova);
90void free_iova(struct iova_domain *iovad, unsigned long pfn);
91void __free_iova(struct iova_domain *iovad, struct iova *iova);
92struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
f76aec76
KA
93 unsigned long limit_pfn,
94 bool size_aligned);
9257b4a2
OP
95void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
96 unsigned long size);
97unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
98 unsigned long limit_pfn);
f8de50eb
KA
99struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
100 unsigned long pfn_hi);
101void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
0fb5fe87
RM
102void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
103 unsigned long start_pfn, unsigned long pfn_32bit);
f8de50eb
KA
104struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
105void put_iova_domain(struct iova_domain *iovad);
75f05569
JL
106struct iova *split_and_remove_iova(struct iova_domain *iovad,
107 struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
9257b4a2 108void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
f8de50eb
KA
109
110#endif