/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H

#include <linux/configfs.h>
#include <linux/types.h>
#include <target/target_core_base.h>

struct target_core_fabric_ops {
	struct module *module;
	/*
	 * XXX: Special case for iscsi/iSCSI...
	 * If non-null, fabric_alias is used for matching target/$fabric
	 * ConfigFS paths. If null, fabric_name is used for this (see below).
	 */
	const char *fabric_alias;
	/*
	 * fabric_name is used for matching target/$fabric ConfigFS paths
	 * without a fabric_alias (see above). It's also used for the ALUA state
	 * path and is stored on disk with PR state.
	 */
	const char *fabric_name;
	size_t node_acl_size;
	/*
	 * Limits the number of scatterlist entries per SCF_SCSI_DATA_CDB
	 * payload. Setting this value tells target-core to enforce the limit
	 * and to report it as the INQUIRY EVPD=0xb0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set the UNDERFLOW residual count if the length
	 * exceeds this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limits EVPD page
	 * XXX: Currently assumes a single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login while not
	 * exposing any TPG LUNs, and to return 'not connected' in the
	 * standard INQUIRY response.
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional hook to release struct se_cmd and the fabric-allocated
	 * I/O descriptor after command execution has finished.
	 *
	 * Returning 1 signals that the descriptor has been released.
	 * Returning 0 signals that the descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
			unsigned char *, u32);
	int (*write_pending)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
			struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	void (*add_wwn_groups)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
			const char *);
	int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
			struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
			struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
			struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);

	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;

	/*
	 * Set this member variable to true if the SCSI transport protocol
	 * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
	 * its entirety before a command is aborted.
	 */
	bool write_pending_must_be_called;
};
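/*
 * Example usage (a minimal, abbreviated sketch): a fabric module fills in
 * one static ops table and registers it at module load time. The "example"
 * fabric name, the example_*() callbacks and the init/exit hooks are
 * hypothetical; only target_register_template() and
 * target_unregister_template() below are real entry points, and a real
 * fabric must also provide the remaining mandatory callbacks.
 *
 *	static const struct target_core_fabric_ops example_ops = {
 *		.module			= THIS_MODULE,
 *		.fabric_name		= "example",
 *		.tpg_get_wwn		= example_tpg_get_wwn,
 *		.tpg_get_tag		= example_tpg_get_tag,
 *		.release_cmd		= example_release_cmd,
 *		.sess_get_index		= example_sess_get_index,
 *		.write_pending		= example_write_pending,
 *		.queue_data_in		= example_queue_data_in,
 *		.queue_status		= example_queue_status,
 *		.queue_tm_rsp		= example_queue_tm_rsp,
 *		.aborted_task		= example_aborted_task,
 *		.fabric_make_wwn	= example_make_wwn,
 *		.fabric_drop_wwn	= example_drop_wwn,
 *		.fabric_make_tpg	= example_make_tpg,
 *		.fabric_drop_tpg	= example_drop_tpg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return target_register_template(&example_ops);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		target_unregister_template(&example_ops);
 *	}
 *	module_exit(example_exit);
 */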

int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);

int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

struct se_session *target_setup_session(struct se_portal_group *,
		unsigned int, unsigned int, enum target_prot_op prot_op,
		const char *, void *,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *));
void target_remove_session(struct se_session *);
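/*
 * Example (sketch): setting up and tearing down a session from a fabric
 * login/logout path. The nexus pointer, struct example_cmd and the tag
 * count of 128 are hypothetical; a tag_num of 0 skips tag pre-allocation,
 * and the callback argument may be NULL when no per-session setup is needed.
 *
 *	se_sess = target_setup_session(se_tpg, 128, sizeof(struct example_cmd),
 *				       TARGET_PROT_NORMAL, initiator_name,
 *				       nexus, NULL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 *	...
 *
 *	target_remove_session(se_sess);
 */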

int transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_spc2_release(struct se_node_acl *nacl);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


void __target_init_cmd(struct se_cmd *,
		const struct target_core_fabric_ops *,
		struct se_session *, u32, int, int, unsigned char *, u64);
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun, u32 data_length,
		int task_attr, int data_dir, int flags);
int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
void target_submit(struct se_cmd *se_cmd);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
		gfp_t gfp);
sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);
void target_queue_submission(struct se_cmd *se_cmd);
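/*
 * Example (sketch) of the split submission path for a fabric that receives
 * a READ with an already-mapped scatterlist. The sense_buf, cdb, sgl and
 * data_length variables are the fabric's own; TCM_SIMPLE_TAG, DMA_FROM_DEVICE
 * and TARGET_SCF_ACK_KREF are illustrative choices. If target_submit_prep()
 * fails, target-core completes the command through queue_status(), so the
 * caller simply stops.
 *
 *	rc = target_init_cmd(se_cmd, se_sess, sense_buf, unpacked_lun,
 *			     data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			     TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		return rc;
 *
 *	if (target_submit_prep(se_cmd, cdb, sgl, sgl_count, NULL, 0,
 *			       NULL, 0, GFP_KERNEL))
 *		return 0;
 *
 *	target_submit(se_cmd);
 *
 * target_queue_submission() can be used instead of target_submit() to defer
 * execution to target-core's submission workqueue.
 */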

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, u64, int);
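/*
 * Example (sketch): submitting a LUN RESET task management request. The
 * fabric_tmr_ptr is the fabric's private TMR descriptor, and the tag
 * argument (here 0) is only meaningful for ABORT_TASK; completion is
 * signalled later through the queue_tm_rsp() callback.
 *
 *	rc = target_submit_tmr(se_cmd, se_sess, NULL, unpacked_lun,
 *			       fabric_tmr_ptr, TMR_LUN_RESET, GFP_KERNEL,
 *			       0, TARGET_SCF_ACK_KREF);
 */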
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);

void target_put_cmd_and_wait(struct se_cmd *cmd);
void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

bool transport_wait_for_tasks(struct se_cmd *);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_stop_session(struct se_session *se_sess);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);

int core_alua_check_nonop_delay(struct se_cmd *);

int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);

int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length, bool zero_page, bool chainable);
void target_free_sgl(struct scatterlist *sgl, int nents);
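/*
 * Example (sketch): allocating and freeing a private scatterlist backed by
 * zeroed, unchained pages for a buffer of "length" bytes. The variable names
 * are illustrative; target_alloc_sgl() returns 0 on success or -ENOMEM.
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, length, true, false))
 *		return -ENOMEM;
 *	...
 *	target_free_sgl(sgl, nents);
 */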

/*
 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
 * to the target (e.g. handling a WRITE) and DMA_FROM_DEVICE to mean
 * that data is coming from the target (e.g. handling a READ). However,
 * this is just the opposite of what we have to tell the DMA mapping
 * layer -- e.g. when handling a READ, the HBA will have to DMA the data
 * out of memory so it can send it to the initiator, which means we
 * need to use DMA_TO_DEVICE when we map the data.
 */
static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:
		return DMA_TO_DEVICE;
	case DMA_NONE:
	default:
		return DMA_NONE;
	}
}
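/*
 * Example (sketch): a DMA-capable fabric driver mapping the command payload
 * with the reversed direction expected by the DMA API. The "dev" pointer is
 * the fabric's own device; t_data_sg/t_data_nents are the payload scatterlist
 * fields in struct se_cmd.
 *
 *	count = dma_map_sg(dev, se_cmd->t_data_sg, se_cmd->t_data_nents,
 *			   target_reverse_dma_direction(se_cmd));
 *	if (!count)
 *		return -ENOMEM;
 */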

#endif /* TARGET_CORE_FABRIC_H */