/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>

extern uint mlx5_core_debug_mask;

#define mlx5_core_dbg(__dev, format, ...) \
        dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
                __func__, __LINE__, current->pid, \
                ##__VA_ARGS__)

#define mlx5_core_dbg_once(__dev, format, ...) \
        dev_dbg_once((__dev)->device, \
                     "%s:%d:(pid %d): " format, \
                     __func__, __LINE__, current->pid, \
                     ##__VA_ARGS__)

#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
        if ((mask) & mlx5_core_debug_mask) \
                mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)

#define mlx5_core_err(__dev, format, ...) \
        dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
                __func__, __LINE__, current->pid, \
                ##__VA_ARGS__)

#define mlx5_core_err_rl(__dev, format, ...) \
        dev_err_ratelimited((__dev)->device, \
                            "%s:%d:(pid %d): " format, \
                            __func__, __LINE__, current->pid, \
                            ##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...) \
        dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
                 __func__, __LINE__, current->pid, \
                 ##__VA_ARGS__)

#define mlx5_core_warn_once(__dev, format, ...) \
        dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
                      __func__, __LINE__, current->pid, \
                      ##__VA_ARGS__)

#define mlx5_core_warn_rl(__dev, format, ...) \
        dev_warn_ratelimited((__dev)->device, \
                             "%s:%d:(pid %d): " format, \
                             __func__, __LINE__, current->pid, \
                             ##__VA_ARGS__)

#define mlx5_core_info(__dev, format, ...) \
        dev_info((__dev)->device, format, ##__VA_ARGS__)

#define mlx5_core_info_rl(__dev, format, ...) \
        dev_info_ratelimited((__dev)->device, \
                             "%s:%d:(pid %d): " format, \
                             __func__, __LINE__, current->pid, \
                             ##__VA_ARGS__)

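/*
 * Illustrative usage (hypothetical call site, not part of the original
 * header): the wrappers above are printf-style and prefix each message with
 * the calling function, source line and PID before routing it to the core
 * device's struct device, e.g.
 *
 *	err = do_something(dev);		// do_something() is hypothetical
 *	if (err)
 *		mlx5_core_err(dev, "failed to do something, err(%d)\n", err);
 */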
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
        struct device *device = dev->device;
        struct va_format vaf;
        va_list args;

        if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
                      "Level %d is out of range, set to default level\n", level))
                level = LOGLEVEL_DEFAULT;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
                        &vaf);
        va_end(args);
}

#define mlx5_log(__dev, level, format, ...) \
        mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \
                    __func__, __LINE__, current->pid, \
                    ##__VA_ARGS__)

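/*
 * Illustrative usage (hypothetical call site): unlike the fixed-severity
 * wrappers above, mlx5_log() takes an explicit LOGLEVEL_* value from
 * <linux/kern_levels.h>, e.g.
 *
 *	mlx5_log(dev, LOGLEVEL_WARNING, "device is in degraded state\n");
 */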
static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
        return &dev->pdev->dev;
}

enum {
        MLX5_CMD_DATA, /* print command payload only */
        MLX5_CMD_TIME, /* print command execution time */
};

enum {
        MLX5_DRIVER_STATUS_ABORTED = 0xfe,
        MLX5_DRIVER_SYND = 0xbadd00de,
};

enum mlx5_semaphore_space_address {
        MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
        MLX5_SEMAPHORE_SW_RESET = 0x20,
};

#define MLX5_DEFAULT_PROF 2
#define MLX5_SF_PROF 3

static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
                                      size_t item_size, size_t num_items,
                                      const char *func, int line)
{
        int inlen;

        if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
                mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
                              __func__, func, line, fixed, item_size, num_items);
                return -ENOMEM;
        }

        if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
                mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
                              __func__, func, line, fixed, item_size, num_items);
                return -ENOMEM;
        }

        if (check_add_overflow((int)fixed, inlen, &inlen)) {
                mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
                              __func__, func, line, fixed, item_size, num_items);
                return -ENOMEM;
        }

        return inlen;
}

#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
        mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)

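/*
 * Illustrative usage (hypothetical command layout, not taken from this
 * header): size a variable-length command buffer with overflow checking
 * before allocating it. A negative return value is the error code.
 *
 *	inlen = MLX5_FLEXIBLE_INLEN(dev, MLX5_ST_SZ_BYTES(create_mkey_in),
 *				    sizeof(u64), npages);
 *	if (inlen < 0)
 *		return inlen;
 *	in = kvzalloc(inlen, GFP_KERNEL);
 */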
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
                        enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
int mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                       void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                       void *context, u32 element_id,
                                       u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
                                        u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);

void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);

int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
                        u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
                        u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
                        u8 feature_group, u8 access_reg_group);

void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
void mlx5_lag_enable_change(struct mlx5_core_dev *dev);

int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev);
void mlx5_events_stop(struct mlx5_core_dev *dev);

int mlx5_adev_idx_alloc(void);
void mlx5_adev_idx_free(int idx);
void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
int mlx5_adev_init(struct mlx5_core_dev *dev);

int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);

void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);

struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);

#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
                            MLX5_CAP_GEN((mdev), pps_modify) && \
                            MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
                            MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))

int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
                        struct netlink_ext_ack *extack);
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
                          u32 *running_ver, u32 *stored_ver);

#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
#else
static inline int mlx5e_init(void){ return 0; }
static inline void mlx5e_cleanup(void){}
#endif

static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
        return pci_num_vf(dev->pdev) ? true : false;
}

int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
        int ret;

        mlx5_dev_list_lock();
        ret = mlx5_rescan_drivers_locked(dev);
        mlx5_dev_list_unlock();
        return ret;
}

void mlx5_lag_update(struct mlx5_core_dev *dev);

enum {
        MLX5_NIC_IFC_FULL = 0,
        MLX5_NIC_IFC_DISABLED = 1,
        MLX5_NIC_IFC_NO_DRAM_NIC = 2,
        MLX5_NIC_IFC_SW_RESET = 7
};

u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);

static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
{
        return dev->coredev_type == MLX5_COREDEV_SF;
}

int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_init_one_light(struct mlx5_core_dev *dev);
void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
void mlx5_unload_one_light(struct mlx5_core_dev *dev);

int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
                                  u16 opmod);
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
        mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)

void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
}

bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);

static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
}

static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
}

static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
{
        int base_vport = mlx5_core_ec_vf_vport_base(dev);
        int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);

        if (!mlx5_core_ec_sriov_enabled(dev))
                return false;

        return (vport_num >= base_vport && vport_num < max_vport);
}

static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
        return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
                          : vport;
}

#endif /* __MLX5_CORE_H__ */