/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
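/* Forward an async event completion to one VF, or, with a NULL vf,
 * broadcast it to all VFs, by sending an HWRM_FWD_ASYNC_EVENT_CMPL
 * request to the firmware.
 */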
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type =
		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

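/* Common sanity checks for the VF ndo entry points: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be in range.
 */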
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

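/* .ndo_set_vf_spoofchk handler: toggle source MAC address checking for
 * a VF via HWRM_FUNC_CFG; the cached flags are only updated once the
 * firmware accepts the new setting.
 */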
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

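/* .ndo_get_vf_config handler: report the cached VF attributes (MAC,
 * TX rates, VLAN, spoof check and link state) without a firmware call.
 */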
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

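/* .ndo_set_vf_mac handler: program an administratively assigned MAC
 * address into the VF's function config. A zero address is accepted
 * and lets the VF pick its own MAC.
 */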
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject broadcast or multicast mac addr, zero mac addr means
	 * allow VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

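/* .ndo_set_vf_vlan handler: set the default VLAN for a VF. Only
 * 802.1Q tags are supported, QoS priorities are not, and firmware
 * older than spec 0x10201 lacks the capability entirely.
 */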
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority is still needed;
	 * currently fail the command if a priority is specified
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

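/* .ndo_set_vf_rate handler: bound a VF's min/max TX bandwidth. Both
 * rates are validated against the current PF link speed before being
 * programmed through HWRM_FUNC_CFG.
 */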
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

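/* .ndo_set_vf_link_state handler: record the requested link state in
 * the VF flags and notify the VF with a forwarded LINK_STATUS_CHANGE
 * async event.
 */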
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

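/* Reset the per-VF state and start every VF with the default flags
 * (QoS enabled, link up).
 */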
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

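/* Ask the firmware to release each VF's resources in turn via
 * HWRM_FUNC_VF_RESC_FREE, stopping at the first failure.
 */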
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

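/* Allocate the per-VF state array, the DMA-coherent pages that back
 * each VF's forwarded HWRM command buffer, and the VF event bitmap.
 */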
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

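/* Register the DMA addresses of the VF command request pages with the
 * firmware (HWRM_FUNC_BUF_RGTR); these are the buffers from which the
 * PF later reads forwarded VF requests.
 */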
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so that it does not exceed the number of
	 * HW ring groups. This WA should be removed once new HWRM provides
	 * HW ring groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

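/* Work out how many VFs the remaining PF resources can support (at
 * least one RX ring, one TX ring and one RSS context each), then
 * allocate buffers, reserve firmware resources and enable SR-IOV.
 */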
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

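/* Tear down SR-IOV. If any VF is still assigned to a VM, only warn the
 * VFs via a broadcast PF_DRVR_UNLOAD async event; otherwise disable
 * SR-IOV and free the reserved firmware and host resources.
 */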
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

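/* sriov_configure entry point for the PCI driver: validate the
 * request, clean up any existing VFs, then enable the new VF count.
 * Returns the number of VFs actually enabled, or 0 if the request is
 * rejected.
 */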
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allowing SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the number of enabled VFs equals the requested count */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

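/* Forward a prepared response buffer back to a VF through the firmware
 * (HWRM_FWD_RESP), targeting the VF's completion ring.
 */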
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

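/* Reject a forwarded VF request: hand the encapsulated request back to
 * the firmware via HWRM_REJECT_FWD_RESP so the VF sees an error.
 */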
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

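/* Approve a forwarded VF request: ask the firmware to execute the
 * encapsulated command on the VF's behalf via HWRM_EXEC_FWD_RESP.
 */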
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

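/* A VF may only add an L2 filter for its own MAC address, unless no
 * administrative MAC has been assigned (vf->mac_addr is not a valid
 * unicast address); otherwise the request is rejected.
 */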
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

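/* Service a forwarded HWRM_PORT_PHY_QCFG from a VF. With a forced link
 * state, synthesize a PHY query response reflecting the forced state
 * (10Gb full duplex when forced up); otherwise forward the real query.
 */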
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

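/* Dispatch a forwarded VF command to the appropriate validation
 * handler; unknown request types are silently dropped.
 */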
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate if the VF is allowed to change its MAC
		 * address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

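/* Scan the VF event bitmap and validate/forward each pending VF
 * command, clearing the event bit as each VF is serviced.
 */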
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

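/* VF-side helper: query the firmware for this function's MAC address
 * and adopt any administratively assigned (PF-provided) address.
 */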
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to
	 *    change the random MAC address using ndo_set_mac_address() if
	 *    desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

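/* VF-side helper: ask the PF to approve a new MAC address via
 * HWRM_FUNC_VF_CFG. Firmware older than spec 0x10202 cannot forward
 * the request, so the change is only allowed when no PF-assigned MAC
 * is present.
 */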
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}
#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif