/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
#include <linux/slab.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#define vxge_os_vaprintf(level, mask, fmt, ...) { \
	char buff[255]; \
	snprintf(buff, 255, fmt, __VA_ARGS__); \
	printk("%s\n", buff); \
}

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif

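/*
 * Usage sketch (illustrative only): VXGE_ALIGN() returns the number of
 * padding bytes needed to bring an address up to the next 'size' boundary
 * (zero if already aligned), e.g. with a 128-byte boundary:
 *
 *	VXGE_ALIGN(0x1000, 128) == 0	(already 128-byte aligned)
 *	VXGE_ALIGN(0x1004, 128) == 124	(0x1004 + 124 == 0x1080)
 */
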
#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500

#ifdef VXGE_DEBUG_ASSERT

/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. By default assertions
 * are enabled. They can be disabled by leaving the VXGE_DEBUG_ASSERT
 * macro undefined at compile time.
 */
#define vxge_assert(test) { \
	if (!(test)) \
		vxge_os_bug("bad cond: "#test" at %s:%d\n", \
				__FILE__, __LINE__); }
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */

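/*
 * Usage sketch (illustrative only): vxge_assert() is typically used to
 * validate invariants on internal paths, e.g.:
 *
 *	vxge_assert(channel != NULL);
 *	vxge_assert(ring->rxds_per_block != 0);
 *
 * With VXGE_DEBUG_ASSERT undefined the macro expands to nothing, so the
 * checked expression must be free of side effects.
 */
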
/**
 * enum vxge_debug_level - Debug verbosity levels.
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are logged
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 *              are logged. Very noisy.
 *
 * This enumeration is used to switch between different debug levels
 * at runtime if the DEBUG macro is defined during compilation. If the
 * DEBUG macro is not defined, the code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE = 0,
	VXGE_TRACE = 1,
	VXGE_ERR = 2
};

#define NULL_VPID 0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK 0xffffffff
#define VXGE_DEBUG_TRACE_MASK 0xffffffff
#define VXGE_DEBUG_ERR_MASK 0xffffffff
#define VXGE_DEBUG_MASK 0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK 0x20000000
#define VXGE_DEBUG_TRACE_MASK 0x20000000
#define VXGE_DEBUG_ERR_MASK 0x20000000
#define VXGE_DEBUG_MASK 0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
#define VXGE_COMPONENT_LL 0x20000000
#define VXGE_COMPONENT_ALL 0xffffffff

#define VXGE_HW_BASE_INF 100
#define VXGE_HW_BASE_ERR 200
#define VXGE_HW_BASE_BADCFG 300

enum vxge_hw_status {
	VXGE_HW_OK = 0,
	VXGE_HW_FAIL = 1,
	VXGE_HW_PENDING = 2,
	VXGE_HW_COMPLETIONS_REMAIN = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF = -1
};

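/*
 * Usage sketch (illustrative only; some_vxge_hw_call() stands in for any
 * function in this header): HW-layer calls report failures through
 * enum vxge_hw_status rather than errno values, so callers typically
 * propagate or translate the code:
 *
 *	enum vxge_hw_status status;
 *
 *	status = some_vxge_hw_call(hldev);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */
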
/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN 32
struct vxge_hw_device_date {
	u32 day;
	u32 month;
	u32 year;
	char date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32 major;
	u32 minor;
	u32 build;
	char version[VXGE_HW_FW_STRLEN];
};

u64
__vxge_hw_vpath_pci_func_mode_get(
	u32 vp_id,
	struct vxge_hw_vpath_reg __iomem *vpath_reg);

/**
 * struct vxge_hw_fifo_config - Configuration of fifo.
 * @enable: Is this fifo to be commissioned
 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
 *             blocks per queue.
 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
 *             transmit operation).
 *             No more than 256 transmit buffers can be specified.
 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
 *             bytes. Setting @memblock_size to page size ensures
 *             by-page allocation of descriptors. 128K bytes is the
 *             maximum supported block size.
 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
 *             (e.g., to align on a cache line).
 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
 *             Use 0 otherwise.
 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
 *             which generally improves latency of the host bridge operation
 *             (see PCI specification). For valid values please refer
 *             to struct vxge_hw_fifo_config{} in the driver sources.
 * Configuration of all Titan fifos.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_fifo_config{} structure.
 */
struct vxge_hw_fifo_config {
	u32 enable;
#define VXGE_HW_FIFO_ENABLE 1
#define VXGE_HW_FIFO_DISABLE 0

	u32 fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS 2
#define VXGE_HW_MAX_FIFO_BLOCKS 128

	u32 max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS 1
#define VXGE_HW_MAX_FIFO_FRAGS 256

	u32 memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096

	u32 alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE

	u32 intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0

	u32 no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0

};
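
/*
 * Configuration sketch (illustrative only; the values are hypothetical but
 * lie within the (min, max) bounds defined above):
 *
 *	struct vxge_hw_fifo_config *fifo_cfg = &vp_cfg->fifo;
 *
 *	fifo_cfg->enable         = VXGE_HW_FIFO_ENABLE;
 *	fifo_cfg->fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS;
 *	fifo_cfg->max_frags      = VXGE_HW_MAX_FIFO_FRAGS;
 *	fifo_cfg->memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
 *	fifo_cfg->alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
 *	fifo_cfg->intr           = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
 *	fifo_cfg->no_snoop_bits  = VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
 */
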
/**
 * struct vxge_hw_ring_config - Ring configurations.
 * @enable: Is this ring to be commissioned
 * @ring_blocks: Numbers of RxD blocks in the ring
 * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
 *             to Titan User Guide.
 * @scatter_mode: Titan supports two receive scatter modes: A and B.
 *             For details please refer to Titan User Guide.
 * @rx_timer_val: The number of 32ns periods that would be counted between two
 *             timer interrupts.
 * @greedy_return: If Set it forces the device to return absolutely all RxD
 *             that are consumed and still on board when a timer interrupt
 *             triggers. If Clear, then if the device has already returned
 *             RxD before the current timer interrupt triggered and after the
 *             previous timer interrupt triggered, then the device is not
 *             forced to return the rest of the consumed RxD that it has
 *             on board, which account for a byte count less than the one
 *             programmed into the PRC_CFG6.RXD_CRXDT field
 * @rx_timer_ci: TBD
 * @backoff_interval_us: Time (in microseconds), after which Titan
 *             tries to download RxDs posted by the host.
 *             Note that the "backoff" does not happen if host posts receive
 *             descriptors in a timely fashion.
 * Ring configuration.
 */
struct vxge_hw_ring_config {
	u32 enable;
#define VXGE_HW_RING_ENABLE 1
#define VXGE_HW_RING_DISABLE 0
#define VXGE_HW_RING_DEFAULT 1

	u32 ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS 1
#define VXGE_HW_MAX_RING_BLOCKS 128
#define VXGE_HW_DEF_RING_BLOCKS 2

	u32 buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1

	u32 scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A 0
#define VXGE_HW_RING_SCATTER_MODE_B 1
#define VXGE_HW_RING_SCATTER_MODE_C 2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff

	u64 rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
};

/**
 * struct vxge_hw_vp_config - Configuration of virtual path
 * @vp_id: Virtual Path Id
 * @min_bandwidth: Minimum Guaranteed bandwidth
 * @ring: See struct vxge_hw_ring_config{}.
 * @fifo: See struct vxge_hw_fifo_config{}.
 * @tti: Configuration of interrupt associated with Transmit.
 *             see struct vxge_hw_tim_intr_config();
 * @rti: Configuration of interrupt associated with Receive.
 *             see struct vxge_hw_tim_intr_config();
 * @mtu: mtu size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 *             remove the VLAN tag from all received tagged frames that are not
 *             replicated at the internal L2 switch.
 *             0 - Do not strip the VLAN tag.
 *             1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
 *                 always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters
 * needed to configure a Virtual Path.
 */
struct vxge_hw_vp_config {
	u32 vp_id;

#define VXGE_HW_VPATH_PRIORITY_MIN 0
#define VXGE_HW_VPATH_PRIORITY_MAX 16
#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0

	u32 min_bandwidth;
#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0

	struct vxge_hw_ring_config ring;
	struct vxge_hw_fifo_config fifo;
	struct vxge_hw_tim_intr_config tti;
	struct vxge_hw_tim_intr_config rti;

	u32 mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff

	u32 rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff

};
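
/*
 * Configuration sketch (illustrative only): a virtual path is typically
 * described by filling one vp_config slot before device initialization:
 *
 *	struct vxge_hw_vp_config *cfg = &device_config->vp_config[vp_id];
 *
 *	cfg->vp_id              = vp_id;
 *	cfg->min_bandwidth      = VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
 *	cfg->mtu                = VXGE_HW_DEFAULT_MTU;
 *	cfg->rpa_strip_vlan_tag = VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE;
 *	cfg->ring.enable        = VXGE_HW_RING_ENABLE;
 *	cfg->fifo.enable        = VXGE_HW_FIFO_ENABLE;
 */
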
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using the IT (Indirection
 *             Table).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 *             to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
 */
struct vxge_hw_device_config {
	u32 dma_blockpool_initial;
	u32 dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096

#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2

	u32 intr_mode;
#define VXGE_HW_INTR_MODE_IRQLINE 0
#define VXGE_HW_INTR_MODE_MSIX 1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2

#define VXGE_HW_INTR_MODE_DEF 0

	u32 rth_en;
#define VXGE_HW_RTH_DISABLE 0
#define VXGE_HW_RTH_ENABLE 1
#define VXGE_HW_RTH_DEFAULT 0

	u32 rth_it_type;
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0

	u32 rts_mac_en;
#define VXGE_HW_RTS_MAC_DISABLE 0
#define VXGE_HW_RTS_MAC_ENABLE 1
#define VXGE_HW_RTS_MAC_DEFAULT 0

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];

	u32 device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000

};

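/*
 * Configuration sketch (illustrative only; a hypothetical selection of the
 * values defined above):
 *
 *	device_config->intr_mode          = VXGE_HW_INTR_MODE_MSIX;
 *	device_config->rth_en             = VXGE_HW_RTH_ENABLE;
 *	device_config->rth_it_type        = VXGE_HW_RTH_IT_TYPE_DEFAULT;
 *	device_config->rts_mac_en         = VXGE_HW_RTS_MAC_DEFAULT;
 *	device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
 */
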
/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by
 * driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle.
 * (typically - at HW device initialization time).
 * @type: Enumerated hw error, e.g.: double ECC.
 * @serr_data: Titan status.
 * @ext_data: Extended data. The contents depends on the @type.
 *
 * Critical Error notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {

	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};

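/*
 * Usage sketch (illustrative only; the handler names are hypothetical):
 *
 *	static void my_link_up(struct __vxge_hw_device *devh) { ... }
 *	static void my_link_down(struct __vxge_hw_device *devh) { ... }
 *
 *	struct vxge_hw_uld_cbs cbs = {
 *		.link_up   = my_link_up,
 *		.link_down = my_link_down,
 *		.crit_err  = NULL,	(optional; may be left NULL)
 *	};
 */
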
/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * Block is allocated with a header to put the blocks into list.
 *
 */
struct __vxge_hw_blockpool_entry {
	struct list_head item;
	u32 length;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 * @free_entry_list: List of free blockpool entries
 *
 * Block pool contains the DMA blocks preallocated.
 *
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device *hldev;
	u32 block_size;
	u32 pool_size;
	u32 pool_max;
	u32 req_out;
	struct list_head free_block_list;
	struct list_head free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
	VXGE_HW_CHANNEL_TYPE_FIFO = 1,
	VXGE_HW_CHANNEL_TYPE_RING = 2,
	VXGE_HW_CHANNEL_TYPE_MAX = 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 *          The channel length "grows" when more descriptors get allocated.
 *          See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 *          by driver for the subsequent send or receive operation.
 *          See vxge_hw_fifo_txdl_reserve(),
 *          vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top gives the maximum number of dtrs available in
 *          reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 *          Note that at any point in time @work_arr contains 3 types of
 *          descriptors:
 *          1) posted but not yet consumed by Titan device;
 *          2) consumed but not yet completed;
 *          3) completed but not yet freed
 *          (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points to the
 *          position in the channel which will contain the next
 *          to-be-posted descriptor.
 * @compl_index: Completion index. At any point in time points to the
 *          position in the channel which will contain the next
 *          to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 *          (i.e., handed over back to HW) by driver.
 *          See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: current pointer in free array
 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
 *          to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void*) user-defined context, which may be
 *          driver object, ULP connection, etc.
 *          Once channel is open, @userdata is passed back to user via
 *          vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head item;
	enum __vxge_hw_channel_type type;
	struct __vxge_hw_device *devh;
	struct __vxge_hw_vpath_handle *vph;
	u32 length;
	u32 vp_id;
	void **reserve_arr;
	u32 reserve_ptr;
	u32 reserve_top;
	void **work_arr;
	u32 post_index ____cacheline_aligned;
	u32 compl_index ____cacheline_aligned;
	void **free_arr;
	u32 free_ptr;
	void **orig_arr;
	u32 per_dtr_space;
	void *userdata;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;

} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: Hal device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Max mtu that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 *
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32 vp_id;

	u32 vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN 1

	struct __vxge_hw_device *hldev;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;

	u32 max_mtu;
	u32 vsport_number;
	u32 max_kdfc_db;
	u32 max_nofl_db;

	struct __vxge_hw_ring *____cacheline_aligned ringh;
	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
	struct list_head vpath_handles;
	struct __vxge_hw_blockpool_entry *stats_block;
	struct vxge_hw_vpath_stats_hw_info *hw_stats;
	struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head item;
	struct __vxge_hw_virtualpath *vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
/**
 * struct __vxge_hw_device - Hal device object
 * @magic: Magic Number
 * @device_id: PCI Device Id of the adapter
 * @major_revision: PCI Device major revision
 * @minor_revision: PCI Device minor revision
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
	u16 device_id;
	u8 major_revision;
	u8 minor_revision;
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct vxge_hw_device_config config;
	enum vxge_hw_device_link_state link_state;

	struct vxge_hw_uld_cbs uld_callbacks;

	u32 host_type;
	u32 func_id;
	u32 access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	struct vxge_hw_toc_reg __iomem *toc_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
			[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
			[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg __iomem *vpath_reg \
			[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8 __iomem *kdfc;
	u8 __iomem *usdc;
	struct __vxge_hw_virtualpath virtual_paths \
			[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64 vpath_assignments;
	u64 vpaths_deployed;
	u32 first_vp_id;
	u64 tim_int_mask0[4];
	u32 tim_int_mask1[4];

	struct __vxge_hw_blockpool block_pool;
	struct vxge_hw_device_stats stats;
	u32 debug_module_mask;
	u32 debug_level;
	u32 level_err;
	u32 level_trace;
};

#define VXGE_HW_INFO_LEN 64
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware Date
 * @flash_version: Flash version
 * @flash_date: Flash Date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
#define VXGE_HW_SR_VH_FUNCTION0 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
	u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
	u32 func_id;
	u64 vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date flash_date;
	u8 serial_number[VXGE_HW_INFO_LEN];
	u8 part_number[VXGE_HW_INFO_LEN];
	u8 product_desc[VXGE_HW_INFO_LEN];
	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct vxge_hw_device_attr - Device memory spaces.
 * @bar0: BAR0 virtual address.
 * @pdev: PCI device object.
 *
 * Device memory spaces. Includes configuration, BAR0 etc. per device
 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
 */
struct vxge_hw_device_attr {
	void __iomem *bar0;
	struct pci_dev *pdev;
	struct vxge_hw_uld_cbs uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
		m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0x80000000; \
		m1[1] = 0x40000000; \
	} \
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
	if (i < 16) { \
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
	} \
	else { \
		m1[0] = 0; \
		m1[1] = 0; \
	} \
}

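/*
 * Usage sketch (illustrative only): the mask macros update the caller's
 * mask arrays for a given interrupt number 'i'; the device object above
 * keeps such arrays as tim_int_mask0[4]/tim_int_mask1[4]:
 *
 *	u64 m0[4] = {0};
 *	u32 m1[4] = {0};
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, tx_intr_num);
 *	...
 *	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, tx_intr_num);
 */
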
#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
	status = vxge_hw_mrpcim_stats_access(hldev, \
			VXGE_HW_STATS_OP_READ, \
			loc, \
			offset, \
			&val64); \
 \
	if (status != VXGE_HW_OK) \
		return status; \
}

#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
	status = __vxge_hw_vpath_stats_access(vpath, \
			VXGE_HW_STATS_OP_READ, \
			offset, \
			&val64); \
	if (status != VXGE_HW_OK) \
		return status; \
}

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 *           channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 *           (See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 *          (see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 *          as per Titan User Guide.
 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
 *          1-buffer mode descriptor is 32 byte long, etc.
 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
 *          per-descriptor data (e.g., DMA handle for Solaris)
 * @per_rxd_space: Per rxd space requested by driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 *          block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 *          usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 *          are new completions on that channel. In many implementations
 *          the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 *          See vxge_hw_ring_rxd_init_f{}.
 *          If not NULL, HW invokes the callback when opening
 *          the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 *          HW invokes the callback when closing the corresponding channel.
 *          See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	u32 ring_length;
	u32 buffer_mode;
	u32 rxd_size;
	u32 rxd_priv_size;
	u32 per_rxd_space;
	u32 rxds_per_block;
	u32 rxdblock_priv_size;
	u32 cmpl_cnt;
	u32 vp_id;
	u32 doorbell_cnt;
	u32 total_db_cnt;
	u64 rxds_limit;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
	struct vxge_hw_ring_config *config;
} ____cacheline_aligned;

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE = 0,
	VXGE_HW_TXDL_STATE_AVAIL = 1,
	VXGE_HW_TXDL_STATE_POSTED = 2,
	VXGE_HW_TXDL_STATE_FREED = 3
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 *             channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 *             (see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 *             For more details on TxDLs please refer to the Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 *             per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 *             usage.
 * @per_txdl_space: Per txdl private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 *             are new completions on that fifo. In many implementations
 *             the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 *             HW invokes the callback when closing the corresponding fifo.
 *             See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel channel;
	struct vxge_hw_mempool *mempool;
	struct vxge_hw_fifo_config *config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
	u64 interrupt_type;
	u32 no_snoop_bits;
	u32 txdl_per_memblock;
	u32 txdl_size;
	u32 priv_size;
	u32 per_txdl_space;
	u32 vp_id;
	u32 tx_intr_num;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 *             descriptors in memory blocks (see struct vxge_hw_fifo_config{}).
 *             Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 *             carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 *             alignment. Used to place one or more mis-aligned fragments
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 *             Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 *             TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page.
 *             on the next send operation.
 * @dma_object: DMA address and handle of the memory block that contains
 *             the descriptor. This member is used only in the "checked"
 *             version of the HW (to enforce certain assertions);
 *             otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that driver can ask HW
 * to allocate additional per-descriptor space for its own (driver-specific)
 * purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	ptrdiff_t dma_offset;
	u32 frags;
	u8 *align_vaddr_start;
	u8 *align_vaddr;
	dma_addr_t align_dma_addr;
	struct pci_dev *align_dma_handle;
	struct pci_dev *align_dma_acch;
	ptrdiff_t align_dma_offset;
	u32 align_used_frags;
	u32 alloc_frags;
	u32 unused;
	struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
	struct vxge_hw_fifo_txd *first_txdp;
	void *memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64 control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1

	u64 txdl_ptr;
};

/*
 * TX Descriptor
 */

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 * @control_0: Bits 0 to 6 - Reserved.
 *             Bit 7 - List Ownership. This field should be initialized
 *             to '1' by the driver before the transmit list pointer is
 *             written to the adapter. This field will be set to '0' by the
 *             adapter once it has completed transmitting the frame or frames in
 *             the list. Note - This field is only valid in TxD0. Additionally,
 *             for multi-list sequences, the driver should not release any
 *             buffers until the ownership of the last list in the multi-list
 *             sequence has been returned to the host.
 *             Bits 8 to 11 - Reserved
 *             Bits 12 to 15 - Transfer_Code. This field is only valid in
 *             TxD0. It is used to describe the status of the transmit data
 *             buffer transfer. This field is always overwritten by the
 *             adapter, so this field may be initialized to any value.
 *             Bits 16 to 17 - Host steering. This field allows the host to
 *             override the selection of the physical transmit port.
 *             Attention:
 *             Normal sounds as if learned from the switch rather than from
 *             the aggregation algorithms.
 *             00: Normal. Use Destination/MAC Address
 *             lookup to determine the transmit port.
 *             01: Send on physical Port1.
 *             10: Send on physical Port0.
 *             11: Send on both ports.
 *             Bits 18 to 21 - Reserved
 *             Bits 22 to 23 - Gather_Code. This field is set by the host and
 *             is used to describe how individual buffers comprise a frame.
 *             10: First descriptor of a frame.
 *             00: Middle of a multi-descriptor frame.
 *             01: Last descriptor of a frame.
 *             11: First and last descriptor of a frame (the entire frame
 *             resides in a single buffer).
 *             For multi-descriptor frames, the only valid gather code sequence
 *             is {10, [00], 01}. In other words, the descriptors must be placed
 *             in the list in the correct order.
 *             Bits 24 to 27 - Reserved
 *             Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
 *             definition. Only valid in TxD0. This field allows the host to
 *             indicate the Ethernet encapsulation of an outbound LSO packet.
 *             00 - classic mode (best guess)
 *             01 - LLC
 *             10 - SNAP
 *             11 - DIX
 *             If "classic mode" is selected, the adapter will attempt to
 *             decode the frame's Ethernet encapsulation by examining the L/T
 *             field as follows:
 *             <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
 *             if packet is IPv4 or IPv6.
 *             0x8870 Jumbo-SNAP encoding.
 *             0x0800 IPv4 DIX encoding
 *             0x86DD IPv6 DIX encoding
 *             others illegal encapsulation
 *             Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
 *             Set to 1 to perform segmentation offload for TCP/UDP.
 *             This field is valid only in TxD0.
 *             Bits 31 to 33 - Reserved.
 *             Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
 *             This field is meaningful only when LSO_Control is non-zero.
 *             When LSO_Control is set to TCP_LSO, the single (possibly large)
 *             TCP segment described by this TxDL will be sent as a series of
 *             TCP segments each of which contains no more than LSO_MSS
 *             payload bytes.
 *             When LSO_Control is set to UDP_LSO, the single (possibly large)
 *             UDP datagram described by this TxDL will be sent as a series of
 *             UDP datagrams each of which contains no more than LSO_MSS
 *             payload bytes.
 *             All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
 *             or TCP payload, with the exception of the last, which will have
 *             <= LSO_MSS bytes of payload.
 *             Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
 *             buffer to be read by the adapter. This field is written by the
 *             host. A value of 0 is illegal.
 *             Bits 32 to 63 - This value is written by the adapter upon
 *             completion of a UDP or TCP LSO operation and indicates the number
 *             of UDP or TCP payload bytes that were transmitted. 0x0000 will be
 *             returned for any non-LSO operation.
 * @control_1: Bits 0 to 4 - Reserved.
 *             Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
 *             offload. This field is only valid in the first TxD of a frame.
 *             Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
 *             This field is only valid in the first TxD of a frame (the TxD's
 *             gather code must be 10 or 11). The driver should only set this
 *             bit if it can guarantee that TCP is present.
 *             Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
 *             This field is only valid in the first TxD of a frame (the TxD's
 *             gather code must be 10 or 11). The driver should only set this
 *             bit if it can guarantee that UDP is present.
 *             Bits 8 to 14 - Reserved.
 *             Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
 *             instruct the adapter to insert the VLAN tag specified by the
 *             Tx_VLAN_Tag field. This field is only valid in the first TxD of
 *             a frame.
 *             Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
 *             to be inserted into the frame by the adapter (the first two bytes
 *             of a VLAN tag are always 0x8100). This field is only valid if the
 *             Tx_VLAN_Enable field is set to '1'.
 *             Bits 32 to 33 - Reserved.
 *             Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 *             number the frame is associated with. This field is written by the
 *             host. It is only valid in the first TxD of a frame.
 *             Bits 40 to 42 - Reserved.
 *             Bit 43 - Set to 1 to exclude the frame from bandwidth metering
 *             functions. This field is valid only in the first TxD
 *             of a frame.
 *             Bits 44 to 45 - Reserved.
 *             Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
 *             generate an interrupt as soon as all of the frames in the list
 *             have been transmitted. In order to have per-frame interrupts,
 *             the driver should place a maximum of one frame per list. This
 *             field is only valid in the first TxD of a frame.
 *             Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
 *             to count the frame toward the utilization interrupt specified in
 *             the Tx_Int_Number field. This field is only valid in the first
 *             TxD of a frame.
 *             Bits 48 to 63 - Reserved.
 * @buffer_pointer: Buffer start address.
 * @host_control: Host_Control. Opaque 64bit data stored by driver inside the
 *             Titan descriptor prior to posting the latter on the fifo
 *             via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
 *             to the driver with each completed descriptor.
 *
 * Transmit descriptor (TxD). Fifo descriptor contains configured number
 * (list) of TxDs. For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};

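/*
 * Fill sketch (illustrative only, inferred from the bit layout documented
 * above; txdp, dma_addr and len are assumed to come from the caller): a
 * single-buffer frame occupying one TxD might be prepared along these
 * lines, with ownership (VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) handed to the
 * adapter when the list is posted:
 *
 *	txdp->buffer_pointer = (u64)dma_addr;
 *	txdp->control_0 =
 *		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
 *					VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
 *		VXGE_HW_FIFO_TXD_BUFFER_SIZE(len);
 *	txdp->control_1 = VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN;
 */
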
/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 * @host_control: This field is exclusively for host use and is "readonly"
 *             from the adapter's perspective.
 * @control_0: Bits 0 to 6 - RTH_Bucket get
 *             Bit 7 - Own Descriptor ownership bit. This bit is set to 1
 *             by the host, and is set to 0 by the adapter.
 *             0 - Host owns RxD and buffer.
 *             1 - The adapter owns RxD and buffer.
 *             Bit 8 - Fast_Path_Eligible When set, indicates that the
 *             received frame meets all of the criteria for fast path processing.
 *             The required criteria are as follows:
 *             !SYN &
 *             (Transfer_Code == "Transfer OK") &
 *             (!Is_IP_Fragment) &
 *             ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
 *             (Is_IPv6)) &
 *             ((Is_TCP & computed_L4_checksum == 0xFFFF) |
 *             (Is_UDP & (computed_L4_checksum == 0xFFFF |
 *             computed_L4_checksum == 0x0000)))
 *             (same meaning for all RxD buffer modes)
 *             Bit 9 - L3 Checksum Correct
 *             Bit 10 - L4 Checksum Correct
 *             Bit 11 - Reserved
 *             Bit 12 to 15 - This field is written by the adapter. It is
 *             used to report the status of the frame transfer to the host.
 *             0x0 - Transfer OK
 *             0x4 - RDA Failure During Transfer
 *             0x5 - Unparseable Packet, such as unknown IPv6 header.
 *             0x6 - Frame integrity error (FCS or ECC).
 *             0x7 - Buffer Size Error. The provided buffer(s) were not
 *             appropriately sized and data loss occurred.
 *             0x8 - Internal ECC Error. RxD corrupted.
 *             0x9 - IPv4 Checksum error
 *             0xA - TCP/UDP Checksum error
 *             0xF - Unknown Error or Multiple Error. Indicates an
 *             unknown problem or that more than one of transfer codes is set.
 *             Bit 16 - SYN The adapter sets this field to indicate that
 *             the incoming frame contained a TCP segment with its SYN bit
 *             set and its ACK bit NOT set. (same meaning for all RxD buffer
 *             modes)
 *             Bit 17 - Is ICMP
 *             Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
 *             Socket Pair Direct Match Table and the frame was steered based
 *             on SPDM.
 *             Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
 *             Indirection Table and the frame was steered based on hash
 *             indirection.
 *             Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
 *             type) that was used to calculate the hash.
 *             Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
 *             tagged.
 *             Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
 *             of the received frame.
 *             0x0 - Ethernet DIX
 *             0x1 - LLC
 *             0x2 - SNAP (includes Jumbo-SNAP)
 *             0x3 - IPX
 *             Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
 *             Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
 *             Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
 *             IP packet.
 *             Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
 *             Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
 *             Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
 *             arrived with the frame. If the resulting computed IPv4 header
 *             checksum for the frame did not produce the expected 0xFFFF value,
 *             then the transfer code would be set to 0x9.
 *             Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
 *             arrived with the frame. If the resulting computed TCP/UDP checksum
 *             for the frame did not produce the expected 0xFFFF value, then the
 *             transfer code would be set to 0xA.
 * @control_1: Bits 0 to 1 - Reserved
 *             Bits 2 to 15 - Buffer0_Size. This field is set by the host and
 *             eventually overwritten by the adapter. The host writes the
 *             available buffer size in bytes when it passes the descriptor to
 *             the adapter. When a frame is delivered to the host, the adapter
 *             populates this field with the number of bytes written into the
 *             buffer. The largest supported buffer is 16,383 bytes.
 *             Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
 *             RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
 *             Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
 *             of the VLAN tag, if one was detected by the adapter. This field is
 *             populated even if VLAN-tag stripping is enabled.
 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};

enum vxge_hw_rth_algoritms {
	RTH_ALG_JENKINS = 0,
	RTH_ALG_MS_RSS = 1,
	RTH_ALG_CRC32C = 2
};

/**
 * struct vxge_hw_rth_hash_types - RTH hash types.
 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
 *
 * Used to pass RTH hash types to rts_rts_set.
 *
 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
 */
struct vxge_hw_rth_hash_types {
	u8 hash_type_tcpipv4_en;
	u8 hash_type_ipv4_en;
	u8 hash_type_tcpipv6_en;
	u8 hash_type_ipv6_en;
	u8 hash_type_tcpipv6ex_en;
	u8 hash_type_ipv6ex_en;
};

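/*
 * Usage sketch (illustrative only): to steer on TCP/IPv4 and plain IPv4
 * headers only, a caller would fill something like:
 *
 *	struct vxge_hw_rth_hash_types hash_types = {
 *		.hash_type_tcpipv4_en = 1,
 *		.hash_type_ipv4_en    = 1,
 *	};
 *
 * and hand it to vxge_hw_vpath_rts_rth_set().
 */
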
u32
vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);

void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1, 3 or 5)
 *
 * This function returns the size of RxD for given buffer mode
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxD for RxD block for given buffer mode
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}

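/*
 * Worked example (assuming VXGE_HW_BLOCK_SIZE is 4096 bytes): a 1-buffer
 * mode RxD is 32 bytes (four u64 fields), and 16 bytes at the end of each
 * block are reserved for HW use, so (4096 - 16) / 32 = 127 RxDs fit in
 * one RxD block.
 */
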
/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should be already mapped to the device
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}

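/*
 * Usage sketch (illustrative only): the buffer must already be DMA-mapped
 * before the descriptor is prepared, e.g. in an rxd_init-style callback:
 *
 *	dma_addr_t dma = pci_map_single(pdev, skb->data, size,
 *					PCI_DMA_FROMDEVICE);
 *
 *	vxge_hw_ring_rxd_1b_set(rxdh, dma, size);
 */
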
1488/**
1489 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
1490 * descriptor.
1491 * @ring_handle: Ring handle.
1492 * @rxdh: Descriptor handle.
1493 * @pkt_length: Length (in bytes) of the received data in the buffer
1494 * carried by this descriptor. Returned by HW.
1495 *
1496 * Retrieve the packet length from a completed 1-buffer-mode Rx
1497 * descriptor. This inline helper-function uses the completed
1498 * descriptor to populate the @pkt_length "out" parameter. The
1499 * function always succeeds.
1500 *
1501 */
1502static inline
1503void vxge_hw_ring_rxd_1b_get(
1504 struct __vxge_hw_ring *ring_handle,
1505 void *rxdh,
1506 u32 *pkt_length)
1507{
1508 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1509
1510 *pkt_length =
1511 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
1512}
1513
1514/**
1515 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
1516 * a completed receive descriptor for 1b mode.
1517 * @ring_handle: Ring handle.
1518 * @rxdh: Descriptor handle.
1519 * @rxd_info: Descriptor information
1520 *
1521 * Retrieve extended information associated with a completed receive descriptor.
1522 *
1523 */
1524static inline
1525void vxge_hw_ring_rxd_1b_info_get(
1526 struct __vxge_hw_ring *ring_handle,
1527 void *rxdh,
1528 struct vxge_hw_ring_rxd_info *rxd_info)
1529{
1530
1531 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1532 rxd_info->syn_flag =
1533 (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
1534 rxd_info->is_icmp =
1535 (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
1536 rxd_info->fast_path_eligible =
1537 (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
1538 rxd_info->l3_cksum_valid =
1539 (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
1540 rxd_info->l3_cksum =
1541 (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
1542 rxd_info->l4_cksum_valid =
1543 (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
1544 rxd_info->l4_cksum =
1545		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
1546 rxd_info->frame =
1547 (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
1548 rxd_info->proto =
1549 (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
1550 rxd_info->is_vlan =
1551 (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
1552 rxd_info->vlan =
1553 (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
1554 rxd_info->rth_bucket =
1555 (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
1556 rxd_info->rth_it_hit =
1557 (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
1558 rxd_info->rth_spdm_hit =
1559 (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
1560 rxd_info->rth_hash_type =
1561 (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
1562 rxd_info->rth_value =
1563 (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
1564}
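
/*
 * Illustrative sketch of a ring completion callback body combining the
 * two helpers above ("ring" and "rxdh" are the callback arguments; the
 * checksum handling shown is a simplified assumption, not the full
 * driver logic):
 *
 *	u32 pkt_length;
 *	struct vxge_hw_ring_rxd_info ext_info;
 *
 *	vxge_hw_ring_rxd_1b_get(ring, rxdh, &pkt_length);
 *	vxge_hw_ring_rxd_1b_info_get(ring, rxdh, &ext_info);
 *	skb_put(skb, pkt_length);
 *	if (ext_info.l3_cksum_valid && ext_info.l4_cksum_valid)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */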
1565
1566/**
1567 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
1568 * of a 1b-mode or 3b-mode ring.
1569 * @rxdh: Descriptor handle.
1570 *
1571 * Returns: private driver info associated with the descriptor.
1572 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
1573 *
1574 */
1575static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
1576{
1577 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1578 return (void *)(size_t)rxdp->host_control;
1579}
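
/*
 * Illustrative sketch: if the driver set per_rxd_space to
 * sizeof(struct my_rx_priv) in struct vxge_hw_ring_attr (a hypothetical
 * driver-private type), the scratch area is recovered on completion as:
 *
 *	struct my_rx_priv *priv = vxge_hw_ring_rxd_private_get(rxdh);
 */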
1580
1581/**
1582 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
1583 * @txdlh: Descriptor handle.
1584 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1585 * and/or TCP and/or UDP.
1586 *
1587 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
1588 * descriptor.
1589 * This API is part of the preparation of the transmit descriptor for posting
1590 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1591 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1592 * and vxge_hw_fifo_txdl_buffer_set().
1593 * All these APIs fill in the fields of the fifo descriptor,
1594 * in accordance with the Titan specification.
1595 *
1596 */
1597static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
1598{
1599 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1600 txdp->control_1 |= cksum_bits;
1601}
1602
1603/**
1604 * vxge_hw_fifo_txdl_mss_set - Set MSS.
1605 * @txdlh: Descriptor handle.
1606 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1607 * driver, which in turn inserts the MSS into the @txdlh.
1608 *
1609 * This API is part of the preparation of the transmit descriptor for posting
1610 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1611 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1612 * and vxge_hw_fifo_txdl_cksum_set_bits().
1613 * All these APIs fill in the fields of the fifo descriptor,
1614 * in accordance with the Titan specification.
1615 *
1616 */
1617static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
1618{
1619 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1620
1621 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
1622 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
1623}
1624
1625/**
1626 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
1627 * @txdlh: Descriptor handle.
1628 * @vlan_tag: 16bit VLAN tag.
1629 *
1630 * Insert VLAN tag into specified transmit descriptor.
1631 * The actual insertion of the tag into outgoing frame is done by the hardware.
1632 */
1633static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
1634{
1635 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1636
1637 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
1638 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
1639}
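
/*
 * Illustrative TxDL preparation sketch combining the helpers above.
 * vxge_hw_fifo_txdl_buffer_set() and vxge_hw_fifo_txdl_post() are
 * assumed to be declared elsewhere in this driver, and the
 * VXGE_HW_FIFO_TXD_TX_CKO_* checksum-enable bits follow the Titan spec:
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, skb->len);
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	if (skb_is_gso(skb))
 *		vxge_hw_fifo_txdl_mss_set(txdlh, skb_shinfo(skb)->gso_size);
 *	vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */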
1640
1641/**
1642 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
1643 * @txdlh: Descriptor handle.
1644 *
1645 * Retrieve per-descriptor private data.
1646 * Note that driver requests per-descriptor space via
1647 * struct vxge_hw_fifo_attr passed to
1648 * vxge_hw_vpath_open().
1649 *
1650 * Returns: private driver data associated with the descriptor.
1651 */
1652static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
1653{
1654 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1655
1656 return (void *)(size_t)txdp->host_control;
1657}
1658
1659/**
1660 * struct vxge_hw_ring_attr - Ring open "template".
1661 * @callback: Ring completion callback. HW invokes the callback when there
1662 * are new completions on that ring. In many implementations
1663 * the @callback executes in the hw interrupt context.
1664 * @rxd_init: Ring's descriptor-initialize callback.
1665 * See vxge_hw_ring_rxd_init_f{}.
1666 * If not NULL, HW invokes the callback when opening
1667 * the ring.
1668 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
1669 * HW invokes the callback when closing the corresponding ring.
1670 * See also vxge_hw_ring_rxd_term_f{}.
1671 * @userdata: User-defined "context" of _that_ ring. Passed back to the
1672 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
1673 * @per_rxd_space: If specified (i.e., greater than zero): extra space
1674 * reserved by HW for each receive descriptor.
1675 * Can be used to store,
1676 * and retrieve on completion, information specific
1677 * to the driver.
1678 *
1679 * Ring open "template". User fills the structure with ring
1680 * attributes and passes it to vxge_hw_vpath_open().
1681 */
1682struct vxge_hw_ring_attr {
1683 enum vxge_hw_status (*callback)(
1684 struct __vxge_hw_ring *ringh,
1685 void *rxdh,
1686 u8 t_code,
1687 void *userdata);
1688
1689 enum vxge_hw_status (*rxd_init)(
1690 void *rxdh,
1691 void *userdata);
1692
1693 void (*rxd_term)(
1694 void *rxdh,
1695 enum vxge_hw_rxd_state state,
1696 void *userdata);
1697
1698 void *userdata;
1699 u32 per_rxd_space;
1700};
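
/*
 * Illustrative sketch: filling the ring "template" before calling
 * vxge_hw_vpath_open(). The my_* callbacks and struct my_rx_priv are
 * hypothetical driver functions/types:
 *
 *	struct vxge_hw_ring_attr ring_attr = {0};
 *
 *	ring_attr.callback	= my_rx_compl;
 *	ring_attr.rxd_init	= my_rxd_init;
 *	ring_attr.rxd_term	= my_rxd_term;
 *	ring_attr.userdata	= netdev;
 *	ring_attr.per_rxd_space	= sizeof(struct my_rx_priv);
 */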
1701
1702/**
1703 * function vxge_hw_fifo_callback_f - FIFO callback.
1704 * @vpath_handle: Virtual path whose fifo contains one or more completed
1705 * descriptors.
1706 * @txdlh: First completed descriptor.
1707 * @txdl_priv: Pointer to the allocated per-TxDL private space
1708 * @t_code: Transfer code, as per Titan User Guide.
1709 * Returned by HW.
1710 * @host_control: Opaque 64bit data stored by driver inside the Titan
1711 * descriptor prior to posting the latter on the fifo
1712 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
1713 * as is to the driver with each completed descriptor.
1714 * @userdata: Opaque per-fifo data specified at fifo open
1715 * time, via vxge_hw_vpath_open().
1716 *
1717 * Fifo completion callback (type declaration). A single per-fifo
1718 * callback is specified at fifo open time, via
1719 * vxge_hw_vpath_open(). Typically gets called as part of the processing
1720 * of the Interrupt Service Routine.
1721 *
1722 * Fifo callback gets called by HW if, and only if, there is at least
1723 * one new completion on a given fifo. Upon processing the first @txdlh, the
1724 * driver is expected to continue consuming completions using:
1725 * - vxge_hw_fifo_txdl_next_completed()
1726 *
1727 * Note that failure to process new completions in a timely fashion
1728 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
1729 *
1730 * Non-zero @t_code means failure to process the transmit descriptor.
1731 *
1732 * In the "transmit" case the failure could happen, for instance, when the
1733 * link is down, in which case Titan completes the descriptor because it
1734 * is not able to send the data out.
1735 *
1736 * For details please refer to Titan User Guide.
1737 *
1738 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
1739 */
1740/**
1741 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
1742 * @txdlh: First completed descriptor.
1743 * @txdl_priv: Pointer to the allocated per-TxDL private space
1744 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
1745 * @userdata: Per-fifo user data (a.k.a. context) specified at
1746 * fifo open time, via vxge_hw_vpath_open().
1747 *
1748 * Terminate descriptor callback. Unless NULL is specified in the
1749 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
1750 * HW invokes the callback as part of closing the fifo, prior to
1751 * de-allocating the fifo and associated data structures
1752 * (including descriptors).
1753 * The driver should use the callback to (for instance) unmap
1754 * and free DMA data buffers associated with the posted (state =
1755 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
1756 * as well as to perform other relevant cleanup.
1757 *
1758 * See also: struct vxge_hw_fifo_attr{}
1759 */
1760/**
1761 * struct vxge_hw_fifo_attr - Fifo open "template".
1762 * @callback: Fifo completion callback. HW invokes the callback when there
1763 * are new completions on that fifo. In many implementations
1764 * the @callback executes in the hw interrupt context.
1765 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
1766 * HW invokes the callback when closing the corresponding fifo.
1767 * See also vxge_hw_fifo_txdl_term_f{}.
1768 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
1769 * user as one of the @callback, and @txdl_term arguments.
1770 * @per_txdl_space: If specified (i.e., greater than zero): extra space
1771 * reserved by HW for each transmit descriptor. Can be used to
1772 * store, and retrieve on completion, information specific
1773 * to the driver.
1774 *
1775 * Fifo open "template". User fills the structure with fifo
1776 * attributes and passes it to vxge_hw_vpath_open().
1777 */
1778struct vxge_hw_fifo_attr {
1779
1780 enum vxge_hw_status (*callback)(
1781 struct __vxge_hw_fifo *fifo_handle,
1782 void *txdlh,
1783 enum vxge_hw_fifo_tcode t_code,
1784 void *userdata,
1785 struct sk_buff ***skb_ptr,
1786 int nr_skb, int *more);
1787
1788 void (*txdl_term)(
1789 void *txdlh,
1790 enum vxge_hw_txdl_state state,
1791 void *userdata);
1792
1793 void *userdata;
1794 u32 per_txdl_space;
1795};
1796
1797/**
1798 * struct vxge_hw_vpath_attr - Attributes of virtual path
1799 * @vp_id: Identifier of Virtual Path
1800 * @ring_attr: Attributes of ring for non-offload receive
1801 * @fifo_attr: Attributes of fifo for non-offload transmit
1802 *
1803 * Attributes of virtual path. This structure is passed as parameter
1804 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
1805 */
1806struct vxge_hw_vpath_attr {
1807 u32 vp_id;
1808 struct vxge_hw_ring_attr ring_attr;
1809 struct vxge_hw_fifo_attr fifo_attr;
1810};
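
/*
 * Illustrative sketch: opening a vpath with ring and fifo attributes
 * filled in (my_rx_compl/my_tx_compl are hypothetical driver callbacks;
 * error handling elided). vxge_hw_vpath_open() is declared later in
 * this header:
 *
 *	struct vxge_hw_vpath_attr attr = {0};
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	attr.vp_id = vp_id;
 *	attr.ring_attr.callback = my_rx_compl;
 *	attr.fifo_attr.callback = my_tx_compl;
 *	if (vxge_hw_vpath_open(devh, &attr, &vp) != VXGE_HW_OK)
 *		goto err_out;
 */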
1811
1812enum vxge_hw_status
1813__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1814 struct __vxge_hw_blockpool *blockpool,
1815 u32 pool_size,
1816 u32 pool_max);
1817
1818void
1819__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
1820
1821struct __vxge_hw_blockpool_entry *
1822__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
1823 u32 size);
1824
1825void
1826__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
1827 struct __vxge_hw_blockpool_entry *entry);
1828
1829void *
1830__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
1831 u32 size,
1832 struct vxge_hw_mempool_dma *dma_object);
1833
1834void
1835__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
1836 void *memblock,
1837 u32 size,
1838 struct vxge_hw_mempool_dma *dma_object);
1839
1840enum vxge_hw_status
1841__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
1842
1843enum vxge_hw_status
1844__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
1845
1846enum vxge_hw_status
1847vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
1848 struct vxge_hw_device_config *dev_config, int size);
1849
1850enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1851 void __iomem *bar0,
1852 struct vxge_hw_device_hw_info *hw_info);
1853
1854enum vxge_hw_status
1855__vxge_hw_vpath_fw_ver_get(
1856 u32 vp_id,
1857 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1858 struct vxge_hw_device_hw_info *hw_info);
1859
1860enum vxge_hw_status
1861__vxge_hw_vpath_card_info_get(
1862 u32 vp_id,
1863 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1864 struct vxge_hw_device_hw_info *hw_info);
1865
1866enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1867 struct vxge_hw_device_config *device_config);
1868
1869/**
1870 * vxge_hw_device_link_state_get - Get link state.
1871 * @devh: HW device handle.
1872 *
1873 * Get link state.
1874 * Returns: link state.
1875 */
1876static inline
1877enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
1878 struct __vxge_hw_device *devh)
1879{
1880 return devh->link_state;
1881}
1882
1883void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
1884
1885const u8 *
1886vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
1887
1888u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1889
1890const u8 *
1891vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1892
1893enum vxge_hw_status __devinit vxge_hw_device_initialize(
1894 struct __vxge_hw_device **devh,
1895 struct vxge_hw_device_attr *attr,
1896 struct vxge_hw_device_config *device_config);
1897
1898enum vxge_hw_status vxge_hw_device_getpause_data(
1899 struct __vxge_hw_device *devh,
1900 u32 port,
1901 u32 *tx,
1902 u32 *rx);
1903
1904enum vxge_hw_status vxge_hw_device_setpause_data(
1905 struct __vxge_hw_device *devh,
1906 u32 port,
1907 u32 tx,
1908 u32 rx);
1909
1910static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1911 unsigned long size,
1912 struct pci_dev **p_dmah,
1913 struct pci_dev **p_dma_acch)
1914{
1915 gfp_t flags;
1916 void *vaddr;
1917 unsigned long misaligned = 0;
1918 *p_dma_acch = *p_dmah = NULL;
1919
1920 if (in_interrupt())
1921 flags = GFP_ATOMIC | GFP_DMA;
1922 else
1923 flags = GFP_KERNEL | GFP_DMA;
1924
1925 size += VXGE_CACHE_LINE_SIZE;
1926
1927 vaddr = kmalloc((size), flags);
1928 if (vaddr == NULL)
1929 return vaddr;
1930 misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
1931 VXGE_CACHE_LINE_SIZE);
1932 *(unsigned long *)p_dma_acch = misaligned;
1933 vaddr = (void *)((u8 *)vaddr + misaligned);
1934 return vaddr;
1935}
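
/*
 * Illustrative pairing of the allocator above with vxge_os_dma_free()
 * below: the returned pointer is VXGE_CACHE_LINE_SIZE aligned, and the
 * number of padding bytes is stashed through @p_dma_acch so the free
 * path can recover the address kmalloc() actually returned:
 *
 *	struct pci_dev *dmah, *acch;
 *	void *blk;
 *
 *	blk = vxge_os_dma_malloc(pdev, size, &dmah, &acch);
 *	if (blk)
 *		vxge_os_dma_free(pdev, blk, &acch);
 */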
1936
1937extern void vxge_hw_blockpool_block_add(
1938 struct __vxge_hw_device *devh,
1939 void *block_addr,
1940 u32 length,
1941 struct pci_dev *dma_h,
1942 struct pci_dev *acc_handle);
1943
1944static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
1945 unsigned long size)
1946{
1947 gfp_t flags;
1948 void *vaddr;
1949
1950 if (in_interrupt())
1951 flags = GFP_ATOMIC | GFP_DMA;
1952 else
1953 flags = GFP_KERNEL | GFP_DMA;
1954
1955	vaddr = kmalloc(size, flags);
1956	if (!vaddr)
		return;	/* don't add a failed allocation to the pool */
1957	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1958}
1959
1960static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1961 struct pci_dev **p_dma_acch)
1962{
1963 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1964 u8 *tmp = (u8 *)vaddr;
1965 tmp -= misaligned;
1966 kfree((void *)tmp);
1967}
1968
1969/*
1970 * __vxge_hw_mempool_item_priv - returns a pointer to the per-item private space
1971 */
1972static inline void*
1973__vxge_hw_mempool_item_priv(
1974 struct vxge_hw_mempool *mempool,
1975 u32 memblock_idx,
1976 void *item,
1977 u32 *memblock_item_idx)
1978{
1979 ptrdiff_t offset;
1980 void *memblock = mempool->memblocks_arr[memblock_idx];
1981
1983 offset = (u32)((u8 *)item - (u8 *)memblock);
1984 vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
1985
1986 (*memblock_item_idx) = (u32) offset / mempool->item_size;
1987 vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
1988
1989 return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
1990 (*memblock_item_idx) * mempool->items_priv_size;
1991}
1992
1993enum vxge_hw_status
1994__vxge_hw_mempool_grow(
1995 struct vxge_hw_mempool *mempool,
1996 u32 num_allocate,
1997 u32 *num_allocated);
1998
1999struct vxge_hw_mempool*
2000__vxge_hw_mempool_create(
2001 struct __vxge_hw_device *devh,
2002 u32 memblock_size,
2003 u32 item_size,
2004 u32 private_size,
2005 u32 items_initial,
2006 u32 items_max,
2007 struct vxge_hw_mempool_cbs *mp_callback,
2008 void *userdata);
2009
2010struct __vxge_hw_channel*
2011__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2012 enum __vxge_hw_channel_type type, u32 length,
2013 u32 per_dtr_space, void *userdata);
2014
2015void
2016__vxge_hw_channel_free(
2017 struct __vxge_hw_channel *channel);
2018
2019enum vxge_hw_status
2020__vxge_hw_channel_initialize(
2021 struct __vxge_hw_channel *channel);
2022
2023enum vxge_hw_status
2024__vxge_hw_channel_reset(
2025 struct __vxge_hw_channel *channel);
2026
2027/*
2028 * __vxge_hw_fifo_txdl_priv - Return the per-TxDL private data
2029 * associated with a TxD.
2030 * @fifo: Fifo
2031 * @txdp: Pointer to a TxD
2032 */
2033static inline struct __vxge_hw_fifo_txdl_priv *
2034__vxge_hw_fifo_txdl_priv(
2035 struct __vxge_hw_fifo *fifo,
2036 struct vxge_hw_fifo_txd *txdp)
2037{
2038 return (struct __vxge_hw_fifo_txdl_priv *)
2039 (((char *)((ulong)txdp->host_control)) +
2040 fifo->per_txdl_space);
2041}
2042
2043enum vxge_hw_status vxge_hw_vpath_open(
2044 struct __vxge_hw_device *devh,
2045 struct vxge_hw_vpath_attr *attr,
2046 struct __vxge_hw_vpath_handle **vpath_handle);
2047
2048enum vxge_hw_status
2049__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
2050
2051enum vxge_hw_status vxge_hw_vpath_close(
2052 struct __vxge_hw_vpath_handle *vpath_handle);
2053
2054enum vxge_hw_status
2055vxge_hw_vpath_reset(
2056 struct __vxge_hw_vpath_handle *vpath_handle);
2057
2058enum vxge_hw_status
2059vxge_hw_vpath_recover_from_reset(
2060 struct __vxge_hw_vpath_handle *vpath_handle);
2061
2062void
2063vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
2064
2065enum vxge_hw_status
2066vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
2067
2068enum vxge_hw_status vxge_hw_vpath_mtu_set(
2069 struct __vxge_hw_vpath_handle *vpath_handle,
2070 u32 new_mtu);
2071
2072enum vxge_hw_status vxge_hw_vpath_stats_enable(
2073 struct __vxge_hw_vpath_handle *vpath_handle);
2074
2075enum vxge_hw_status
2076__vxge_hw_vpath_stats_access(
2077 struct __vxge_hw_virtualpath *vpath,
2078 u32 operation,
2079 u32 offset,
2080 u64 *stat);
2081
2082enum vxge_hw_status
2083__vxge_hw_vpath_xmac_tx_stats_get(
2084 struct __vxge_hw_virtualpath *vpath,
2085 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
2086
2087enum vxge_hw_status
2088__vxge_hw_vpath_xmac_rx_stats_get(
2089 struct __vxge_hw_virtualpath *vpath,
2090 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
2091
2092enum vxge_hw_status
2093__vxge_hw_vpath_stats_get(
2094 struct __vxge_hw_virtualpath *vpath,
2095 struct vxge_hw_vpath_stats_hw_info *hw_stats);
2096
2097void
2098vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2099
2100enum vxge_hw_status
2101__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
2102
2103void
2104__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
2105
2106enum vxge_hw_status
2107__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
2108
2109enum vxge_hw_status
2110__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
2111
2112enum vxge_hw_status
2113__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2114 struct vxge_hw_vpath_reg __iomem *vpath_reg);
2115
2116enum vxge_hw_status
2117__vxge_hw_device_register_poll(
2118 void __iomem *reg,
2119 u64 mask, u32 max_millis);
2120
2121#ifndef readq
2122static inline u64 readq(void __iomem *addr)
2123{
2124 u64 ret = 0;
2125 ret = readl(addr + 4);
2126 ret <<= 32;
2127 ret |= readl(addr);
2128
2129 return ret;
2130}
2131#endif
2132
2133#ifndef writeq
2134static inline void writeq(u64 val, void __iomem *addr)
2135{
2136 writel((u32) (val), addr);
2137 writel((u32) (val >> 32), (addr + 4));
2138}
2139#endif
2140
2141static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
2142{
2143 writel(val, addr + 4);
2144}
2145
2146static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2147{
2148 writel(val, addr);
2149}
2150
2151static inline enum vxge_hw_status
2152__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
2153 u64 mask, u32 max_millis)
2154{
2155 enum vxge_hw_status status = VXGE_HW_OK;
2156
2157 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
2158 wmb();
2159 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
2160 wmb();
2161
2162 status = __vxge_hw_device_register_poll(addr, mask, max_millis);
2163 return status;
2164}
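
/*
 * Illustrative sketch: a typical caller writes a 64-bit command register
 * in two 32-bit halves (with write barriers between them) and then polls
 * until a strobe bit clears. The register and macro names below are as
 * used elsewhere in this driver and are shown for illustration only:
 *
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vp_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */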
2165
2166struct vxge_hw_toc_reg __iomem *
2167__vxge_hw_device_toc_get(void __iomem *bar0);
2168
2169enum vxge_hw_status
2170__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
2171
2172void
2173__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
2174
2175void
2176__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
2177
2178enum vxge_hw_status
2179vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2180
2181enum vxge_hw_status
2182__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
2183
2184enum vxge_hw_status
2185__vxge_hw_vpath_pci_read(
2186 struct __vxge_hw_virtualpath *vpath,
2187 u32 phy_func_0,
2188 u32 offset,
2189 u32 *val);
2190
2191enum vxge_hw_status
2192__vxge_hw_vpath_addr_get(
2193 u32 vp_id,
2194 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2195 u8 (macaddr)[ETH_ALEN],
2196 u8 (macaddr_mask)[ETH_ALEN]);
2197
2198u32
2199__vxge_hw_vpath_func_id_get(
2200 u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
2201
2202enum vxge_hw_status
2203__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2204
2205enum vxge_hw_status
2206vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2207/**
2208 * vxge_debug
2209 * @level: level of debug verbosity.
2210 * @mask: mask for the debug
2211 * @buf: Circular buffer for tracing
2212 * @fmt: printf like format string
2213 *
2214 * Provides logging facilities. Can be customized on a per-module
2215 * basis and/or by debug level. Input parameters, except
2216 * module and level, are the same as for POSIX printf. This macro
2217 * may be compiled out if the DEBUG macro was never defined.
2218 * See also: enum vxge_debug_level{}.
2219 */
2220
2221#define vxge_trace_aux(level, mask, fmt, ...) \
2222{\
2223 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2224}
2225
2226#define vxge_debug(module, level, mask, fmt, ...) { \
2227if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2228 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2229 if ((mask & VXGE_DEBUG_MASK) == mask)\
2230 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2231} \
2232}
2233
2234#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2235#define vxge_debug_ll(level, mask, fmt, ...) \
2236{\
2237 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
2238}
2239
2240#else
2241#define vxge_debug_ll(level, mask, fmt, ...)
2242#endif
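
/*
 * Illustrative usage of the logging macro defined above:
 *
 *	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_MASK,
 *		      "%s: vpath %d opened", __func__, vp_id);
 */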
2243
2244enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
2245 struct __vxge_hw_vpath_handle **vpath_handles,
2246 u32 vpath_count,
2247 u8 *mtable,
2248 u8 *itable,
2249 u32 itable_size);
2250
2251enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2252 struct __vxge_hw_vpath_handle *vpath_handle,
2253 enum vxge_hw_rth_algoritms algorithm,
2254 struct vxge_hw_rth_hash_types *hash_type,
2255 u16 bucket_size);
2256
2257#endif