Commit | Line | Data |
---|---|---|
ae06c70b | 1 | // SPDX-License-Identifier: GPL-2.0 |
51dce24b | 2 | /* Copyright(c) 2007 - 2018 Intel Corporation. */ |
f96a8a0b CW |
3 | |
4 | /* e1000_i210 | |
5 | * e1000_i211 | |
6 | */ | |
7 | ||
8 | #include <linux/types.h> | |
9 | #include <linux/if_ether.h> | |
10 | ||
11 | #include "e1000_hw.h" | |
12 | #include "e1000_i210.h" | |
13 | ||
167f3f71 JK |
14 | static s32 igb_update_flash_i210(struct e1000_hw *hw); |
15 | ||
7916a53d CW |
/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.
 *
 *  Acquisition is two-staged: first wait for the SW semaphore bit (SMBI)
 *  to clear, then set and read back the SW/FW semaphore bit (SWESMBI)
 *  until it latches.  Each stage polls up to nvm.word_size + 1 times with
 *  a 50 us delay per attempt.
 *
 *  Returns 0 on success, -E1000_ERR_NVM if either stage times out.
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Timeout scales with NVM size; one poll per NVM word plus one. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			/* One-shot recovery: only attempted the first time. */
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
f96a8a0b CW |
82 | |
/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 *
 *  Thin wrapper: delegates to igb_acquire_swfw_sync_i210() with the
 *  EEPROM software/firmware semaphore mask.
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
96 | ||
/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 *
 *  Counterpart of igb_acquire_nvm_i210(); releases the same EEPROM mask.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
108 | ||
/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 *
 *  Protocol: each attempt takes the HW semaphore, checks that neither the
 *  SW bit (mask) nor the FW bit (mask << 16) is set in SW_FW_SYNC, and if
 *  the resource is busy drops the HW semaphore and retries after 5 ms.
 *  On success the SW bit is set in SW_FW_SYNC and the HW semaphore is
 *  released again - only the SW_FW_SYNC bit is held by the caller.
 *
 *  Returns 0 on success, -E1000_ERR_SWFW_SYNC on timeout or if the HW
 *  semaphore cannot be taken.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* FW owns the upper 16 bits */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource; HW semaphore is still held here, so this
	 * read-modify-write of SW_FW_SYNC is race-free.
	 */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}
154 | ||
/**
 *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* NOTE(review): spins until the HW semaphore is obtained - release
	 * must not fail, but this loop is unbounded if the semaphore never
	 * becomes available.
	 */
	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	/* Clear our ownership bit(s) under the HW semaphore */
	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
176 | ||
f96a8a0b CW |
177 | /** |
178 | * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register | |
179 | * @hw: pointer to the HW structure | |
180 | * @offset: offset of word in the Shadow Ram to read | |
181 | * @words: number of words to read | |
182 | * @data: word read from the Shadow Ram | |
183 | * | |
184 | * Reads a 16 bit word from the Shadow Ram using the EERD register. | |
185 | * Uses necessary synchronization semaphores. | |
186 | **/ | |
167f3f71 JK |
187 | static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, |
188 | u16 *data) | |
f96a8a0b | 189 | { |
23d87824 | 190 | s32 status = 0; |
f96a8a0b CW |
191 | u16 i, count; |
192 | ||
193 | /* We cannot hold synchronization semaphores for too long, | |
194 | * because of forceful takeover procedure. However it is more efficient | |
b980ac18 JK |
195 | * to read in bursts than synchronizing access for each word. |
196 | */ | |
f96a8a0b CW |
197 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { |
198 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
199 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
23d87824 | 200 | if (!(hw->nvm.ops.acquire(hw))) { |
f96a8a0b CW |
201 | status = igb_read_nvm_eerd(hw, offset, count, |
202 | data + i); | |
203 | hw->nvm.ops.release(hw); | |
204 | } else { | |
205 | status = E1000_ERR_SWFW_SYNC; | |
206 | } | |
207 | ||
23d87824 | 208 | if (status) |
f96a8a0b CW |
209 | break; |
210 | } | |
211 | ||
212 | return status; | |
213 | } | |
214 | ||
f96a8a0b CW |
215 | /** |
216 | * igb_write_nvm_srwr - Write to Shadow Ram using EEWR | |
217 | * @hw: pointer to the HW structure | |
218 | * @offset: offset within the Shadow Ram to be written to | |
219 | * @words: number of words to write | |
220 | * @data: 16 bit word(s) to be written to the Shadow Ram | |
221 | * | |
222 | * Writes data to Shadow Ram at offset using EEWR register. | |
223 | * | |
224 | * If igb_update_nvm_checksum is not called after this function , the | |
225 | * Shadow Ram will most likely contain an invalid checksum. | |
226 | **/ | |
227 | static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, | |
228 | u16 *data) | |
229 | { | |
230 | struct e1000_nvm_info *nvm = &hw->nvm; | |
231 | u32 i, k, eewr = 0; | |
232 | u32 attempts = 100000; | |
23d87824 | 233 | s32 ret_val = 0; |
f96a8a0b | 234 | |
b980ac18 | 235 | /* A check for invalid values: offset too large, too many words, |
f96a8a0b CW |
236 | * too many words for the offset, and not enough words. |
237 | */ | |
238 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | |
239 | (words == 0)) { | |
240 | hw_dbg("nvm parameter(s) out of bounds\n"); | |
241 | ret_val = -E1000_ERR_NVM; | |
242 | goto out; | |
243 | } | |
244 | ||
245 | for (i = 0; i < words; i++) { | |
246 | eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | | |
247 | (data[i] << E1000_NVM_RW_REG_DATA) | | |
248 | E1000_NVM_RW_REG_START; | |
249 | ||
250 | wr32(E1000_SRWR, eewr); | |
251 | ||
252 | for (k = 0; k < attempts; k++) { | |
253 | if (E1000_NVM_RW_REG_DONE & | |
254 | rd32(E1000_SRWR)) { | |
23d87824 | 255 | ret_val = 0; |
f96a8a0b CW |
256 | break; |
257 | } | |
258 | udelay(5); | |
259 | } | |
260 | ||
23d87824 | 261 | if (ret_val) { |
f96a8a0b CW |
262 | hw_dbg("Shadow RAM write EEWR timed out\n"); |
263 | break; | |
264 | } | |
265 | } | |
266 | ||
267 | out: | |
268 | return ret_val; | |
269 | } | |
270 | ||
7916a53d CW |
271 | /** |
272 | * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR | |
273 | * @hw: pointer to the HW structure | |
274 | * @offset: offset within the Shadow RAM to be written to | |
275 | * @words: number of words to write | |
276 | * @data: 16 bit word(s) to be written to the Shadow RAM | |
277 | * | |
278 | * Writes data to Shadow RAM at offset using EEWR register. | |
279 | * | |
280 | * If e1000_update_nvm_checksum is not called after this function , the | |
281 | * data will not be committed to FLASH and also Shadow RAM will most likely | |
282 | * contain an invalid checksum. | |
283 | * | |
284 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer | |
285 | * partially written. | |
b980ac18 | 286 | **/ |
167f3f71 JK |
287 | static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, |
288 | u16 *data) | |
7916a53d | 289 | { |
23d87824 | 290 | s32 status = 0; |
7916a53d CW |
291 | u16 i, count; |
292 | ||
293 | /* We cannot hold synchronization semaphores for too long, | |
294 | * because of forceful takeover procedure. However it is more efficient | |
295 | * to write in bursts than synchronizing access for each word. | |
296 | */ | |
297 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
298 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
299 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
23d87824 | 300 | if (!(hw->nvm.ops.acquire(hw))) { |
7916a53d CW |
301 | status = igb_write_nvm_srwr(hw, offset, count, |
302 | data + i); | |
303 | hw->nvm.ops.release(hw); | |
304 | } else { | |
305 | status = E1000_ERR_SWFW_SYNC; | |
306 | } | |
307 | ||
23d87824 | 308 | if (status) |
7916a53d CW |
309 | break; |
310 | } | |
311 | ||
312 | return status; | |
313 | } | |
314 | ||
f96a8a0b | 315 | /** |
ef3a0092 CW |
316 | * igb_read_invm_word_i210 - Reads OTP |
317 | * @hw: pointer to the HW structure | |
318 | * @address: the word address (aka eeprom offset) to read | |
319 | * @data: pointer to the data read | |
320 | * | |
321 | * Reads 16-bit words from the OTP. Return error when the word is not | |
322 | * stored in OTP. | |
323 | **/ | |
324 | static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) | |
325 | { | |
326 | s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; | |
327 | u32 invm_dword; | |
328 | u16 i; | |
329 | u8 record_type, word_address; | |
330 | ||
331 | for (i = 0; i < E1000_INVM_SIZE; i++) { | |
332 | invm_dword = rd32(E1000_INVM_DATA_REG(i)); | |
333 | /* Get record type */ | |
334 | record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); | |
335 | if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) | |
336 | break; | |
337 | if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) | |
338 | i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; | |
339 | if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) | |
340 | i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; | |
341 | if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { | |
342 | word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); | |
343 | if (word_address == address) { | |
344 | *data = INVM_DWORD_TO_WORD_DATA(invm_dword); | |
c5ffe7e1 | 345 | hw_dbg("Read INVM Word 0x%02x = %x\n", |
ef3a0092 | 346 | address, *data); |
23d87824 | 347 | status = 0; |
ef3a0092 CW |
348 | break; |
349 | } | |
350 | } | |
351 | } | |
23d87824 | 352 | if (status) |
ef3a0092 CW |
353 | hw_dbg("Requested word 0x%02x not found in OTP\n", address); |
354 | return status; | |
355 | } | |
356 | ||
/**
 *  igb_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: offset of the word to read (NVM word address)
 *  @words: number of words to read (unused; reads are per-offset)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 *  For words absent from the iNVM, well-known defaults are substituted
 *  (only the MAC address is required to actually be present); device and
 *  subsystem IDs are served from the cached hw fields.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 words __always_unused, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* MAC address spans three consecutive words; OR the error
		 * codes together - any failure makes ret_val non-zero.
		 */
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			/* Not programmed in OTP: fall back to default */
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	/* IDs below never live in iNVM; serve the cached PCI config values */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
435 | ||
09e77287 CW |
/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *
 *  The iNVM is snapshotted into a local buffer, then scanned backwards
 *  from the end of the record area (the last E1000_INVM_ULT_BYTES_SIZE
 *  bytes are excluded).  Version and image-type fields are packed two per
 *  dword; which half holds the live value depends on which locations have
 *  been burned, hence the odd/even location cases below.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver) {
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		/* Scan from the last record block towards the first */
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		/* Split the raw version word into major/minor */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
529 | ||
f96a8a0b CW |
530 | /** |
531 | * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum | |
532 | * @hw: pointer to the HW structure | |
533 | * | |
534 | * Calculates the EEPROM checksum by reading/adding each word of the EEPROM | |
535 | * and then verifies that the sum of the EEPROM is equal to 0xBABA. | |
536 | **/ | |
167f3f71 | 537 | static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) |
f96a8a0b | 538 | { |
23d87824 | 539 | s32 status = 0; |
f96a8a0b CW |
540 | s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); |
541 | ||
23d87824 | 542 | if (!(hw->nvm.ops.acquire(hw))) { |
f96a8a0b | 543 | |
b980ac18 | 544 | /* Replace the read function with semaphore grabbing with |
f96a8a0b CW |
545 | * the one that skips this for a while. |
546 | * We have semaphore taken already here. | |
547 | */ | |
548 | read_op_ptr = hw->nvm.ops.read; | |
549 | hw->nvm.ops.read = igb_read_nvm_eerd; | |
550 | ||
551 | status = igb_validate_nvm_checksum(hw); | |
552 | ||
553 | /* Revert original read operation. */ | |
554 | hw->nvm.ops.read = read_op_ptr; | |
555 | ||
556 | hw->nvm.ops.release(hw); | |
557 | } else { | |
558 | status = E1000_ERR_SWFW_SYNC; | |
559 | } | |
560 | ||
561 | return status; | |
562 | } | |
563 | ||
f96a8a0b CW |
/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Next commit EEPROM data onto the Flash.
 *
 *  Returns 0 on success; -E1000_ERR_SWFW_SYNC if the NVM semaphore cannot
 *  be acquired, or the first read/write/flash-commit error encountered.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word below the checksum word */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release before bailing out of the
				 * semaphore-held section
				 */
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Checksum word makes the whole NVM sum to NVM_SUM (0xBABA) */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated Shadow RAM to flash */
		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
621 | ||
7916a53d CW |
622 | /** |
623 | * igb_pool_flash_update_done_i210 - Pool FLUDONE status. | |
624 | * @hw: pointer to the HW structure | |
625 | * | |
b980ac18 | 626 | **/ |
7916a53d CW |
627 | static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) |
628 | { | |
629 | s32 ret_val = -E1000_ERR_NVM; | |
630 | u32 i, reg; | |
631 | ||
632 | for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { | |
633 | reg = rd32(E1000_EECD); | |
634 | if (reg & E1000_EECD_FLUDONE_I210) { | |
23d87824 | 635 | ret_val = 0; |
7916a53d CW |
636 | break; |
637 | } | |
638 | udelay(5); | |
639 | } | |
640 | ||
641 | return ret_val; | |
642 | } | |
643 | ||
5a823d8c CW |
644 | /** |
645 | * igb_get_flash_presence_i210 - Check if flash device is detected. | |
646 | * @hw: pointer to the HW structure | |
647 | * | |
648 | **/ | |
649 | bool igb_get_flash_presence_i210(struct e1000_hw *hw) | |
650 | { | |
651 | u32 eec = 0; | |
652 | bool ret_val = false; | |
653 | ||
654 | eec = rd32(E1000_EECD); | |
655 | if (eec & E1000_EECD_FLASH_DETECTED_I210) | |
656 | ret_val = true; | |
657 | ||
658 | return ret_val; | |
659 | } | |
660 | ||
f96a8a0b CW |
/**
 *  igb_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 *  Waits for any in-progress flash update to finish, triggers a new
 *  update via the EECD FLUPD bit, then waits for its completion.
 *
 *  Returns 0 on success, -E1000_ERR_NVM if either wait times out.
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	/* Make sure no previous update is still in flight */
	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	/* Kick off the flash update */
	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
689 | ||
f96a8a0b CW |
690 | /** |
691 | * igb_valid_led_default_i210 - Verify a valid default LED config | |
692 | * @hw: pointer to the HW structure | |
693 | * @data: pointer to the NVM (EEPROM) | |
694 | * | |
695 | * Read the EEPROM for the current default LED configuration. If the | |
696 | * LED configuration is not valid, set to a valid LED configuration. | |
697 | **/ | |
698 | s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) | |
699 | { | |
700 | s32 ret_val; | |
701 | ||
702 | ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); | |
703 | if (ret_val) { | |
704 | hw_dbg("NVM Read Error\n"); | |
705 | goto out; | |
706 | } | |
707 | ||
708 | if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { | |
709 | switch (hw->phy.media_type) { | |
710 | case e1000_media_type_internal_serdes: | |
711 | *data = ID_LED_DEFAULT_I210_SERDES; | |
712 | break; | |
713 | case e1000_media_type_copper: | |
714 | default: | |
715 | *data = ID_LED_DEFAULT_I210; | |
716 | break; | |
717 | } | |
718 | } | |
719 | out: | |
720 | return ret_val; | |
721 | } | |
87371b9d MV |
722 | |
/**
 *  __igb_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 *
 *  Implements the Clause-45 style indirect access sequence through the
 *  PHY's MMD access-control (MMDAC) and address/data (MMDAAD) registers:
 *  select device, latch the register address, switch MMDAC to data mode,
 *  then read or write the data register.  MMDAC is reset to 0 afterwards.
 *
 *  Returns 0 on success or the first PHY register access error.
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	/* Select the MMD device (address function) */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	/* Latch the register address within that device */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	/* Switch MMDAC to data mode for the same device */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
763 | ||
/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 *
 *  Convenience wrapper around __igb_access_xmdio_reg() in read mode.
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
775 | ||
/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 *
 *  Convenience wrapper around __igb_access_xmdio_reg() in write mode;
 *  @data is passed by address to share the read-path signature.
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
5a823d8c CW |
787 | |
788 | /** | |
789 | * igb_init_nvm_params_i210 - Init NVM func ptrs. | |
790 | * @hw: pointer to the HW structure | |
791 | **/ | |
792 | s32 igb_init_nvm_params_i210(struct e1000_hw *hw) | |
793 | { | |
794 | s32 ret_val = 0; | |
795 | struct e1000_nvm_info *nvm = &hw->nvm; | |
796 | ||
797 | nvm->ops.acquire = igb_acquire_nvm_i210; | |
798 | nvm->ops.release = igb_release_nvm_i210; | |
799 | nvm->ops.valid_led_default = igb_valid_led_default_i210; | |
800 | ||
801 | /* NVM Function Pointers */ | |
802 | if (igb_get_flash_presence_i210(hw)) { | |
803 | hw->nvm.type = e1000_nvm_flash_hw; | |
804 | nvm->ops.read = igb_read_nvm_srrd_i210; | |
805 | nvm->ops.write = igb_write_nvm_srwr_i210; | |
806 | nvm->ops.validate = igb_validate_nvm_checksum_i210; | |
807 | nvm->ops.update = igb_update_nvm_checksum_i210; | |
808 | } else { | |
809 | hw->nvm.type = e1000_nvm_invm; | |
ef3a0092 | 810 | nvm->ops.read = igb_read_invm_i210; |
5a823d8c CW |
811 | nvm->ops.write = NULL; |
812 | nvm->ops.validate = NULL; | |
813 | nvm->ops.update = NULL; | |
814 | } | |
815 | return ret_val; | |
816 | } | |
94826487 TF |
817 | |
/**
 *  igb_pll_workaround_i210
 *  @hw: pointer to the HW structure
 *
 *  Works around an errata in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 *
 *  Up to E1000_MAX_PLL_TRIES times: read the PHY PLL-frequency register;
 *  if it still reports unconfigured, reset the internal PHY, rewrite the
 *  autoload word, and bounce the device through D3 via PCI config space.
 *  MDICNFG, WUC and the PHY page-select register are restored on exit.
 *
 *  Returns 0 once the PLL reports configured, -E1000_ERR_PHY otherwise.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	/* Force internal MDIO while the workaround runs */
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	/* Select the page holding the PLL frequency register */
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		/* Stage the workaround autoload value for the power cycle */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* Bounce the device through D3 and back */
		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Restore the original autoload value */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	/* Restore default PHY register page */
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
08c99129 TF |
885 | |
886 | /** | |
887 | * igb_get_cfg_done_i210 - Read config done bit | |
888 | * @hw: pointer to the HW structure | |
889 | * | |
890 | * Read the management control register for the config done bit for | |
891 | * completion status. NOTE: silicon which is EEPROM-less will fail trying | |
892 | * to read the config done bit, so an error is *ONLY* logged and returns | |
893 | * 0. If we were to return with error, EEPROM-less silicon | |
894 | * would not be able to be reset or change link. | |
895 | **/ | |
896 | s32 igb_get_cfg_done_i210(struct e1000_hw *hw) | |
897 | { | |
898 | s32 timeout = PHY_CFG_TIMEOUT; | |
899 | u32 mask = E1000_NVM_CFG_DONE_PORT_0; | |
900 | ||
901 | while (timeout) { | |
902 | if (rd32(E1000_EEMNGCTL_I210) & mask) | |
903 | break; | |
904 | usleep_range(1000, 2000); | |
905 | timeout--; | |
906 | } | |
907 | if (!timeout) | |
908 | hw_dbg("MNG configuration cycle has not completed.\n"); | |
909 | ||
910 | return 0; | |
911 | } |