drivers/thunderbolt/icm.c
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD                    0x30
#define PCIE2CIO_CMD_TIMEOUT            BIT(31)
#define PCIE2CIO_CMD_START              BIT(30)
#define PCIE2CIO_CMD_WRITE              BIT(21)
#define PCIE2CIO_CMD_CS_MASK            GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT           19
#define PCIE2CIO_CMD_PORT_MASK          GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT         13

#define PCIE2CIO_WRDATA                 0x34
#define PCIE2CIO_RDDATA                 0x38

#define PHY_PORT_CS1                    0x37
#define PHY_PORT_CS1_LINK_DISABLE       BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT                     5000    /* ms */
#define ICM_APPROVE_TIMEOUT             10000   /* ms */
#define ICM_MAX_LINK                    4
#define ICM_MAX_DEPTH                   6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to the ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *                 controller is connected to. This is only set for
 *                 systems where the ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where the PCIe2CIO mailbox resides
 *           (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for the given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
        struct mutex request_lock;
        struct delayed_work rescan_work;
        struct pci_dev *upstream_port;
        size_t max_boot_acl;
        int vnd_cap;
        bool safe_mode;
        bool rpm;
        bool (*is_supported)(struct tb *tb);
        int (*get_mode)(struct tb *tb);
        int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
        void (*save_devices)(struct tb *tb);
        int (*driver_ready)(struct tb *tb,
                            enum tb_security_level *security_level,
                            size_t *nboot_acl, bool *rpm);
        void (*device_connected)(struct tb *tb,
                                 const struct icm_pkg_header *hdr);
        void (*device_disconnected)(struct tb *tb,
                                    const struct icm_pkg_header *hdr);
        void (*xdomain_connected)(struct tb *tb,
                                  const struct icm_pkg_header *hdr);
        void (*xdomain_disconnected)(struct tb *tb,
                                     const struct icm_pkg_header *hdr);
};

struct icm_notification {
        struct work_struct work;
        struct icm_pkg_header *pkg;
        struct tb *tb;
};

struct ep_name_entry {
        u8 len;
        u8 type;
        u8 data[0];
};

#define EP_NAME_INTEL_VSS       0x10

/* Intel Vendor specific structure */
struct intel_vss {
        u16 vendor;
        u16 model;
        u8 mc;
        u8 flags;
        u16 pci_devid;
        u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3    BIT(0)

static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
        const void *end = ep_name + size;

        while (ep_name < end) {
                const struct ep_name_entry *ep = ep_name;

                if (!ep->len)
                        break;
                if (ep_name + ep->len > end)
                        break;

                if (ep->type == EP_NAME_INTEL_VSS)
                        return (const struct intel_vss *)ep->data;

                ep_name += ep->len;
        }

        return NULL;
}

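/*
 * The ICM private data is allocated by the domain core directly after
 * struct tb (this is what tb_priv() hands out), so the domain pointer
 * can be recovered by stepping back over the containing structure.
 */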
static inline struct tb *icm_to_tb(struct icm *icm)
{
        return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
        u8 link;

        link = depth ? route >> ((depth - 1) * 8) : route;
        return tb_phy_port_from_link(link);
}

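/*
 * Links are numbered from 1 and each physical port exposes two links
 * that form a dual-link pair: (1, 2), (3, 4) and so on. Flipping the
 * lowest bit of the zero-based link number yields the sibling link,
 * e.g. link 1 maps to 2 and link 4 maps to 3.
 */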
static inline u8 dual_link_from_link(u8 link)
{
        return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
        return (u64)route_hi << 32 | route_lo;
}

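/*
 * A route string encodes one port number per hop, one byte per hop.
 * Masking off the topmost hop byte therefore gives the route to the
 * parent switch, e.g. route 0x0302 (depth 2) yields parent route 0x02.
 */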
static inline u64 get_parent_route(u64 route)
{
        int depth = tb_route_length(route);
        return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

static bool icm_match(const struct tb_cfg_request *req,
                      const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *res_hdr = pkg->buffer;
        const struct icm_pkg_header *req_hdr = req->request;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (res_hdr->code != req_hdr->code)
                return false;

        return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *hdr = pkg->buffer;

        if (hdr->packet_id < req->npackets) {
                size_t offset = hdr->packet_id * req->response_size;

                memcpy(req->response + offset, pkg->buffer, req->response_size);
        }

        return hdr->packet_id == hdr->total_packets - 1;
}

static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
                       unsigned int timeout_msec)
{
        struct icm *icm = tb_priv(tb);
        int retries = 3;

        do {
                struct tb_cfg_request *req;
                struct tb_cfg_result res;

                req = tb_cfg_request_alloc();
                if (!req)
                        return -ENOMEM;

                req->match = icm_match;
                req->copy = icm_copy;
                req->request = request;
                req->request_size = request_size;
                req->request_type = TB_CFG_PKG_ICM_CMD;
                req->response = response;
                req->npackets = npackets;
                req->response_size = response_size;
                req->response_type = TB_CFG_PKG_ICM_RESP;

                mutex_lock(&icm->request_lock);
                res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
                mutex_unlock(&icm->request_lock);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        return res.err == 1 ? -EIO : res.err;

                usleep_range(20, 50);
        } while (retries--);

        return -ETIMEDOUT;
}
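
/*
 * All ICM operations below follow the same basic pattern (a sketch):
 *
 *	struct icm_pkg_driver_ready request = {
 *		.hdr.code = ICM_DRIVER_READY,
 *	};
 *	struct icm_fr_pkg_driver_ready_response reply;
 *
 *	memset(&reply, 0, sizeof(reply));
 *	ret = icm_request(tb, &request, sizeof(request), &reply,
 *			  sizeof(reply), 1, ICM_TIMEOUT);
 *
 * followed, where the firmware reports failures that way, by a check of
 * ICM_FLAGS_ERROR in reply.hdr.flags.
 */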

static bool icm_fr_is_supported(struct tb *tb)
{
        return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
        int index;

        if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
                return 0;

        index = port >> ICM_PORT_INDEX_SHIFT;
        return index != 0xff ? index : 0;
}

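/*
 * The FR (Falcon Ridge) firmware has no direct route query, so the
 * route string is reconstructed by fetching the full topology and
 * walking the switches hop by hop from the root down to the requested
 * depth.
 */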
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_fr_pkg_get_topology_response *switches, *sw;
        struct icm_fr_pkg_get_topology request = {
                .hdr = { .code = ICM_GET_TOPOLOGY },
        };
        size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
        int ret, index;
        u8 i;

        switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
        if (!switches)
                return -ENOMEM;

        ret = icm_request(tb, &request, sizeof(request), switches,
                          sizeof(*switches), npackets, ICM_TIMEOUT);
        if (ret)
                goto err_free;

        sw = &switches[0];
        index = icm_fr_get_switch_index(sw->ports[link]);
        if (!index) {
                ret = -ENODEV;
                goto err_free;
        }

        sw = &switches[index];
        for (i = 1; i < depth; i++) {
                unsigned int j;

                if (!(sw->first_data & ICM_SWITCH_USED)) {
                        ret = -ENODEV;
                        goto err_free;
                }

                for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
                        index = icm_fr_get_switch_index(sw->ports[j]);
                        if (index > sw->switch_index) {
                                sw = &switches[index];
                                break;
                        }
                }
        }

        *route = get_route(sw->route_hi, sw->route_lo);

err_free:
        kfree(switches);
        return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_fr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

        return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_approve_device request;
        struct icm_fr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;

        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_add_device_key request;
        struct icm_fr_pkg_add_device_key_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_fr_pkg_challenge_device request;
        struct icm_fr_pkg_challenge_device_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_fr_pkg_approve_xdomain_response reply;
        struct icm_fr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

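/*
 * XDomain paths are torn down per physical port through the NHI
 * mailbox. The two commands with data 1 and 2 appear to mirror the
 * two-stage disconnect used by icm_tr_xdomain_tear_down() below.
 */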
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        u8 phy_port;
        u8 cmd;

        phy_port = tb_phy_port_from_link(xd->link);
        if (phy_port == 0)
                cmd = NHI_MAILBOX_DISCONNECT_PA;
        else
                cmd = NHI_MAILBOX_DISCONNECT_PB;

        nhi_mailbox_cmd(tb->nhi, cmd, 1);
        usleep_range(10, 50);
        nhi_mailbox_cmd(tb->nhi, cmd, 2);
        return 0;
}

static void add_switch(struct tb_switch *parent_sw, u64 route,
                       const uuid_t *uuid, const u8 *ep_name,
                       size_t ep_name_size, u8 connection_id, u8 connection_key,
                       u8 link, u8 depth, enum tb_security_level security_level,
                       bool authorized, bool boot)
{
        const struct intel_vss *vss;
        struct tb_switch *sw;

        pm_runtime_get_sync(&parent_sw->dev);

        sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
        if (!sw)
                goto out;

        sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
        sw->connection_id = connection_id;
        sw->connection_key = connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->authorized = authorized;
        sw->security_level = security_level;
        sw->boot = boot;

        vss = parse_intel_vss(ep_name, ep_name_size);
        if (vss)
                sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

        /* Link the two switches now */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

        if (tb_switch_add(sw)) {
                tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                tb_switch_put(sw);
        }

out:
        pm_runtime_mark_last_busy(&parent_sw->dev);
        pm_runtime_put_autosuspend(&parent_sw->dev);
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
                          u64 route, u8 connection_id, u8 connection_key,
                          u8 link, u8 depth, bool boot)
{
        /* Disconnect from parent */
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        /* Re-connect via updated port */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

        /* Update with the new addressing information */
        sw->config.route_hi = upper_32_bits(route);
        sw->config.route_lo = lower_32_bits(route);
        sw->connection_id = connection_id;
        sw->connection_key = connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->boot = boot;

        /* This switch still exists */
        sw->is_unplugged = false;
}

static void remove_switch(struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        parent_sw = tb_to_switch(sw->dev.parent);
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
                        const uuid_t *local_uuid, const uuid_t *remote_uuid,
                        u8 link, u8 depth)
{
        struct tb_xdomain *xd;

        pm_runtime_get_sync(&sw->dev);

        xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
        if (!xd)
                goto out;

        xd->link = link;
        xd->depth = depth;

        tb_port_at(route, sw)->xdomain = xd;

        tb_xdomain_add(xd);

out:
        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
        xd->link = link;
        xd->route = route;
        xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        tb_port_at(xd->route, sw)->xdomain = NULL;
        tb_xdomain_remove(xd);
}

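/*
 * Device connected events are also sent on resume for devices that are
 * still present, so the handler first tries to match an existing switch
 * by UUID, then by link/depth (including the dual-link sibling), and
 * removes any stale switch or XDomain before adding the new one.
 */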
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_connected *pkg =
                (const struct icm_fr_event_device_connected *)hdr;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        struct icm *icm = tb_priv(tb);
        bool authorized = false;
        struct tb_xdomain *xd;
        u8 link, depth;
        bool boot;
        u64 route;
        int ret;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
                        link, depth);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                u8 phy_port, sw_phy_port;

                parent_sw = tb_to_switch(sw->dev.parent);
                sw_phy_port = tb_phy_port_from_link(sw->link);
                phy_port = tb_phy_port_from_link(link);

                /*
                 * On resume ICM will send us connected events for the
                 * devices that still are present. However, that
                 * information might have changed for example by the
                 * fact that a switch on a dual-link connection might
                 * have been enumerated using the other link now. Make
                 * sure our bookkeeping matches that.
                 */
                if (sw->depth == depth && sw_phy_port == phy_port &&
                    !!sw->authorized == authorized) {
                        /*
                         * It was enumerated through another link so update
                         * route string accordingly.
                         */
                        if (sw->link != link) {
                                ret = icm->get_route(tb, link, depth, &route);
                                if (ret) {
                                        tb_err(tb, "failed to update route string for switch at %u.%u\n",
                                               link, depth);
                                        tb_switch_put(sw);
                                        return;
                                }
                        } else {
                                route = tb_route(sw);
                        }

                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      pkg->connection_key, link, depth, boot);
                        tb_switch_put(sw);
                        return;
                }

                /*
                 * User connected the same switch to another physical
                 * port or to another part of the topology. Remove the
                 * existing switch now before adding the new one.
                 */
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /*
         * If the switch was not found by UUID, look for a switch on the
         * same physical port (taking possible link aggregation into
         * account) and depth. If we find one it is definitely a stale
         * one so remove it first.
         */
        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
        }
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Remove existing XDomain connection if found */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %u.%u\n",
                       link, depth);
                return;
        }

        ret = icm->get_route(tb, link, depth, &route);
        if (ret) {
                tb_err(tb, "failed to find route string for switch at %u.%u\n",
                       link, depth);
                tb_switch_put(parent_sw);
                return;
        }

        add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
                   sizeof(pkg->ep_name), pkg->connection_id,
                   pkg->connection_key, link, depth, security_level,
                   authorized, boot);

        tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_disconnected *pkg =
                (const struct icm_fr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u8 link, depth;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_connected *pkg =
                (const struct icm_fr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u8 link, depth;
        u64 route;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                u8 xd_phy_port, phy_port;

                xd_phy_port = phy_port_from_route(xd->route, xd->depth);
                phy_port = phy_port_from_route(route, depth);

                if (xd->depth == depth && xd_phy_port == phy_port) {
                        update_xdomain(xd, route, link);
                        tb_xdomain_put(xd);
                        return;
                }

                /*
                 * If we find an existing XDomain connection remove it
                 * now. We need to go through login handshake and
                 * everything anyway to be able to re-establish the
                 * connection.
                 */
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * Check if there already is an XDomain in the same place as the
         * new one and in that case remove it because it is most likely
         * another host that got disconnected.
         */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (!xd) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        xd = tb_xdomain_find_by_link_depth(tb, dual_link,
                                                           depth);
        }
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = get_switch_at_route(tb->root_switch, route);
        if (sw)
                remove_switch(sw);

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
                    depth);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_disconnected *pkg =
                (const struct icm_fr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;

        /*
         * If the connection is through one or multiple devices, the
         * XDomain device is removed along with them so it is fine if we
         * cannot find it here.
         */
        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_tr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, 20000);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
        if (nboot_acl)
                *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
                                ICM_TR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

        return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_approve_device request;
        struct icm_tr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_add_device_key_response reply;
        struct icm_tr_pkg_add_device_key request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_tr_pkg_challenge_device_response reply;
        struct icm_tr_pkg_challenge_device request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_tr_pkg_approve_xdomain_response reply;
        struct icm_tr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
                                    int stage)
{
        struct icm_tr_pkg_disconnect_xdomain_response reply;
        struct icm_tr_pkg_disconnect_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_DISCONNECT_XDOMAIN;
        request.stage = stage;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        int ret;

        ret = icm_tr_xdomain_tear_down(tb, xd, 1);
        if (ret)
                return ret;

        usleep_range(10, 50);
        return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_device_connected *pkg =
                (const struct icm_tr_event_device_connected *)hdr;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        struct tb_xdomain *xd;
        bool authorized, boot;
        u64 route;

        /*
         * Currently we don't use the QoS information coming with the
         * device connected message so just ignore that extra packet for
         * now.
         */
        if (pkg->hdr.packet_id)
                return;

        route = get_route(pkg->route_hi, pkg->route_lo);
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
                        route);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                /* Update the switch if it is still in the same place */
                if (tb_route(sw) == route && !!sw->authorized == authorized) {
                        parent_sw = tb_to_switch(sw->dev.parent);
                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      0, 0, 0, boot);
                        tb_switch_put(sw);
                        return;
                }

                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Another switch with the same address */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* XDomain connection with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %llx\n", route);
                return;
        }

        add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
                   sizeof(pkg->ep_name), pkg->connection_id,
                   0, 0, 0, security_level, authorized, boot);

        tb_switch_put(parent_sw);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_device_disconnected *pkg =
                (const struct icm_tr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        sw = tb_switch_find_by_route(tb, route);
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_connected *pkg =
                (const struct icm_tr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u64 route;

        if (!tb->root_switch)
                return;

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                if (xd->route == route) {
                        update_xdomain(xd, route, 0);
                        tb_xdomain_put(xd);
                        return;
                }

                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /* An existing xdomain with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = get_switch_at_route(tb->root_switch, route);
        if (sw)
                remove_switch(sw);

        sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_disconnected *pkg =
                (const struct icm_tr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
        struct pci_dev *parent;

        parent = pci_upstream_bridge(pdev);
        while (parent) {
                if (!pci_is_pcie(parent))
                        return NULL;
                if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                parent = pci_upstream_bridge(parent);
        }

        if (!parent)
                return NULL;

        switch (parent->device) {
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
                return parent;
        }

        return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
        struct pci_dev *upstream_port;
        struct icm *icm = tb_priv(tb);

        /*
         * Starting from Alpine Ridge we can use ICM on Apple machines
         * as well. We just need to reset and re-enable it first.
         */
        if (!x86_apple_machine)
                return true;

        /*
         * Find the upstream PCIe port in case we need to do reset
         * through its vendor specific registers.
         */
        upstream_port = get_upstream_port(tb->nhi->pdev);
        if (upstream_port) {
                int cap;

                cap = pci_find_ext_capability(upstream_port,
                                              PCI_EXT_CAP_ID_VNDR);
                if (cap > 0) {
                        icm->upstream_port = upstream_port;
                        icm->vnd_cap = cap;

                        return true;
                }
        }

        return false;
}

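/*
 * Waits up to three seconds (60 x 50 ms) for the firmware to finish
 * NVM authentication before asking the current mode through the NHI
 * mailbox.
 */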
static int icm_ar_get_mode(struct tb *tb)
{
        struct tb_nhi *nhi = tb->nhi;
        int retries = 60;
        u32 val;

        do {
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        break;
                msleep(50);
        } while (--retries);

        if (!retries) {
                dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
                return -ENODEV;
        }

        return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_ar_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
        if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
                *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
                                ICM_AR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

        return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_ar_pkg_get_route_response reply;
        struct icm_ar_pkg_get_route request = {
                .hdr = { .code = ICM_GET_ROUTE },
                .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        *route = get_route(reply.route_hi, reply.route_lo);
        return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = { .code = ICM_PREBOOT_ACL },
        };
        int ret, i;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        for (i = 0; i < nuuids; i++) {
                u32 *uuid = (u32 *)&uuids[i];

                uuid[0] = reply.acl[i].uuid_lo;
                uuid[1] = reply.acl[i].uuid_hi;

                if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
                        /* Map empty entries to null UUID */
                        uuid[0] = 0;
                        uuid[1] = 0;
                } else if (uuid[0] != 0 || uuid[1] != 0) {
                        /* Upper two DWs are always ones */
                        uuid[2] = 0xffffffff;
                        uuid[3] = 0xffffffff;
                }
        }

        return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
                               size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = {
                        .code = ICM_PREBOOT_ACL,
                        .flags = ICM_FLAGS_WRITE,
                },
        };
        int ret, i;

        for (i = 0; i < nuuids; i++) {
                const u32 *uuid = (const u32 *)&uuids[i];

                if (uuid_is_null(&uuids[i])) {
                        /*
                         * Map null UUID to the empty (all ones) entries
                         * for ICM.
                         */
                        request.acl[i].uuid_lo = 0xffffffff;
                        request.acl[i].uuid_hi = 0xffffffff;
                } else {
                        /* Two high DWs need to be set to all ones */
                        if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
                                return -EINVAL;

                        request.acl[i].uuid_lo = uuid[0];
                        request.acl[i].uuid_hi = uuid[1];
                }
        }

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

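/*
 * Notifications are dispatched from the domain workqueue so the
 * handlers are free to sleep and to take tb->lock.
 */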
static void icm_handle_notification(struct work_struct *work)
{
        struct icm_notification *n = container_of(work, typeof(*n), work);
        struct tb *tb = n->tb;
        struct icm *icm = tb_priv(tb);

        mutex_lock(&tb->lock);

        /*
         * When the domain is stopped we flush its workqueue but before
         * that the root switch is removed. In that case we should treat
         * the queued events as being canceled.
         */
        if (tb->root_switch) {
                switch (n->pkg->code) {
                case ICM_EVENT_DEVICE_CONNECTED:
                        icm->device_connected(tb, n->pkg);
                        break;
                case ICM_EVENT_DEVICE_DISCONNECTED:
                        icm->device_disconnected(tb, n->pkg);
                        break;
                case ICM_EVENT_XDOMAIN_CONNECTED:
                        icm->xdomain_connected(tb, n->pkg);
                        break;
                case ICM_EVENT_XDOMAIN_DISCONNECTED:
                        icm->xdomain_disconnected(tb, n->pkg);
                        break;
                }
        }

        mutex_unlock(&tb->lock);

        kfree(n->pkg);
        kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                             const void *buf, size_t size)
{
        struct icm_notification *n;

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return;

        INIT_WORK(&n->work, icm_handle_notification);
        n->pkg = kmemdup(buf, size, GFP_KERNEL);
        if (!n->pkg) {
                kfree(n);
                return;
        }
        n->tb = tb;

        queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                   size_t *nboot_acl, bool *rpm)
{
        struct icm *icm = tb_priv(tb);
        unsigned int retries = 50;
        int ret;

        ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
        if (ret) {
                tb_err(tb, "failed to send driver ready to ICM\n");
                return ret;
        }

        /*
         * Hold on here until the switch config space is accessible so
         * that we can read root switch config successfully.
         */
        do {
                struct tb_cfg_result res;
                u32 tmp;

                res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
                                      0, 1, 100);
                if (!res.err)
                        return 0;

                msleep(50);
        } while (--retries);

        tb_err(tb, "failed to read root switch config space, giving up\n");
        return -ETIMEDOUT;
}

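/*
 * PCIe2CIO is a vendor specific mailbox in the upstream bridge config
 * space: the caller programs PCIE2CIO_CMD with the port, config space
 * and index plus the START bit, then polls until START clears. The
 * TIMEOUT bit set by hardware signals a failed transaction.
 */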
1473 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
1474 {
1475         unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
1476         u32 cmd;
1477
1478         do {
1479                 pci_read_config_dword(icm->upstream_port,
1480                                       icm->vnd_cap + PCIE2CIO_CMD, &cmd);
1481                 if (!(cmd & PCIE2CIO_CMD_START)) {
1482                         if (cmd & PCIE2CIO_CMD_TIMEOUT)
1483                                 break;
1484                         return 0;
1485                 }
1486
1487                 msleep(50);
1488         } while (time_before(jiffies, end));
1489
1490         return -ETIMEDOUT;
1491 }
1492
1493 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
1494                          unsigned int port, unsigned int index, u32 *data)
1495 {
1496         struct pci_dev *pdev = icm->upstream_port;
1497         int ret, vnd_cap = icm->vnd_cap;
1498         u32 cmd;
1499
1500         cmd = index;
1501         cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1502         cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1503         cmd |= PCIE2CIO_CMD_START;
1504         pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1505
1506         ret = pci2cio_wait_completion(icm, 5000);
1507         if (ret)
1508                 return ret;
1509
1510         pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
1511         return 0;
1512 }
1513
1514 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
1515                           unsigned int port, unsigned int index, u32 data)
1516 {
1517         struct pci_dev *pdev = icm->upstream_port;
1518         int vnd_cap = icm->vnd_cap;
1519         u32 cmd;
1520
1521         pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
1522
1523         cmd = index;
1524         cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1525         cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1526         cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
1527         pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1528
1529         return pci2cio_wait_completion(icm, 5000);
1530 }
1531
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Make the ARC processor wait for the CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

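/*
 * Start the ICM firmware if it is not already running: trigger a
 * firmware reset and then poll REG_FW_STS until NVM authentication is
 * reported done, up to roughly 3 seconds (10 x 300 ms).
 */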
static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

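/*
 * Each physical port consists of two null ports (lanes): ports 1 and 2
 * belong to physical port 0 and ports 3 and 4 to physical port 1. If
 * both links of the given physical port are up already, disable and
 * then re-enable them so that they come up from a clean state.
 */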
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

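/*
 * Bring the firmware to a usable state: start it if necessary, handle
 * safe mode and CM mode, and reset any physical ports that already
 * have something connected to them.
 */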
static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

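/*
 * Initializes the firmware and, unless it is in safe mode, sends the
 * driver ready message to discover the security level and the number
 * of supported preboot ACL entries.
 */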
static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
				 &icm->rpm);
	if (ret)
		return ret;

	/*
	 * Make sure the number of supported preboot ACL entries matches
	 * what we expect, or disable the whole feature.
	 */
	if (tb->nboot_acl > icm->max_boot_acl)
		tb->nboot_acl = 0;

	return 0;
}

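/*
 * On system suspend ask the firmware to save the connected devices (if
 * supported) and notify it that the driver is unloading.
 */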
static int icm_suspend(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (icm->save_devices)
		icm->save_devices(tb);

	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}

/*
 * Mark all switches (except the root switch) below this one
 * unplugged. The ICM firmware will send us an updated list of switches
 * after we have sent it the driver ready command. If a switch is not
 * in that list it will be removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

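/*
 * Walk the topology depth first and remove every switch and XDomain
 * connection that is still marked unplugged, i.e. was not reported
 * back by the firmware after resume.
 */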
static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

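/* Delayed work dropping the children that did not show up after resume */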
static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

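/*
 * Called when resuming (also from runtime resume). Marks everything
 * below the root switch unplugged, re-sends the driver ready message
 * and schedules a rescan to drop whatever was unplugged while the
 * domain was suspended.
 */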
static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now that all existing children should be resumed, start
	 * events from the ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL, NULL, NULL);

	/*
	 * We do not get notifications of devices that were unplugged
	 * during suspend, so schedule a rescan to clean them up, if
	 * any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_runtime_suspend(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}

static int icm_runtime_resume(struct tb *tb)
{
	/*
	 * We can reuse the same resume functionality as with system
	 * suspend.
	 */
	icm_complete(tb);
	return 0;
}

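/*
 * Allocate and add the root switch. In safe mode a minimal root switch
 * is created so that the NVM firmware of the controller can still be
 * upgraded.
 */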
static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side,
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
	tb->root_switch->rpm = icm->rpm;

	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;
	}

	return ret;
}

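/*
 * Tear down the domain: cancel any pending rescan, remove the root
 * switch and tell the firmware that the driver is unloading.
 */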
static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

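/*
 * Per-generation connection manager operations. Falcon Ridge has no
 * runtime PM or preboot ACL support, Alpine Ridge adds both, and Titan
 * Ridge additionally uses its own set of ICM messages.
 */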
/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Alpine Ridge */
static const struct tb_cm_ops icm_ar_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Titan Ridge */
static const struct tb_cm_ops icm_tr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_tr_approve_switch,
	.add_switch_key = icm_tr_add_switch_key,
	.challenge_switch_key = icm_tr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};

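/*
 * Allocates the Thunderbolt domain and hooks up the controller
 * generation specific callbacks based on the NHI PCI device ID.
 * Returns %NULL if ICM is not supported on this controller.
 */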
struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_fr_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_ar_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_ar_ops;
		break;

	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->driver_ready = icm_tr_driver_ready;
		icm->device_connected = icm_tr_device_connected;
		icm->device_disconnected = icm_tr_device_disconnected;
		icm->xdomain_connected = icm_tr_xdomain_connected;
		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
		tb->cm_ops = &icm_tr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}