// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

struct ufs_host {
        void (*late_init)(struct ufs_hba *hba);
};

enum intel_ufs_dsm_func_id {
        INTEL_DSM_FNS           =  0,
        INTEL_DSM_RESET         =  1,
};

struct intel_host {
        struct ufs_host ufs_host;
        u32             dsm_fns;
        u32             active_ltr;
        u32             idle_ltr;
        struct dentry   *debugfs_root;
        struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
        GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
                  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

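/*
 * Per the ACPI _DSM convention, function 0 returns a bitmap of the other
 * supported function indices, so a function is callable only when its bit
 * is set in the cached dsm_fns mask.
 */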
static bool __intel_dsm_supported(struct intel_host *host,
                                  enum intel_ufs_dsm_func_id fn)
{
        return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}

#define INTEL_DSM_SUPPORTED(host, name) \
        __intel_dsm_supported(host, INTEL_DSM_##name)

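/*
 * Evaluate a _DSM function and copy up to the first 4 bytes of the returned
 * buffer into *result, zero-padding if the buffer is shorter than that.
 */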
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
                       unsigned int fn, u32 *result)
{
        union acpi_object *obj;
        int err = 0;
        size_t len;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
        if (!obj)
                return -EOPNOTSUPP;

        if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
                err = -EINVAL;
                goto out;
        }

        len = min_t(size_t, obj->buffer.length, 4);

        *result = 0;
        memcpy(result, obj->buffer.pointer, len);
out:
        ACPI_FREE(obj);

        return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
                     unsigned int fn, u32 *result)
{
        if (!__intel_dsm_supported(intel_host, fn))
                return -EOPNOTSUPP;

        return __intel_dsm(intel_host, dev, fn, result);
}

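/* Cache the supported-function bitmap by evaluating _DSM function 0. */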
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
        int err;

        err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
        dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status status)
{
        /* Cannot enable ICE until after HC enable */
        if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
                u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

                hce |= CRYPTO_GENERAL_ENABLE;
                ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
        }

        return 0;
}

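/*
 * Turn off the local TX LCC before link startup if PA_LOCAL_TX_LCC_ENABLE
 * reads back as enabled.
 */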
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
        u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
        u32 lcc_enable = 0;

        ufshcd_dme_get(hba, attr, &lcc_enable);
        if (lcc_enable)
                ufshcd_disable_host_tx_lcc(hba);

        return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
                                         enum ufs_notify_change_status status)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                err = ufs_intel_disable_lcc(hba);
                break;
        case POST_CHANGE:
                break;
        default:
                break;
        }

        return err;
}

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
        struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
        int ret;

        pwr_info.lane_rx = lanes;
        pwr_info.lane_tx = lanes;
        ret = ufshcd_config_pwr_mode(hba, &pwr_info);
        if (ret)
                dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
                        __func__, lanes, ret);
        return ret;
}

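/*
 * LKF uses 2 RX/TX lanes in high-speed mode: if HS is requested while fewer
 * lanes are configured, switch to 2 lanes before the power mode change.
 */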
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
                                enum ufs_notify_change_status status,
                                struct ufs_pa_layer_attr *dev_max_params,
                                struct ufs_pa_layer_attr *dev_req_params)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                if (ufshcd_is_hs_mode(dev_max_params) &&
                    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
                        ufs_intel_set_lanes(hba, 2);
                memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
                break;
        case POST_CHANGE:
                if (ufshcd_is_hs_mode(dev_req_params)) {
                        u32 peer_granularity;

                        usleep_range(1000, 1250);
                        err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                                  &peer_granularity);
                }
                break;
        default:
                break;
        }

        return err;
}

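/*
 * When host and device report the same PA_GRANULARITY, raise the device's
 * PA_TACTIVATE by two units of that granularity for extra timing margin.
 */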
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
        u32 granularity, peer_granularity;
        u32 pa_tactivate, peer_pa_tactivate;
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
        if (ret)
                goto out;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
        if (ret)
                goto out;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
        if (ret)
                goto out;

        if (granularity == peer_granularity) {
                u32 new_peer_pa_tactivate = pa_tactivate + 2;

                ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
        }
out:
        return ret;
}

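/*
 * Each LTR register encodes a "requirement" flag in bit 15, a 2-bit scale
 * (1 us or 32 us here) and a 10-bit latency value.
 */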
#define INTEL_ACTIVELTR         0x804
#define INTEL_IDLELTR           0x808

#define INTEL_LTR_REQ           BIT(15)
#define INTEL_LTR_SCALE_MASK    GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US     (2 << 10)
#define INTEL_LTR_SCALE_32US    (3 << 10)
#define INTEL_LTR_VALUE_MASK    GENMASK(9, 0)

static void intel_cache_ltr(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
        host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

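/*
 * Values too large for the 10-bit field are converted from the 1 us to the
 * 32 us scale (a right shift by 5) and saturated at the maximum encodable
 * value.
 */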
static void intel_ltr_set(struct device *dev, s32 val)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct intel_host *host = ufshcd_get_variant(hba);
        u32 ltr;

        pm_runtime_get_sync(dev);

        /*
         * Program the latency tolerance (LTR) according to what the PM QoS
         * layer has requested, or disable it if we were passed a negative
         * value or PM_QOS_LATENCY_ANY.
         */
        ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

        if (val == PM_QOS_LATENCY_ANY || val < 0) {
                ltr &= ~INTEL_LTR_REQ;
        } else {
                ltr |= INTEL_LTR_REQ;
                ltr &= ~INTEL_LTR_SCALE_MASK;
                ltr &= ~INTEL_LTR_VALUE_MASK;

                if (val > INTEL_LTR_VALUE_MASK) {
                        val >>= 5;
                        if (val > INTEL_LTR_VALUE_MASK)
                                val = INTEL_LTR_VALUE_MASK;
                        ltr |= INTEL_LTR_SCALE_32US | val;
                } else {
                        ltr |= INTEL_LTR_SCALE_1US | val;
                }
        }

        if (ltr == host->active_ltr)
                goto out;

        writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
        writel(ltr, hba->mmio_base + INTEL_IDLELTR);

        /* Cache the values into intel_host structure */
        intel_cache_ltr(hba);
out:
        pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
        dev->power.set_latency_tolerance = intel_ltr_set;
        dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
        dev_pm_qos_hide_latency_tolerance(dev);
        dev->power.set_latency_tolerance = NULL;
}

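/* Expose the cached active/idle LTR values as read-only debugfs files. */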
static void intel_add_debugfs(struct ufs_hba *hba)
{
        struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
        struct intel_host *host = ufshcd_get_variant(hba);

        intel_cache_ltr(hba);

        host->debugfs_root = dir;
        debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
        debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        debugfs_remove_recursive(host->debugfs_root);
}

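/*
 * Prefer a firmware-assisted reset via the RESET _DSM function; fall back
 * to pulsing the ACPI-described reset GPIO when the DSM is not supported.
 */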
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        if (INTEL_DSM_SUPPORTED(host, RESET)) {
                u32 result = 0;
                int err;

                err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
                if (!err && !result)
                        err = -EIO;
                if (err)
                        dev_err(hba->dev, "%s: DSM error %d result %u\n",
                                __func__, err, result);
                return err;
        }

        if (!host->reset_gpio)
                return -EOPNOTSUPP;

        gpiod_set_value_cansleep(host->reset_gpio, 1);
        usleep_range(10, 15);

        gpiod_set_value_cansleep(host->reset_gpio, 0);
        usleep_range(10, 15);

        return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
        /* GPIO in _DSD has active low setting */
        return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

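/*
 * Common init: allocate the variant data, probe the _DSM interface, wire up
 * device reset (DSM or GPIO) plus the DEEPSLEEP capability where a reset
 * method exists, then expose LTR control and the debugfs entries.
 */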
static int ufs_intel_common_init(struct ufs_hba *hba)
{
        struct intel_host *host;

        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
        ufshcd_set_variant(hba, host);
        intel_dsm_init(host, hba->dev);
        if (INTEL_DSM_SUPPORTED(host, RESET)) {
                if (hba->vops->device_reset)
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        } else {
                if (hba->vops->device_reset)
                        host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
                if (IS_ERR(host->reset_gpio)) {
                        dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
                                __func__, PTR_ERR(host->reset_gpio));
                        host->reset_gpio = NULL;
                }
                if (host->reset_gpio) {
                        gpiod_set_value_cansleep(host->reset_gpio, 0);
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
                }
        }
        intel_ltr_expose(hba->dev);
        intel_add_debugfs(hba);
        return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
        intel_remove_debugfs(hba);
        intel_ltr_hide(hba->dev);
}

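/*
 * Exit hibern8 explicitly on resume; if that fails, mark the link off so
 * that the core falls back to a full reset and restore.
 */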
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);

                if (!ret) {
                        ufshcd_set_link_active(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                __func__, ret);
                        /*
                         * Force reset and restore. Any other actions can lead
                         * to an unrecoverable state.
                         */
                        ufshcd_set_link_off(hba);
                }
        }

        return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
        /* LKF always needs a full reset, so set PM accordingly */
        if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
                hba->spm_lvl = UFS_PM_LVL_6;
                hba->rpm_lvl = UFS_PM_LVL_6;
        } else {
                hba->spm_lvl = UFS_PM_LVL_5;
                hba->rpm_lvl = UFS_PM_LVL_5;
        }
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
        struct ufs_host *ufs_host;
        int err;

        hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
        ufs_host = ufshcd_get_variant(hba);
        ufs_host->late_init = ufs_intel_lkf_late_init;
        return err;
}

static int ufs_intel_adl_init(struct ufs_hba *hba)
{
        hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_WB_EN;
        return ufs_intel_common_init(hba);
}

static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
        hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
        return ufs_intel_common_init(hba);
}

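/* Per-platform variant ops, selected via driver_data in the PCI ID table. */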
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_ehl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_lkf_init,
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
        .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_adl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_mtl_init,
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
};

#ifdef CONFIG_PM_SLEEP
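/*
 * The link state cannot be relied upon after restoring from a hibernation
 * image, so force it off and let ufshcd_system_resume() do a full reset
 * and restore.
 */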
static int ufshcd_pci_restore(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        /* Force a full reset and restore */
        ufshcd_set_link_off(hba);

        return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and its data
 *      structure memory
 * @pdev: pointer to the PCI device handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
        struct ufs_hba *hba = pci_get_drvdata(pdev);

        pm_runtime_forbid(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        ufshcd_remove(hba);
        ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ufs_host *ufs_host;
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int err;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pcim_enable_device failed\n");
                return err;
        }

        pci_set_master(pdev);

        err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
        if (err < 0) {
                dev_err(&pdev->dev, "request and iomap failed\n");
                return err;
        }

        mmio_base = pcim_iomap_table(pdev)[0];

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err) {
                dev_err(&pdev->dev, "Allocation failed\n");
                return err;
        }

        hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

        err = ufshcd_init(hba, mmio_base, pdev->irq);
        if (err) {
                dev_err(&pdev->dev, "Initialization failed\n");
                ufshcd_dealloc_host(hba);
                return err;
        }

        ufs_host = ufshcd_get_variant(hba);
        if (ufs_host && ufs_host->late_init)
                ufs_host->late_init(hba);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);

        return 0;
}

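/*
 * freeze/thaw/poweroff reuse the regular suspend/resume paths; only restore
 * differs, since the controller needs a full reset after hibernation.
 */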
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
        .suspend        = ufshcd_system_suspend,
        .resume         = ufshcd_system_resume,
        .freeze         = ufshcd_system_suspend,
        .thaw           = ufshcd_system_resume,
        .poweroff       = ufshcd_system_suspend,
        .restore        = ufshcd_pci_restore,
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
#endif
};

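/*
 * Samsung's reference controller binds without variant ops; the Intel
 * entries select per-platform ops (CNL, EHL, LKF, ADL, MTL) via driver_data.
 */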
static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { }     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
        .name = UFSHCD,
        .id_table = ufshcd_pci_tbl,
        .probe = ufshcd_pci_probe,
        .remove = ufshcd_pci_remove,
        .driver = {
                .pm = &ufshcd_pci_pm_ops
        },
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");