Linux 6.12-rc1
linux-block.git: drivers/platform/x86/amd/pmf/core.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Platform Management Framework Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include "pmf.h"

/* PMF-SMU communication registers */
#define AMD_PMF_REGISTER_MESSAGE        0xA18
#define AMD_PMF_REGISTER_RESPONSE       0xA78
#define AMD_PMF_REGISTER_ARGUMENT       0xA58

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMF_MAPPING_SIZE            0x01000
#define AMD_PMF_BASE_ADDR_OFFSET        0x10000
#define AMD_PMF_BASE_ADDR_LO            0x13B102E8
#define AMD_PMF_BASE_ADDR_HI            0x13B102EC
#define AMD_PMF_BASE_ADDR_LO_MASK       GENMASK(15, 0)
#define AMD_PMF_BASE_ADDR_HI_MASK       GENMASK(31, 20)

/* SMU Response Codes */
#define AMD_PMF_RESULT_OK                    0x01
#define AMD_PMF_RESULT_CMD_REJECT_BUSY       0xFC
#define AMD_PMF_RESULT_CMD_REJECT_PREREQ     0xFD
#define AMD_PMF_RESULT_CMD_UNKNOWN           0xFE
#define AMD_PMF_RESULT_FAILED                0xFF

#define PMF_MSG_DELAY_MIN_US            50
#define RESPONSE_REGISTER_LOOP_MAX      20000

#define DELAY_MIN_US    2000
#define DELAY_MAX_US    3000

/* Override the metrics table sampling interval (in ms) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/* Force load on supported older platforms */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");

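/*
 * Notifier for power_supply change events (e.g. AC/DC transitions). When
 * auto mode or CnQF is active and the platform profile is already balanced,
 * the update is left to those features; otherwise the static slider power
 * limits are reapplied and an OS power slider update is sent when supported.
 */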
static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
{
        struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);

        if (event != PSY_EVENT_PROP_CHANGED)
                return NOTIFY_OK;

        if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
                if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
                        return NOTIFY_DONE;
        }

        if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
                amd_pmf_set_sps_power_limits(pmf);

        if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
                amd_pmf_power_slider_update_event(pmf);

        return NOTIFY_OK;
}

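/* debugfs: dump the static slider power limits for the current mode and power source */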
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
        struct amd_pmf_dev *dev = seq->private;
        struct amd_pmf_static_slider_granular table;
        int mode, src = 0;

        mode = amd_pmf_get_pprof_modes(dev);
        if (mode < 0)
                return mode;

        src = amd_pmf_get_power_source();
        amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
        seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
                   table.prop[src][mode].spl,
                   table.prop[src][mode].fppt,
                   table.prop[src][mode].sppt,
                   table.prop[src][mode].sppt_apu_only,
                   table.prop[src][mode].stt_min,
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_power_limits);

static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
        debugfs_remove_recursive(dev->dbgfs_dir);
}

static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
        dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
        if (dev->pmf_if_version == PMF_IF_V1)
                debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
                                    &current_power_limits_fops);
}

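/* Report whether the system is currently running on AC (mains) or DC (battery) power */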
int amd_pmf_get_power_source(void)
{
        if (power_supply_is_system_supplied() > 0)
                return POWER_SOURCE_AC;
        else
                return POWER_SOURCE_DC;
}

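/*
 * Periodic metrics worker: ask the PMFW to refresh the metrics table in the
 * shared DRAM buffer, derive the SoC socket power from it, and feed that into
 * the auto mode and/or CnQF state machines when they are enabled. Reschedules
 * itself every metrics_table_loop_ms.
 */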
static void amd_pmf_get_metrics(struct work_struct *work)
{
        struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
        ktime_t time_elapsed_ms;
        int socket_power;

        mutex_lock(&dev->update_mutex);
        /* Transfer table contents */
        memset(dev->buf, 0, sizeof(dev->m_table));
        amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
        memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

        time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
        /* Calculate the avg SoC power consumption */
        socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

        if (dev->amt_enabled) {
                /* Apply the Auto Mode transition */
                amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
        }

        if (dev->cnqf_enabled) {
                /* Apply the CnQF transition */
                amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
        }

        dev->start_time = ktime_to_ms(ktime_get());
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
        mutex_unlock(&dev->update_mutex);
}

static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
        return ioread32(dev->regbase + reg_offset);
}

static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
        iowrite32(val, dev->regbase + reg_offset);
}

static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
{
        u32 value;

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}

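/*
 * Send a message to the PMFW mailbox: wait for the response register to go
 * non-zero (firmware idle), clear it, write the argument and the message ID,
 * then poll for the new response. On AMD_PMF_RESULT_OK with @get set, the
 * result is read back from the argument register into @data.
 */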
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
        int rc;
        u32 val;

        mutex_lock(&dev->lock);

        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
                goto out_unlock;
        }

        /* Write zero to response register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

        /* Write argument into argument register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

        /* Write message ID to message ID register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "SMU response timed out\n");
                goto out_unlock;
        }

        switch (val) {
        case AMD_PMF_RESULT_OK:
                if (get) {
                        /* PMFW may take longer to return the data */
                        usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
                        *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
                }
                break;
        case AMD_PMF_RESULT_CMD_REJECT_BUSY:
                dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
                rc = -EBUSY;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_UNKNOWN:
                dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
                rc = -EINVAL;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
        case AMD_PMF_RESULT_FAILED:
        default:
                dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
                rc = -EIO;
                goto out_unlock;
        }

out_unlock:
        mutex_unlock(&dev->lock);
        amd_pmf_dump_registers(dev);
        return rc;
}

static const struct pci_device_id pmf_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
        { }
};

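/*
 * Program the physical address of the metrics table buffer into the PMFW via
 * the SET_DRAM_ADDR_{HIGH,LOW} mailbox messages. When @alloc_buffer is true,
 * the buffer is (re)allocated first, sized according to the detected CPU id.
 */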
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
        u64 phys_addr;
        u32 hi, low;

        /* Get Metrics Table Address */
        if (alloc_buffer) {
                switch (dev->cpu_id) {
                case AMD_CPU_ID_PS:
                case AMD_CPU_ID_RMB:
                        dev->mtable_size = sizeof(dev->m_table);
                        break;
                case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
                        dev->mtable_size = sizeof(dev->m_table_v2);
                        break;
                default:
                        dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
                }

                dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL);
                if (!dev->buf)
                        return -ENOMEM;
        }

        phys_addr = virt_to_phys(dev->buf);
        hi = phys_addr >> 32;
        low = phys_addr & GENMASK(31, 0);

        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);

        return 0;
}

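/* Allocate the metrics table buffer, hand its address to the PMFW and start the metrics worker */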
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
        int ret;

        INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

        ret = amd_pmf_set_dram_addr(dev, true);
        if (ret)
                return ret;

        /*
         * Start collecting the metrics data after a small delay,
         * otherwise we might end up reading stale values from PMFW.
         */
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

        return 0;
}

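/* On suspend: stop the Smart PC policy worker and send the ON_SUSPEND heartbeat to the BIOS when supported */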
static int amd_pmf_suspend_handler(struct device *dev)
{
        struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

        if (pdev->smart_pc_enabled)
                cancel_delayed_work_sync(&pdev->pb_work);

        if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
                amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);

        return 0;
}

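/* On resume: reprogram the metrics table address, send the ON_RESUME heartbeat and restart the Smart PC policy worker */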
static int amd_pmf_resume_handler(struct device *dev)
{
        struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
        int ret;

        if (pdev->buf) {
                ret = amd_pmf_set_dram_addr(pdev, false);
                if (ret)
                        return ret;
        }

        if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
                amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);

        if (pdev->smart_pc_enabled)
                schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));

        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);

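/*
 * Enable the PMF features advertised by the BIOS: static slider / OS power
 * slider first, then Smart PC; if Smart PC does not come up, fall back to
 * auto mode or, failing that, CnQF.
 */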
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
        int ret;

        /* Enable Static Slider */
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
            is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
                amd_pmf_init_sps(dev);
                dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
                power_supply_reg_notifier(&dev->pwr_src_notifier);
                dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
        }

        amd_pmf_init_smart_pc(dev);
        if (dev->smart_pc_enabled) {
                dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
                /* If Smart PC is enabled, no need to check for other features */
                return;
        }

        if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_init_auto_mode(dev);
                dev_dbg(dev->dev, "Auto Mode Init done\n");
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                ret = amd_pmf_init_cnqf(dev);
                if (ret)
                        dev_warn(dev->dev, "CnQF Init failed\n");
        }
}

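/* Tear down whichever features were enabled in amd_pmf_init_features() */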
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
            is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
                power_supply_unreg_notifier(&dev->pwr_src_notifier);
                amd_pmf_deinit_sps(dev);
        }

        if (dev->smart_pc_enabled) {
                amd_pmf_deinit_smart_pc(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_deinit_auto_mode(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                amd_pmf_deinit_cnqf(dev);
        }
}

static const struct acpi_device_id amd_pmf_acpi_ids[] = {
        {"AMDI0100", 0x100},
        {"AMDI0102", 0},
        {"AMDI0103", 0},
        {"AMDI0105", 0},
        {"AMDI0107", 0},
        { }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);

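/*
 * Probe: match the ACPI device, look up the root complex PCI device to learn
 * the CPU id, read the SMU base address over SMN, map the mailbox registers,
 * and then bring up the ACPI interface, debugfs entries and PMF features.
 */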
static int amd_pmf_probe(struct platform_device *pdev)
{
        const struct acpi_device_id *id;
        struct amd_pmf_dev *dev;
        struct pci_dev *rdev;
        u32 base_addr_lo;
        u32 base_addr_hi;
        u64 base_addr;
        u32 val;
        int err;

        id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
        if (!id)
                return -ENODEV;

        if (id->driver_data == 0x100 && !force_load)
                return -ENODEV;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->dev = &pdev->dev;

        rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
        if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
                pci_dev_put(rdev);
                return -ENODEV;
        }

        dev->cpu_id = rdev->device;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
                                    AMD_PMF_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;

        mutex_init(&dev->lock);
        mutex_init(&dev->update_mutex);

        amd_pmf_quirks_init(dev);
        apmf_acpi_init(dev);
        platform_set_drvdata(pdev, dev);
        amd_pmf_dbgfs_register(dev);
        amd_pmf_init_features(dev);
        apmf_install_handler(dev);
        if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
                amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);

        dev_info(dev->dev, "registered PMF device successfully\n");

        return 0;
}

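/* Remove: disable the features, notify the BIOS of unload, tear down the ACPI/debugfs interfaces and free the metrics buffer */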
static void amd_pmf_remove(struct platform_device *pdev)
{
        struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

        amd_pmf_deinit_features(dev);
        if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
                amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
        apmf_acpi_deinit(dev);
        amd_pmf_dbgfs_unregister(dev);
        mutex_destroy(&dev->lock);
        mutex_destroy(&dev->update_mutex);
        kfree(dev->buf);
}

static const struct attribute_group *amd_pmf_driver_groups[] = {
        &cnqf_feature_attribute_group,
        NULL,
};

static struct platform_driver amd_pmf_driver = {
        .driver = {
                .name = "amd-pmf",
                .acpi_match_table = amd_pmf_acpi_ids,
                .dev_groups = amd_pmf_driver_groups,
                .pm = pm_sleep_ptr(&amd_pmf_pm),
        },
        .probe = amd_pmf_probe,
        .remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
MODULE_SOFTDEP("pre: amdtee");