1da177e4
LT
1/*
2 * firmware_class.c - Multi purpose firmware loading support
3 *
87d37a4f 4 * Copyright (c) 2003 Manuel Estrada Sainz
1da177e4
LT
5 *
6 * Please see Documentation/firmware_class/ for more information.
7 *
8 */
9
c59ede7b 10#include <linux/capability.h>
1da177e4
LT
11#include <linux/device.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/timer.h>
15#include <linux/vmalloc.h>
16#include <linux/interrupt.h>
17#include <linux/bitops.h>
cad1e55d 18#include <linux/mutex.h>
a36cf844 19#include <linux/workqueue.h>
6e03a201 20#include <linux/highmem.h>
1da177e4 21#include <linux/firmware.h>
5a0e3ad6 22#include <linux/slab.h>
a36cf844 23#include <linux/sched.h>
abb139e7 24#include <linux/file.h>
1f2b7959 25#include <linux/list.h>
e40ba6d5 26#include <linux/fs.h>
37276a51
ML
27#include <linux/async.h>
28#include <linux/pm.h>
07646d9c 29#include <linux/suspend.h>
ac39b3ea 30#include <linux/syscore_ops.h>
fe304143 31#include <linux/reboot.h>
6593d924 32#include <linux/security.h>
37276a51 33
abb139e7
LT
34#include <generated/utsrelease.h>
35
37276a51 36#include "base.h"
1da177e4 37
87d37a4f 38MODULE_AUTHOR("Manuel Estrada Sainz");
1da177e4
LT
39MODULE_DESCRIPTION("Multi purpose firmware loading support");
40MODULE_LICENSE("GPL");
41
bcb9bd18
DT
42/* Builtin firmware support */
43
44#ifdef CONFIG_FW_LOADER
45
46extern struct builtin_fw __start_builtin_fw[];
47extern struct builtin_fw __end_builtin_fw[];
48
a098ecd2
SB
49static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
50 void *buf, size_t size)
bcb9bd18
DT
51{
52 struct builtin_fw *b_fw;
53
54 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
55 if (strcmp(name, b_fw->name) == 0) {
56 fw->size = b_fw->size;
57 fw->data = b_fw->data;
a098ecd2
SB
58
59 if (buf && fw->size <= size)
60 memcpy(buf, fw->data, fw->size);
bcb9bd18
DT
61 return true;
62 }
63 }
64
65 return false;
66}
67
68static bool fw_is_builtin_firmware(const struct firmware *fw)
69{
70 struct builtin_fw *b_fw;
71
72 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
73 if (fw->data == b_fw->data)
74 return true;
75
76 return false;
77}
78
79#else /* Module case - no builtin firmware support */
80
a098ecd2
SB
81static inline bool fw_get_builtin_firmware(struct firmware *fw,
82 const char *name, void *buf,
83 size_t size)
bcb9bd18
DT
84{
85 return false;
86}
87
88static inline bool fw_is_builtin_firmware(const struct firmware *fw)
89{
90 return false;
91}
92#endif
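/*
 * Illustrative note: the entries between __start_builtin_fw and
 * __end_builtin_fw come from firmware images linked into the kernel image
 * itself (e.g. via CONFIG_EXTRA_FIRMWARE); requests for such images are
 * satisfied by the helpers above without touching the filesystem or the
 * user-mode helper.
 */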
93
1da177e4
LT
94enum {
95 FW_STATUS_LOADING,
96 FW_STATUS_DONE,
97 FW_STATUS_ABORT,
1da177e4
LT
98};
99
2f65168d 100static int loading_timeout = 60; /* In seconds */
1da177e4 101
9b78c1da
RW
102static inline long firmware_loading_timeout(void)
103{
68ff2a00 104 return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
9b78c1da
RW
105}
106
14c4bae7
TI
107/* firmware behavior options */
108#define FW_OPT_UEVENT (1U << 0)
109#define FW_OPT_NOWAIT (1U << 1)
68aeeaaa 110#ifdef CONFIG_FW_LOADER_USER_HELPER
5a1379e8 111#define FW_OPT_USERHELPER (1U << 2)
68aeeaaa 112#else
5a1379e8
TI
113#define FW_OPT_USERHELPER 0
114#endif
115#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
116#define FW_OPT_FALLBACK FW_OPT_USERHELPER
117#else
118#define FW_OPT_FALLBACK 0
68aeeaaa 119#endif
c868edf4 120#define FW_OPT_NO_WARN (1U << 3)
0e742e92 121#define FW_OPT_NOCACHE (1U << 4)
14c4bae7 122
1f2b7959
ML
123struct firmware_cache {
124 /* firmware_buf instance will be added into the below list */
125 spinlock_t lock;
126 struct list_head head;
cfe016b1 127 int state;
37276a51 128
cfe016b1 129#ifdef CONFIG_PM_SLEEP
37276a51
ML
130 /*
131 * Names of firmware images which have been cached successfully
132 * will be added into the below list so that device uncache
133 * helper can trace which firmware images have been cached
134 * before.
135 */
136 spinlock_t name_lock;
137 struct list_head fw_names;
138
37276a51 139 struct delayed_work work;
07646d9c
ML
140
141 struct notifier_block pm_notify;
cfe016b1 142#endif
1f2b7959 143};
1da177e4 144
1244691c 145struct firmware_buf {
1f2b7959
ML
146 struct kref ref;
147 struct list_head list;
1da177e4 148 struct completion completion;
1f2b7959 149 struct firmware_cache *fwc;
1da177e4 150 unsigned long status;
65710cb6
ML
151 void *data;
152 size_t size;
a098ecd2 153 size_t allocated_size;
7b1269f7 154#ifdef CONFIG_FW_LOADER_USER_HELPER
cd7239fa 155 bool is_paged_buf;
af5bc11e 156 bool need_uevent;
6e03a201
DW
157 struct page **pages;
158 int nr_pages;
159 int page_array_size;
fe304143 160 struct list_head pending_list;
7b1269f7 161#endif
e0fd9b1d 162 const char *fw_id;
1244691c
ML
163};
164
37276a51
ML
165struct fw_cache_entry {
166 struct list_head list;
e0fd9b1d 167 const char *name;
37276a51
ML
168};
169
f531f05a
ML
170struct fw_name_devm {
171 unsigned long magic;
e0fd9b1d 172 const char *name;
f531f05a
ML
173};
174
1f2b7959
ML
175#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
176
ac39b3ea
ML
177#define FW_LOADER_NO_CACHE 0
178#define FW_LOADER_START_CACHE 1
179
180static int fw_cache_piggyback_on_request(const char *name);
181
1f2b7959
ML
182/* fw_lock could be moved to 'struct firmware_priv' but since it is just
183 * guarding against corner cases, a global lock should be OK */
184static DEFINE_MUTEX(fw_lock);
185
186static struct firmware_cache fw_cache;
187
188static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
a098ecd2
SB
189 struct firmware_cache *fwc,
190 void *dbuf, size_t size)
1f2b7959
ML
191{
192 struct firmware_buf *buf;
193
e0fd9b1d 194 buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
1f2b7959 195 if (!buf)
e0fd9b1d
LR
196 return NULL;
197
198 buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
199 if (!buf->fw_id) {
200 kfree(buf);
201 return NULL;
202 }
1f2b7959
ML
203
204 kref_init(&buf->ref);
1f2b7959 205 buf->fwc = fwc;
a098ecd2
SB
206 buf->data = dbuf;
207 buf->allocated_size = size;
1f2b7959 208 init_completion(&buf->completion);
fe304143
TI
209#ifdef CONFIG_FW_LOADER_USER_HELPER
210 INIT_LIST_HEAD(&buf->pending_list);
211#endif
1f2b7959
ML
212
213 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
214
215 return buf;
216}
217
2887b395
ML
218static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
219{
220 struct firmware_buf *tmp;
221 struct firmware_cache *fwc = &fw_cache;
222
223 list_for_each_entry(tmp, &fwc->head, list)
224 if (!strcmp(tmp->fw_id, fw_name))
225 return tmp;
226 return NULL;
227}
228
1f2b7959
ML
229static int fw_lookup_and_allocate_buf(const char *fw_name,
230 struct firmware_cache *fwc,
a098ecd2
SB
231 struct firmware_buf **buf, void *dbuf,
232 size_t size)
1f2b7959
ML
233{
234 struct firmware_buf *tmp;
235
236 spin_lock(&fwc->lock);
2887b395
ML
237 tmp = __fw_lookup_buf(fw_name);
238 if (tmp) {
239 kref_get(&tmp->ref);
240 spin_unlock(&fwc->lock);
241 *buf = tmp;
242 return 1;
243 }
a098ecd2 244 tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
1f2b7959
ML
245 if (tmp)
246 list_add(&tmp->list, &fwc->head);
247 spin_unlock(&fwc->lock);
248
249 *buf = tmp;
250
251 return tmp ? 0 : -ENOMEM;
252}
253
254static void __fw_free_buf(struct kref *ref)
98233b21 255 __releases(&fwc->lock)
1f2b7959
ML
256{
257 struct firmware_buf *buf = to_fwbuf(ref);
258 struct firmware_cache *fwc = buf->fwc;
1f2b7959
ML
259
260 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
261 __func__, buf->fw_id, buf, buf->data,
262 (unsigned int)buf->size);
263
1f2b7959
ML
264 list_del(&buf->list);
265 spin_unlock(&fwc->lock);
266
7b1269f7 267#ifdef CONFIG_FW_LOADER_USER_HELPER
cd7239fa 268 if (buf->is_paged_buf) {
7b1269f7 269 int i;
746058f4
ML
270 vunmap(buf->data);
271 for (i = 0; i < buf->nr_pages; i++)
272 __free_page(buf->pages[i]);
10a3fbf1 273 vfree(buf->pages);
746058f4 274 } else
7b1269f7 275#endif
a098ecd2 276 if (!buf->allocated_size)
746058f4 277 vfree(buf->data);
e0fd9b1d 278 kfree_const(buf->fw_id);
1f2b7959
ML
279 kfree(buf);
280}
281
282static void fw_free_buf(struct firmware_buf *buf)
283{
bd9eb7fb
CL
284 struct firmware_cache *fwc = buf->fwc;
285 spin_lock(&fwc->lock);
286 if (!kref_put(&buf->ref, __fw_free_buf))
287 spin_unlock(&fwc->lock);
1f2b7959
ML
288}
289
746058f4 290/* direct firmware loading support */
27602842
ML
291static char fw_path_para[256];
292static const char * const fw_path[] = {
293 fw_path_para,
746058f4
ML
294 "/lib/firmware/updates/" UTS_RELEASE,
295 "/lib/firmware/updates",
296 "/lib/firmware/" UTS_RELEASE,
297 "/lib/firmware"
298};
299
27602842
ML
300/*
301 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
302 * kernel command line, because firmware_class is generally built into the
303 * kernel rather than built as a module.
304 */
305module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
306MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
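/*
 * Example (illustrative; the directory below is hypothetical): booting with
 *
 *     firmware_class.path=/vendor/firmware
 *
 * makes /vendor/firmware the first location tried when loading from the
 * filesystem, ahead of the default /lib/firmware entries in fw_path[] above.
 */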
307
5275d194
LR
308static void fw_finish_direct_load(struct device *device,
309 struct firmware_buf *buf)
310{
311 mutex_lock(&fw_lock);
312 set_bit(FW_STATUS_DONE, &buf->status);
313 complete_all(&buf->completion);
314 mutex_unlock(&fw_lock);
315}
316
a098ecd2
SB
317static int
318fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
746058f4 319{
e40ba6d5 320 loff_t size;
1ba4de17 321 int i, len;
3e358ac2 322 int rc = -ENOENT;
f5727b05 323 char *path;
a098ecd2
SB
324 enum kernel_read_file_id id = READING_FIRMWARE;
325 size_t msize = INT_MAX;
326
327 /* Already populated data member means we're loading into a buffer */
328 if (buf->data) {
329 id = READING_FIRMWARE_PREALLOC_BUFFER;
330 msize = buf->allocated_size;
331 }
f5727b05
LR
332
333 path = __getname();
334 if (!path)
335 return -ENOMEM;
746058f4
ML
336
337 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
27602842
ML
338 /* skip the unset customized path */
339 if (!fw_path[i][0])
340 continue;
341
1ba4de17
LR
342 len = snprintf(path, PATH_MAX, "%s/%s",
343 fw_path[i], buf->fw_id);
344 if (len >= PATH_MAX) {
345 rc = -ENAMETOOLONG;
346 break;
347 }
746058f4 348
e40ba6d5 349 buf->size = 0;
a098ecd2
SB
350 rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
351 id);
4b2530d8 352 if (rc) {
8e516aa5
LR
353 if (rc == -ENOENT)
354 dev_dbg(device, "loading %s failed with error %d\n",
355 path, rc);
356 else
357 dev_warn(device, "loading %s failed with error %d\n",
358 path, rc);
4b2530d8
KC
359 continue;
360 }
e40ba6d5
MZ
361 dev_dbg(device, "direct-loading %s\n", buf->fw_id);
362 buf->size = size;
5275d194 363 fw_finish_direct_load(device, buf);
4b2530d8 364 break;
4e0c92d0 365 }
4b2530d8 366 __putname(path);
4e0c92d0 367
3e358ac2 368 return rc;
746058f4
ML
369}
370
7b1269f7
TI
371/* firmware holds the ownership of pages */
372static void firmware_free_data(const struct firmware *fw)
373{
374 /* Loaded directly? */
375 if (!fw->priv) {
376 vfree(fw->data);
377 return;
378 }
379 fw_free_buf(fw->priv);
380}
381
cd7239fa
TI
382/* store the pages buffer info into the firmware struct from buf */
383static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
384{
385 fw->priv = buf;
386#ifdef CONFIG_FW_LOADER_USER_HELPER
387 fw->pages = buf->pages;
388#endif
389 fw->size = buf->size;
390 fw->data = buf->data;
391
392 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
393 __func__, buf->fw_id, buf, buf->data,
394 (unsigned int)buf->size);
395}
396
397#ifdef CONFIG_PM_SLEEP
398static void fw_name_devm_release(struct device *dev, void *res)
399{
400 struct fw_name_devm *fwn = res;
401
402 if (fwn->magic == (unsigned long)&fw_cache)
403 pr_debug("%s: fw_name-%s devm-%p released\n",
404 __func__, fwn->name, res);
e0fd9b1d 405 kfree_const(fwn->name);
cd7239fa
TI
406}
407
408static int fw_devm_match(struct device *dev, void *res,
409 void *match_data)
410{
411 struct fw_name_devm *fwn = res;
412
413 return (fwn->magic == (unsigned long)&fw_cache) &&
414 !strcmp(fwn->name, match_data);
415}
416
417static struct fw_name_devm *fw_find_devm_name(struct device *dev,
418 const char *name)
419{
420 struct fw_name_devm *fwn;
421
422 fwn = devres_find(dev, fw_name_devm_release,
423 fw_devm_match, (void *)name);
424 return fwn;
425}
426
427/* add firmware name into devres list */
428static int fw_add_devm_name(struct device *dev, const char *name)
429{
430 struct fw_name_devm *fwn;
431
432 fwn = fw_find_devm_name(dev, name);
433 if (fwn)
434 return 1;
435
e0fd9b1d
LR
436 fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
437 GFP_KERNEL);
cd7239fa
TI
438 if (!fwn)
439 return -ENOMEM;
e0fd9b1d
LR
440 fwn->name = kstrdup_const(name, GFP_KERNEL);
441 if (!fwn->name) {
a885de67 442 devres_free(fwn);
e0fd9b1d
LR
443 return -ENOMEM;
444 }
cd7239fa
TI
445
446 fwn->magic = (unsigned long)&fw_cache;
cd7239fa
TI
447 devres_add(dev, fwn);
448
449 return 0;
450}
451#else
452static int fw_add_devm_name(struct device *dev, const char *name)
453{
454 return 0;
455}
456#endif
457
458
459/*
460 * user-mode helper code
461 */
7b1269f7 462#ifdef CONFIG_FW_LOADER_USER_HELPER
cd7239fa 463struct firmware_priv {
cd7239fa
TI
464 bool nowait;
465 struct device dev;
466 struct firmware_buf *buf;
467 struct firmware *fw;
468};
7b1269f7 469
f8a4bd34
DT
470static struct firmware_priv *to_firmware_priv(struct device *dev)
471{
472 return container_of(dev, struct firmware_priv, dev);
473}
474
7068cb07 475static void __fw_load_abort(struct firmware_buf *buf)
1da177e4 476{
87597936
ML
477 /*
478 * There is a small window in which the user can write to 'loading'
479 * between loading being done and the disappearance of 'loading'
480 */
481 if (test_bit(FW_STATUS_DONE, &buf->status))
482 return;
483
fe304143 484 list_del_init(&buf->pending_list);
1244691c 485 set_bit(FW_STATUS_ABORT, &buf->status);
1f2b7959 486 complete_all(&buf->completion);
1da177e4
LT
487}
488
7068cb07
GKH
489static void fw_load_abort(struct firmware_priv *fw_priv)
490{
491 struct firmware_buf *buf = fw_priv->buf;
492
493 __fw_load_abort(buf);
87597936
ML
494
495 /* avoid user action after loading abort */
496 fw_priv->buf = NULL;
1da177e4
LT
497}
498
807be03c
TI
499#define is_fw_load_aborted(buf) \
500 test_bit(FW_STATUS_ABORT, &(buf)->status)
501
fe304143
TI
502static LIST_HEAD(pending_fw_head);
503
504/* reboot notifier to avoid deadlock with usermode_lock */
505static int fw_shutdown_notify(struct notifier_block *unused1,
506 unsigned long unused2, void *unused3)
507{
508 mutex_lock(&fw_lock);
509 while (!list_empty(&pending_fw_head))
7068cb07 510 __fw_load_abort(list_first_entry(&pending_fw_head,
fe304143
TI
511 struct firmware_buf,
512 pending_list));
513 mutex_unlock(&fw_lock);
514 return NOTIFY_DONE;
515}
516
517static struct notifier_block fw_shutdown_nb = {
518 .notifier_call = fw_shutdown_notify,
519};
520
14adbe53
GKH
521static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
522 char *buf)
1da177e4
LT
523{
524 return sprintf(buf, "%d\n", loading_timeout);
525}
526
527/**
eb8e3179
RD
528 * timeout_store - set number of seconds to wait for firmware
529 * @class: device class pointer
e59817bf 530 * @attr: device attribute pointer
eb8e3179
RD
531 * @buf: buffer to scan for timeout value
532 * @count: number of bytes in @buf
533 *
1da177e4 534 * Sets the number of seconds to wait for the firmware. Once
eb8e3179 535 * this expires an error will be returned to the driver and no
1da177e4
LT
536 * firmware will be provided.
537 *
eb8e3179 538 * Note: zero means 'wait forever'.
1da177e4 539 **/
14adbe53
GKH
540static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
541 const char *buf, size_t count)
1da177e4
LT
542{
543 loading_timeout = simple_strtol(buf, NULL, 10);
b92eac01
SG
544 if (loading_timeout < 0)
545 loading_timeout = 0;
f8a4bd34 546
1da177e4
LT
547 return count;
548}
549
673fae90 550static struct class_attribute firmware_class_attrs[] = {
14adbe53 551 __ATTR_RW(timeout),
673fae90
DT
552 __ATTR_NULL
553};
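/*
 * Example (illustrative): the 'timeout' class attribute declared above shows
 * up as /sys/class/firmware/timeout once the class is registered, so the
 * user-helper wait can be tuned at run time, e.g.:
 *
 *     echo 30 > /sys/class/firmware/timeout
 */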
1da177e4 554
1244691c
ML
555static void fw_dev_release(struct device *dev)
556{
557 struct firmware_priv *fw_priv = to_firmware_priv(dev);
65710cb6 558
673fae90 559 kfree(fw_priv);
673fae90 560}
1da177e4 561
6f957724 562static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
1da177e4 563{
1244691c 564 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
1da177e4 565 return -ENOMEM;
7eff2e7a 566 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
6897089c 567 return -ENOMEM;
e9045f91
JB
568 if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
569 return -ENOMEM;
1da177e4
LT
570
571 return 0;
572}
573
6f957724
LT
574static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
575{
576 struct firmware_priv *fw_priv = to_firmware_priv(dev);
577 int err = 0;
578
579 mutex_lock(&fw_lock);
580 if (fw_priv->buf)
581 err = do_firmware_uevent(fw_priv, env);
582 mutex_unlock(&fw_lock);
583 return err;
584}
585
1b81d663
AB
586static struct class firmware_class = {
587 .name = "firmware",
673fae90 588 .class_attrs = firmware_class_attrs,
e55c8790
GKH
589 .dev_uevent = firmware_uevent,
590 .dev_release = fw_dev_release,
1b81d663
AB
591};
592
e55c8790
GKH
593static ssize_t firmware_loading_show(struct device *dev,
594 struct device_attribute *attr, char *buf)
1da177e4 595{
f8a4bd34 596 struct firmware_priv *fw_priv = to_firmware_priv(dev);
87597936
ML
597 int loading = 0;
598
599 mutex_lock(&fw_lock);
600 if (fw_priv->buf)
601 loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
602 mutex_unlock(&fw_lock);
f8a4bd34 603
1da177e4
LT
604 return sprintf(buf, "%d\n", loading);
605}
606
6e03a201
DW
607/* Some architectures don't have PAGE_KERNEL_RO */
608#ifndef PAGE_KERNEL_RO
609#define PAGE_KERNEL_RO PAGE_KERNEL
610#endif
253c9240
ML
611
612/* a given pages buffer should be mapped/unmapped only once */
613static int fw_map_pages_buf(struct firmware_buf *buf)
614{
cd7239fa 615 if (!buf->is_paged_buf)
746058f4
ML
616 return 0;
617
daa3d67f 618 vunmap(buf->data);
253c9240
ML
619 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
620 if (!buf->data)
621 return -ENOMEM;
622 return 0;
623}
624
1da177e4 625/**
eb8e3179 626 * firmware_loading_store - set value in the 'loading' control file
e55c8790 627 * @dev: device pointer
af9997e4 628 * @attr: device attribute pointer
eb8e3179
RD
629 * @buf: buffer to scan for loading control value
630 * @count: number of bytes in @buf
631 *
1da177e4
LT
632 * The relevant values are:
633 *
634 * 1: Start a load, discarding any previous partial load.
eb8e3179 635 * 0: Conclude the load and hand the data to the driver code.
1da177e4
LT
636 * -1: Conclude the load with an error and discard any written data.
637 **/
e55c8790
GKH
638static ssize_t firmware_loading_store(struct device *dev,
639 struct device_attribute *attr,
640 const char *buf, size_t count)
1da177e4 641{
f8a4bd34 642 struct firmware_priv *fw_priv = to_firmware_priv(dev);
87597936 643 struct firmware_buf *fw_buf;
6593d924 644 ssize_t written = count;
1da177e4 645 int loading = simple_strtol(buf, NULL, 10);
6e03a201 646 int i;
1da177e4 647
eea915bb 648 mutex_lock(&fw_lock);
87597936 649 fw_buf = fw_priv->buf;
1244691c 650 if (!fw_buf)
eea915bb
NH
651 goto out;
652
1da177e4
LT
653 switch (loading) {
654 case 1:
65710cb6 655 /* discarding any previous partial load */
1244691c
ML
656 if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
657 for (i = 0; i < fw_buf->nr_pages; i++)
658 __free_page(fw_buf->pages[i]);
10a3fbf1 659 vfree(fw_buf->pages);
1244691c
ML
660 fw_buf->pages = NULL;
661 fw_buf->page_array_size = 0;
662 fw_buf->nr_pages = 0;
663 set_bit(FW_STATUS_LOADING, &fw_buf->status);
28eefa75 664 }
1da177e4
LT
665 break;
666 case 0:
1244691c 667 if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
6593d924
KC
668 int rc;
669
1244691c
ML
670 set_bit(FW_STATUS_DONE, &fw_buf->status);
671 clear_bit(FW_STATUS_LOADING, &fw_buf->status);
253c9240
ML
672
673 /*
674 * Several loading requests may be pending on
675 * the same firmware buf, so let all requests
676 * see the mapped 'buf->data' once the loading
677 * is completed.
678 */
6593d924
KC
679 rc = fw_map_pages_buf(fw_buf);
680 if (rc)
2b1278cb 681 dev_err(dev, "%s: map pages failed\n",
682 __func__);
6593d924 683 else
e40ba6d5
MZ
684 rc = security_kernel_post_read_file(NULL,
685 fw_buf->data, fw_buf->size,
686 READING_FIRMWARE);
6593d924
KC
687
688 /*
689 * Same logic as fw_load_abort, only the DONE bit
690 * is ignored and we set ABORT only on failure.
691 */
fe304143 692 list_del_init(&fw_buf->pending_list);
6593d924
KC
693 if (rc) {
694 set_bit(FW_STATUS_ABORT, &fw_buf->status);
695 written = rc;
696 }
1f2b7959 697 complete_all(&fw_buf->completion);
1da177e4
LT
698 break;
699 }
700 /* fallthrough */
701 default:
266a813c 702 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
1da177e4
LT
703 /* fallthrough */
704 case -1:
705 fw_load_abort(fw_priv);
706 break;
707 }
eea915bb
NH
708out:
709 mutex_unlock(&fw_lock);
6593d924 710 return written;
1da177e4
LT
711}
712
e55c8790 713static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
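/*
 * Illustrative sketch of the user-space side of the fallback loader, for a
 * hypothetical image "sample.bin" (the exact helper varies by distribution):
 *
 *     echo 1 > /sys/$DEVPATH/loading
 *     cat /lib/firmware/sample.bin > /sys/$DEVPATH/data
 *     echo 0 > /sys/$DEVPATH/loading
 *
 * This drives the 'loading' attribute above and the 'data' attribute defined
 * further below.
 */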
1da177e4 714
a098ecd2
SB
715static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
716 loff_t offset, size_t count, bool read)
717{
718 if (read)
719 memcpy(buffer, buf->data + offset, count);
720 else
721 memcpy(buf->data + offset, buffer, count);
722}
723
9ccf9811
SB
724static void firmware_rw(struct firmware_buf *buf, char *buffer,
725 loff_t offset, size_t count, bool read)
726{
727 while (count) {
728 void *page_data;
729 int page_nr = offset >> PAGE_SHIFT;
730 int page_ofs = offset & (PAGE_SIZE-1);
731 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
732
733 page_data = kmap(buf->pages[page_nr]);
734
735 if (read)
736 memcpy(buffer, page_data + page_ofs, page_cnt);
737 else
738 memcpy(page_data + page_ofs, buffer, page_cnt);
739
740 kunmap(buf->pages[page_nr]);
741 buffer += page_cnt;
742 offset += page_cnt;
743 count -= page_cnt;
744 }
745}
746
f8a4bd34
DT
747static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
748 struct bin_attribute *bin_attr,
749 char *buffer, loff_t offset, size_t count)
1da177e4 750{
b0d1f807 751 struct device *dev = kobj_to_dev(kobj);
f8a4bd34 752 struct firmware_priv *fw_priv = to_firmware_priv(dev);
1244691c 753 struct firmware_buf *buf;
f37e6617 754 ssize_t ret_count;
1da177e4 755
cad1e55d 756 mutex_lock(&fw_lock);
1244691c
ML
757 buf = fw_priv->buf;
758 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
1da177e4
LT
759 ret_count = -ENODEV;
760 goto out;
761 }
1244691c 762 if (offset > buf->size) {
308975fa
JS
763 ret_count = 0;
764 goto out;
765 }
1244691c
ML
766 if (count > buf->size - offset)
767 count = buf->size - offset;
6e03a201
DW
768
769 ret_count = count;
770
a098ecd2
SB
771 if (buf->data)
772 firmware_rw_buf(buf, buffer, offset, count, true);
773 else
774 firmware_rw(buf, buffer, offset, count, true);
6e03a201 775
1da177e4 776out:
cad1e55d 777 mutex_unlock(&fw_lock);
1da177e4
LT
778 return ret_count;
779}
eb8e3179 780
f8a4bd34 781static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
1da177e4 782{
1244691c 783 struct firmware_buf *buf = fw_priv->buf;
a76040d8 784 int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
6e03a201
DW
785
786 /* If the array of pages is too small, grow it... */
1244691c 787 if (buf->page_array_size < pages_needed) {
6e03a201 788 int new_array_size = max(pages_needed,
1244691c 789 buf->page_array_size * 2);
6e03a201
DW
790 struct page **new_pages;
791
10a3fbf1 792 new_pages = vmalloc(new_array_size * sizeof(void *));
6e03a201
DW
793 if (!new_pages) {
794 fw_load_abort(fw_priv);
795 return -ENOMEM;
796 }
1244691c
ML
797 memcpy(new_pages, buf->pages,
798 buf->page_array_size * sizeof(void *));
799 memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
800 (new_array_size - buf->page_array_size));
10a3fbf1 801 vfree(buf->pages);
1244691c
ML
802 buf->pages = new_pages;
803 buf->page_array_size = new_array_size;
6e03a201 804 }
1da177e4 805
1244691c
ML
806 while (buf->nr_pages < pages_needed) {
807 buf->pages[buf->nr_pages] =
6e03a201 808 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
1da177e4 809
1244691c 810 if (!buf->pages[buf->nr_pages]) {
6e03a201
DW
811 fw_load_abort(fw_priv);
812 return -ENOMEM;
813 }
1244691c 814 buf->nr_pages++;
1da177e4 815 }
1da177e4
LT
816 return 0;
817}
818
819/**
eb8e3179 820 * firmware_data_write - write method for firmware
2c3c8bea 821 * @filp: open sysfs file
e55c8790 822 * @kobj: kobject for the device
42e61f4a 823 * @bin_attr: bin_attr structure
eb8e3179
RD
824 * @buffer: buffer being written
825 * @offset: buffer offset for write in total data store area
826 * @count: buffer size
1da177e4 827 *
eb8e3179 828 * Data written to the 'data' attribute will be later handed to
1da177e4
LT
829 * the driver as a firmware image.
830 **/
f8a4bd34
DT
831static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
832 struct bin_attribute *bin_attr,
833 char *buffer, loff_t offset, size_t count)
1da177e4 834{
b0d1f807 835 struct device *dev = kobj_to_dev(kobj);
f8a4bd34 836 struct firmware_priv *fw_priv = to_firmware_priv(dev);
1244691c 837 struct firmware_buf *buf;
1da177e4
LT
838 ssize_t retval;
839
840 if (!capable(CAP_SYS_RAWIO))
841 return -EPERM;
b92eac01 842
cad1e55d 843 mutex_lock(&fw_lock);
1244691c
ML
844 buf = fw_priv->buf;
845 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
1da177e4
LT
846 retval = -ENODEV;
847 goto out;
848 }
65710cb6 849
a098ecd2
SB
850 if (buf->data) {
851 if (offset + count > buf->allocated_size) {
852 retval = -ENOMEM;
853 goto out;
854 }
855 firmware_rw_buf(buf, buffer, offset, count, false);
856 retval = count;
857 } else {
858 retval = fw_realloc_buffer(fw_priv, offset + count);
859 if (retval)
860 goto out;
1da177e4 861
a098ecd2
SB
862 retval = count;
863 firmware_rw(buf, buffer, offset, count, false);
864 }
6e03a201 865
9ccf9811 866 buf->size = max_t(size_t, offset + count, buf->size);
1da177e4 867out:
cad1e55d 868 mutex_unlock(&fw_lock);
1da177e4
LT
869 return retval;
870}
eb8e3179 871
0983ca2d
DT
872static struct bin_attribute firmware_attr_data = {
873 .attr = { .name = "data", .mode = 0644 },
1da177e4
LT
874 .size = 0,
875 .read = firmware_data_read,
876 .write = firmware_data_write,
877};
878
46239902
TI
879static struct attribute *fw_dev_attrs[] = {
880 &dev_attr_loading.attr,
881 NULL
882};
883
884static struct bin_attribute *fw_dev_bin_attrs[] = {
885 &firmware_attr_data,
886 NULL
887};
888
889static const struct attribute_group fw_dev_attr_group = {
890 .attrs = fw_dev_attrs,
891 .bin_attrs = fw_dev_bin_attrs,
892};
893
894static const struct attribute_group *fw_dev_attr_groups[] = {
895 &fw_dev_attr_group,
896 NULL
897};
898
f8a4bd34 899static struct firmware_priv *
dddb5549 900fw_create_instance(struct firmware *firmware, const char *fw_name,
14c4bae7 901 struct device *device, unsigned int opt_flags)
1da177e4 902{
f8a4bd34
DT
903 struct firmware_priv *fw_priv;
904 struct device *f_dev;
1da177e4 905
1244691c 906 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
f8a4bd34 907 if (!fw_priv) {
1244691c
ML
908 fw_priv = ERR_PTR(-ENOMEM);
909 goto exit;
910 }
911
14c4bae7 912 fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
1f2b7959 913 fw_priv->fw = firmware;
f8a4bd34
DT
914 f_dev = &fw_priv->dev;
915
916 device_initialize(f_dev);
99c2aa72 917 dev_set_name(f_dev, "%s", fw_name);
e55c8790
GKH
918 f_dev->parent = device;
919 f_dev->class = &firmware_class;
46239902 920 f_dev->groups = fw_dev_attr_groups;
1244691c 921exit:
f8a4bd34 922 return fw_priv;
1da177e4
LT
923}
924
cd7239fa 925/* load a firmware via user helper */
14c4bae7
TI
926static int _request_firmware_load(struct firmware_priv *fw_priv,
927 unsigned int opt_flags, long timeout)
1f2b7959 928{
cd7239fa
TI
929 int retval = 0;
930 struct device *f_dev = &fw_priv->dev;
931 struct firmware_buf *buf = fw_priv->buf;
1f2b7959 932
cd7239fa 933 /* fall back on userspace loading */
a098ecd2
SB
934 if (!buf->data)
935 buf->is_paged_buf = true;
1f2b7959 936
cd7239fa 937 dev_set_uevent_suppress(f_dev, true);
f531f05a 938
cd7239fa
TI
939 retval = device_add(f_dev);
940 if (retval) {
941 dev_err(f_dev, "%s: device_register failed\n", __func__);
942 goto err_put_dev;
943 }
f531f05a 944
1eeeef15
MB
945 mutex_lock(&fw_lock);
946 list_add(&buf->pending_list, &pending_fw_head);
947 mutex_unlock(&fw_lock);
948
14c4bae7 949 if (opt_flags & FW_OPT_UEVENT) {
af5bc11e 950 buf->need_uevent = true;
cd7239fa
TI
951 dev_set_uevent_suppress(f_dev, false);
952 dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
cd7239fa 953 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
68ff2a00
ML
954 } else {
955 timeout = MAX_JIFFY_OFFSET;
cd7239fa 956 }
f531f05a 957
68ff2a00
ML
958 retval = wait_for_completion_interruptible_timeout(&buf->completion,
959 timeout);
960 if (retval == -ERESTARTSYS || !retval) {
0cb64249
ML
961 mutex_lock(&fw_lock);
962 fw_load_abort(fw_priv);
963 mutex_unlock(&fw_lock);
ef518cc8
ZD
964 } else if (retval > 0) {
965 retval = 0;
0cb64249 966 }
f531f05a 967
0542ad88
SK
968 if (is_fw_load_aborted(buf))
969 retval = -EAGAIN;
a098ecd2 970 else if (buf->is_paged_buf && !buf->data)
2b1278cb 971 retval = -ENOMEM;
f531f05a 972
cd7239fa
TI
973 device_del(f_dev);
974err_put_dev:
975 put_device(f_dev);
976 return retval;
f531f05a 977}
cd7239fa
TI
978
979static int fw_load_from_user_helper(struct firmware *firmware,
980 const char *name, struct device *device,
14c4bae7 981 unsigned int opt_flags, long timeout)
cfe016b1 982{
cd7239fa
TI
983 struct firmware_priv *fw_priv;
984
14c4bae7 985 fw_priv = fw_create_instance(firmware, name, device, opt_flags);
cd7239fa
TI
986 if (IS_ERR(fw_priv))
987 return PTR_ERR(fw_priv);
988
989 fw_priv->buf = firmware->priv;
14c4bae7 990 return _request_firmware_load(fw_priv, opt_flags, timeout);
cfe016b1 991}
ddf1f064 992
5b7cb7a1 993#ifdef CONFIG_PM_SLEEP
ddf1f064
ML
994/* kill pending requests without uevent to avoid blocking suspend */
995static void kill_requests_without_uevent(void)
996{
997 struct firmware_buf *buf;
998 struct firmware_buf *next;
999
1000 mutex_lock(&fw_lock);
1001 list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
1002 if (!buf->need_uevent)
7068cb07 1003 __fw_load_abort(buf);
ddf1f064
ML
1004 }
1005 mutex_unlock(&fw_lock);
1006}
5b7cb7a1 1007#endif
ddf1f064 1008
cd7239fa
TI
1009#else /* CONFIG_FW_LOADER_USER_HELPER */
1010static inline int
1011fw_load_from_user_helper(struct firmware *firmware, const char *name,
14c4bae7 1012 struct device *device, unsigned int opt_flags,
cd7239fa
TI
1013 long timeout)
1014{
1015 return -ENOENT;
1016}
807be03c
TI
1017
1018/* No abort during direct loading */
1019#define is_fw_load_aborted(buf) false
1020
5b7cb7a1 1021#ifdef CONFIG_PM_SLEEP
ddf1f064 1022static inline void kill_requests_without_uevent(void) { }
5b7cb7a1 1023#endif
ddf1f064 1024
cd7239fa
TI
1025#endif /* CONFIG_FW_LOADER_USER_HELPER */
1026
f531f05a 1027
4e0c92d0
TI
1028/* wait until the shared firmware_buf becomes ready (or error) */
1029static int sync_cached_firmware_buf(struct firmware_buf *buf)
1f2b7959 1030{
4e0c92d0
TI
1031 int ret = 0;
1032
1033 mutex_lock(&fw_lock);
1034 while (!test_bit(FW_STATUS_DONE, &buf->status)) {
807be03c 1035 if (is_fw_load_aborted(buf)) {
4e0c92d0
TI
1036 ret = -ENOENT;
1037 break;
1038 }
1039 mutex_unlock(&fw_lock);
000deba7 1040 ret = wait_for_completion_interruptible(&buf->completion);
4e0c92d0
TI
1041 mutex_lock(&fw_lock);
1042 }
1043 mutex_unlock(&fw_lock);
1044 return ret;
1f2b7959
ML
1045}
1046
4e0c92d0
TI
1047/* prepare firmware and firmware_buf structs;
1048 * return 0 if a firmware is already assigned, 1 if one needs to be loaded,
1049 * or a negative error code
1050 */
1051static int
1052_request_firmware_prepare(struct firmware **firmware_p, const char *name,
a098ecd2 1053 struct device *device, void *dbuf, size_t size)
1da177e4 1054{
1da177e4 1055 struct firmware *firmware;
1f2b7959
ML
1056 struct firmware_buf *buf;
1057 int ret;
1da177e4 1058
4aed0644 1059 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
1da177e4 1060 if (!firmware) {
266a813c
BH
1061 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
1062 __func__);
4e0c92d0 1063 return -ENOMEM;
1da177e4 1064 }
1da177e4 1065
a098ecd2 1066 if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
ed04630b 1067 dev_dbg(device, "using built-in %s\n", name);
4e0c92d0 1068 return 0; /* assigned */
5658c769
DW
1069 }
1070
a098ecd2 1071 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
4e0c92d0
TI
1072
1073 /*
1074 * bind with 'buf' now to avoid warning in failure path
1075 * of requesting firmware.
1076 */
1077 firmware->priv = buf;
1078
1079 if (ret > 0) {
1080 ret = sync_cached_firmware_buf(buf);
1081 if (!ret) {
1082 fw_set_page_data(buf, firmware);
1083 return 0; /* assigned */
1084 }
dddb5549 1085 }
811fa400 1086
4e0c92d0
TI
1087 if (ret < 0)
1088 return ret;
1089 return 1; /* need to load */
1090}
1091
e771d1aa 1092static int assign_firmware_buf(struct firmware *fw, struct device *device,
14c4bae7 1093 unsigned int opt_flags)
4e0c92d0
TI
1094{
1095 struct firmware_buf *buf = fw->priv;
1096
1f2b7959 1097 mutex_lock(&fw_lock);
807be03c 1098 if (!buf->size || is_fw_load_aborted(buf)) {
4e0c92d0
TI
1099 mutex_unlock(&fw_lock);
1100 return -ENOENT;
1f2b7959 1101 }
65710cb6 1102
4e0c92d0
TI
1103 /*
1104 * add firmware name into devres list so that we can auto cache
1105 * and uncache firmware for device.
1106 *
1107 * the device may have been deleted already, but the problem
1108 * should be fixed in devres or driver core.
1109 */
14c4bae7 1110 /* don't cache firmware handled without uevent */
0e742e92
VM
1111 if (device && (opt_flags & FW_OPT_UEVENT) &&
1112 !(opt_flags & FW_OPT_NOCACHE))
4e0c92d0
TI
1113 fw_add_devm_name(device, buf->fw_id);
1114
1115 /*
1116 * After caching of firmware images has been started, let the caching
1117 * piggyback on this firmware request.
1118 */
0e742e92
VM
1119 if (!(opt_flags & FW_OPT_NOCACHE) &&
1120 buf->fwc->state == FW_LOADER_START_CACHE) {
4e0c92d0
TI
1121 if (fw_cache_piggyback_on_request(buf->fw_id))
1122 kref_get(&buf->ref);
1123 }
1124
1125 /* pass the pages buffer to driver at the last minute */
1126 fw_set_page_data(buf, fw);
1f2b7959 1127 mutex_unlock(&fw_lock);
4e0c92d0 1128 return 0;
65710cb6
ML
1129}
1130
4e0c92d0
TI
1131/* called from request_firmware() and request_firmware_work_func() */
1132static int
1133_request_firmware(const struct firmware **firmware_p, const char *name,
a098ecd2
SB
1134 struct device *device, void *buf, size_t size,
1135 unsigned int opt_flags)
4e0c92d0 1136{
715780ae 1137 struct firmware *fw = NULL;
4e0c92d0
TI
1138 long timeout;
1139 int ret;
1140
1141 if (!firmware_p)
1142 return -EINVAL;
1143
715780ae
BN
1144 if (!name || name[0] == '\0') {
1145 ret = -EINVAL;
1146 goto out;
1147 }
471b095d 1148
a098ecd2 1149 ret = _request_firmware_prepare(&fw, name, device, buf, size);
4e0c92d0
TI
1150 if (ret <= 0) /* error or already assigned */
1151 goto out;
1152
1153 ret = 0;
1154 timeout = firmware_loading_timeout();
14c4bae7 1155 if (opt_flags & FW_OPT_NOWAIT) {
4e0c92d0
TI
1156 timeout = usermodehelper_read_lock_wait(timeout);
1157 if (!timeout) {
1158 dev_dbg(device, "firmware: %s loading timed out\n",
1159 name);
1160 ret = -EBUSY;
1161 goto out;
1162 }
1163 } else {
1164 ret = usermodehelper_read_trylock();
1165 if (WARN_ON(ret)) {
1166 dev_err(device, "firmware: %s will not be loaded\n",
1167 name);
1168 goto out;
1169 }
1170 }
1171
3e358ac2
NH
1172 ret = fw_get_filesystem_firmware(device, fw->priv);
1173 if (ret) {
c868edf4 1174 if (!(opt_flags & FW_OPT_NO_WARN))
bba3a87e 1175 dev_warn(device,
c868edf4
LR
1176 "Direct firmware load for %s failed with error %d\n",
1177 name, ret);
1178 if (opt_flags & FW_OPT_USERHELPER) {
bba3a87e
TI
1179 dev_warn(device, "Falling back to user helper\n");
1180 ret = fw_load_from_user_helper(fw, name, device,
14c4bae7 1181 opt_flags, timeout);
bba3a87e 1182 }
3e358ac2 1183 }
e771d1aa 1184
4e0c92d0 1185 if (!ret)
14c4bae7 1186 ret = assign_firmware_buf(fw, device, opt_flags);
4e0c92d0
TI
1187
1188 usermodehelper_read_unlock();
1189
1190 out:
1191 if (ret < 0) {
1192 release_firmware(fw);
1193 fw = NULL;
1194 }
1195
1196 *firmware_p = fw;
1197 return ret;
1198}
1199
6e3eaab0 1200/**
312c004d 1201 * request_firmware: - send firmware request and wait for it
eb8e3179
RD
1202 * @firmware_p: pointer to firmware image
1203 * @name: name of firmware file
1204 * @device: device for which firmware is being loaded
1205 *
1206 * @firmware_p will be used to return a firmware image by the name
6e3eaab0
AS
1207 * of @name for device @device.
1208 *
1209 * Should be called from user context where sleeping is allowed.
1210 *
312c004d 1211 * @name will be used as $FIRMWARE in the uevent environment and
6e3eaab0
AS
1212 * should be distinctive enough not to be confused with any other
1213 * firmware image for this or any other device.
0cfc1e1e
ML
1214 *
1215 * Caller must hold the reference count of @device.
6a927857
ML
1216 *
1217 * The function can be called safely inside device's suspend and
1218 * resume callback.
6e3eaab0
AS
1219 **/
1220int
1221request_firmware(const struct firmware **firmware_p, const char *name,
ea31003c 1222 struct device *device)
6e3eaab0 1223{
d6c8aa39
ML
1224 int ret;
1225
1226 /* Need to pin this module until return */
1227 __module_get(THIS_MODULE);
a098ecd2 1228 ret = _request_firmware(firmware_p, name, device, NULL, 0,
14c4bae7 1229 FW_OPT_UEVENT | FW_OPT_FALLBACK);
d6c8aa39
ML
1230 module_put(THIS_MODULE);
1231 return ret;
6e3eaab0 1232}
f494513f 1233EXPORT_SYMBOL(request_firmware);
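/*
 * Minimal usage sketch (illustrative; the image name, device and consumer
 * below are hypothetical):
 *
 *     const struct firmware *fw;
 *     int err;
 *
 *     err = request_firmware(&fw, "example-fw.bin", &pdev->dev);
 *     if (err)
 *             return err;
 *     example_chip_load(chip, fw->data, fw->size);
 *     release_firmware(fw);
 */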
6e3eaab0 1234
bba3a87e 1235/**
3c1556b2 1236 * request_firmware_direct: - load firmware directly without usermode helper
bba3a87e
TI
1237 * @firmware_p: pointer to firmware image
1238 * @name: name of firmware file
1239 * @device: device for which firmware is being loaded
1240 *
1241 * This function works pretty much like request_firmware(), but this doesn't
1242 * fall back to usermode helper even if the firmware couldn't be loaded
1243 * directly from fs. Hence it's useful for loading optional firmware images,
1244 * which aren't always present, without the extra long udev timeouts.
1245 **/
1246int request_firmware_direct(const struct firmware **firmware_p,
1247 const char *name, struct device *device)
1248{
1249 int ret;
ea31003c 1250
bba3a87e 1251 __module_get(THIS_MODULE);
a098ecd2 1252 ret = _request_firmware(firmware_p, name, device, NULL, 0,
c868edf4 1253 FW_OPT_UEVENT | FW_OPT_NO_WARN);
bba3a87e
TI
1254 module_put(THIS_MODULE);
1255 return ret;
1256}
1257EXPORT_SYMBOL_GPL(request_firmware_direct);
bba3a87e 1258
a098ecd2
SB
1259/**
1260 * request_firmware_into_buf - load firmware into a previously allocated buffer
1261 * @firmware_p: pointer to firmware image
1262 * @name: name of firmware file
1263 * @device: device for which firmware is being loaded and DMA region allocated
1264 * @buf: address of buffer to load firmware into
1265 * @size: size of buffer
1266 *
1267 * This function works pretty much like request_firmware(), but it doesn't
1268 * allocate a buffer to hold the firmware data. Instead, the firmware
1269 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1270 * data member is pointed at @buf.
1271 *
1272 * This function doesn't cache firmware either.
1273 */
1274int
1275request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1276 struct device *device, void *buf, size_t size)
1277{
1278 int ret;
1279
1280 __module_get(THIS_MODULE);
1281 ret = _request_firmware(firmware_p, name, device, buf, size,
1282 FW_OPT_UEVENT | FW_OPT_FALLBACK |
1283 FW_OPT_NOCACHE);
1284 module_put(THIS_MODULE);
1285 return ret;
1286}
1287EXPORT_SYMBOL(request_firmware_into_buf);
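/*
 * Minimal usage sketch (illustrative; the preallocated buffer, its size and
 * the image name are hypothetical):
 *
 *     const struct firmware *fw;
 *     int err;
 *
 *     err = request_firmware_into_buf(&fw, "example-fw.bin", dev,
 *                                     prealloc_buf, prealloc_size);
 *     if (!err) {
 *             // fw->data points into prealloc_buf, fw->size bytes long
 *             release_firmware(fw);
 *     }
 */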
1288
1da177e4
LT
1289/**
1290 * release_firmware: - release the resource associated with a firmware image
eb8e3179 1291 * @fw: firmware resource to release
1da177e4 1292 **/
bcb9bd18 1293void release_firmware(const struct firmware *fw)
1da177e4
LT
1294{
1295 if (fw) {
bcb9bd18
DT
1296 if (!fw_is_builtin_firmware(fw))
1297 firmware_free_data(fw);
1da177e4
LT
1298 kfree(fw);
1299 }
1300}
f494513f 1301EXPORT_SYMBOL(release_firmware);
1da177e4 1302
1da177e4
LT
1303/* Async support */
1304struct firmware_work {
1305 struct work_struct work;
1306 struct module *module;
1307 const char *name;
1308 struct device *device;
1309 void *context;
1310 void (*cont)(const struct firmware *fw, void *context);
14c4bae7 1311 unsigned int opt_flags;
1da177e4
LT
1312};
1313
a36cf844 1314static void request_firmware_work_func(struct work_struct *work)
1da177e4 1315{
a36cf844 1316 struct firmware_work *fw_work;
1da177e4 1317 const struct firmware *fw;
f8a4bd34 1318
a36cf844 1319 fw_work = container_of(work, struct firmware_work, work);
811fa400 1320
a098ecd2 1321 _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
14c4bae7 1322 fw_work->opt_flags);
9ebfbd45 1323 fw_work->cont(fw, fw_work->context);
4e0c92d0 1324 put_device(fw_work->device); /* taken in request_firmware_nowait() */
9ebfbd45 1325
1da177e4 1326 module_put(fw_work->module);
f9692b26 1327 kfree_const(fw_work->name);
1da177e4 1328 kfree(fw_work);
1da177e4
LT
1329}
1330
1331/**
3c31f07a 1332 * request_firmware_nowait - asynchronous version of request_firmware
eb8e3179 1333 * @module: module requesting the firmware
312c004d 1334 * @uevent: send a uevent to copy the firmware image if this flag
eb8e3179
RD
1335 * is non-zero; otherwise the firmware copy must be done manually.
1336 * @name: name of firmware file
1337 * @device: device for which firmware is being loaded
9ebfbd45 1338 * @gfp: allocation flags
eb8e3179
RD
1339 * @context: will be passed over to @cont, and
1340 * @fw may be %NULL if firmware request fails.
1341 * @cont: function will be called asynchronously when the firmware
1342 * request is over.
1da177e4 1343 *
0cfc1e1e
ML
1344 * Caller must hold the reference count of @device.
1345 *
6f21a62a
ML
1346 * Asynchronous variant of request_firmware() for user contexts:
1347 * - sleeps for as short a period as possible, since sleeping may
1348 * increase the kernel boot time of built-in device drivers
1349 * requesting firmware in their ->probe() methods, if
1350 * @gfp is GFP_KERNEL.
1351 *
1352 * - can't sleep at all if @gfp is GFP_ATOMIC.
1da177e4
LT
1353 **/
1354int
1355request_firmware_nowait(
072fc8f0 1356 struct module *module, bool uevent,
9ebfbd45 1357 const char *name, struct device *device, gfp_t gfp, void *context,
1da177e4
LT
1358 void (*cont)(const struct firmware *fw, void *context))
1359{
f8a4bd34 1360 struct firmware_work *fw_work;
1da177e4 1361
ea31003c 1362 fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1da177e4
LT
1363 if (!fw_work)
1364 return -ENOMEM;
f8a4bd34
DT
1365
1366 fw_work->module = module;
f9692b26 1367 fw_work->name = kstrdup_const(name, gfp);
303cda0e
LR
1368 if (!fw_work->name) {
1369 kfree(fw_work);
f9692b26 1370 return -ENOMEM;
303cda0e 1371 }
f8a4bd34
DT
1372 fw_work->device = device;
1373 fw_work->context = context;
1374 fw_work->cont = cont;
14c4bae7 1375 fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
5a1379e8 1376 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
f8a4bd34 1377
1da177e4 1378 if (!try_module_get(module)) {
f9692b26 1379 kfree_const(fw_work->name);
1da177e4
LT
1380 kfree(fw_work);
1381 return -EFAULT;
1382 }
1383
0cfc1e1e 1384 get_device(fw_work->device);
a36cf844
SB
1385 INIT_WORK(&fw_work->work, request_firmware_work_func);
1386 schedule_work(&fw_work->work);
1da177e4
LT
1387 return 0;
1388}
f494513f 1389EXPORT_SYMBOL(request_firmware_nowait);
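/*
 * Minimal asynchronous usage sketch (illustrative; the callback, names and
 * context structure are hypothetical):
 *
 *     static void example_fw_cont(const struct firmware *fw, void *context)
 *     {
 *             struct example_dev *edev = context;
 *
 *             if (!fw)
 *                     return;         // request failed or timed out
 *             example_load(edev, fw->data, fw->size);
 *             release_firmware(fw);
 *     }
 *
 *     err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
 *                                   "example-fw.bin", dev, GFP_KERNEL,
 *                                   edev, example_fw_cont);
 */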
1da177e4 1390
90f89081
ML
1391#ifdef CONFIG_PM_SLEEP
1392static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1393
2887b395
ML
1394/**
1395 * cache_firmware - cache one firmware image in kernel memory space
1396 * @fw_name: the firmware image name
1397 *
1398 * Cache firmware in kernel memory so that drivers can use it when the
1399 * system isn't ready for them to request a firmware image from userspace.
1400 * Once it returns successfully, a driver can use request_firmware or its
1401 * nowait version to get the cached firmware without any interaction
1402 * with userspace.
1403 *
1404 * Return 0 if the firmware image has been cached successfully
1405 * Return !0 otherwise
1406 *
1407 */
93232e46 1408static int cache_firmware(const char *fw_name)
2887b395
ML
1409{
1410 int ret;
1411 const struct firmware *fw;
1412
1413 pr_debug("%s: %s\n", __func__, fw_name);
1414
1415 ret = request_firmware(&fw, fw_name, NULL);
1416 if (!ret)
1417 kfree(fw);
1418
1419 pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1420
1421 return ret;
1422}
1423
6a2c1234
ML
1424static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1425{
1426 struct firmware_buf *tmp;
1427 struct firmware_cache *fwc = &fw_cache;
1428
1429 spin_lock(&fwc->lock);
1430 tmp = __fw_lookup_buf(fw_name);
1431 spin_unlock(&fwc->lock);
1432
1433 return tmp;
1434}
1435
2887b395
ML
1436/**
1437 * uncache_firmware - remove one cached firmware image
1438 * @fw_name: the firmware image name
1439 *
1440 * Uncache one firmware image which has been cached successfully
1441 * before.
1442 *
1443 * Return 0 if the firmware cache has been removed successfully
1444 * Return !0 otherwise
1445 *
1446 */
93232e46 1447static int uncache_firmware(const char *fw_name)
2887b395
ML
1448{
1449 struct firmware_buf *buf;
1450 struct firmware fw;
1451
1452 pr_debug("%s: %s\n", __func__, fw_name);
1453
a098ecd2 1454 if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
2887b395
ML
1455 return 0;
1456
1457 buf = fw_lookup_buf(fw_name);
1458 if (buf) {
1459 fw_free_buf(buf);
1460 return 0;
1461 }
1462
1463 return -EINVAL;
1464}
1465
37276a51
ML
1466static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1467{
1468 struct fw_cache_entry *fce;
1469
e0fd9b1d 1470 fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
37276a51
ML
1471 if (!fce)
1472 goto exit;
1473
e0fd9b1d
LR
1474 fce->name = kstrdup_const(name, GFP_ATOMIC);
1475 if (!fce->name) {
1476 kfree(fce);
1477 fce = NULL;
1478 goto exit;
1479 }
37276a51
ML
1480exit:
1481 return fce;
1482}
1483
373304fe 1484static int __fw_entry_found(const char *name)
ac39b3ea
ML
1485{
1486 struct firmware_cache *fwc = &fw_cache;
1487 struct fw_cache_entry *fce;
ac39b3ea 1488
ac39b3ea
ML
1489 list_for_each_entry(fce, &fwc->fw_names, list) {
1490 if (!strcmp(fce->name, name))
373304fe 1491 return 1;
ac39b3ea 1492 }
373304fe
ML
1493 return 0;
1494}
1495
1496static int fw_cache_piggyback_on_request(const char *name)
1497{
1498 struct firmware_cache *fwc = &fw_cache;
1499 struct fw_cache_entry *fce;
1500 int ret = 0;
1501
1502 spin_lock(&fwc->name_lock);
1503 if (__fw_entry_found(name))
1504 goto found;
ac39b3ea
ML
1505
1506 fce = alloc_fw_cache_entry(name);
1507 if (fce) {
1508 ret = 1;
1509 list_add(&fce->list, &fwc->fw_names);
1510 pr_debug("%s: fw: %s\n", __func__, name);
1511 }
1512found:
1513 spin_unlock(&fwc->name_lock);
1514 return ret;
1515}
1516
37276a51
ML
1517static void free_fw_cache_entry(struct fw_cache_entry *fce)
1518{
e0fd9b1d 1519 kfree_const(fce->name);
37276a51
ML
1520 kfree(fce);
1521}
1522
1523static void __async_dev_cache_fw_image(void *fw_entry,
1524 async_cookie_t cookie)
1525{
1526 struct fw_cache_entry *fce = fw_entry;
1527 struct firmware_cache *fwc = &fw_cache;
1528 int ret;
1529
1530 ret = cache_firmware(fce->name);
ac39b3ea
ML
1531 if (ret) {
1532 spin_lock(&fwc->name_lock);
1533 list_del(&fce->list);
1534 spin_unlock(&fwc->name_lock);
37276a51 1535
ac39b3ea
ML
1536 free_fw_cache_entry(fce);
1537 }
37276a51
ML
1538}
1539
1540/* called with dev->devres_lock held */
1541static void dev_create_fw_entry(struct device *dev, void *res,
1542 void *data)
1543{
1544 struct fw_name_devm *fwn = res;
1545 const char *fw_name = fwn->name;
1546 struct list_head *head = data;
1547 struct fw_cache_entry *fce;
1548
1549 fce = alloc_fw_cache_entry(fw_name);
1550 if (fce)
1551 list_add(&fce->list, head);
1552}
1553
1554static int devm_name_match(struct device *dev, void *res,
1555 void *match_data)
1556{
1557 struct fw_name_devm *fwn = res;
1558 return (fwn->magic == (unsigned long)match_data);
1559}
1560
ab6dd8e5 1561static void dev_cache_fw_image(struct device *dev, void *data)
37276a51
ML
1562{
1563 LIST_HEAD(todo);
1564 struct fw_cache_entry *fce;
1565 struct fw_cache_entry *fce_next;
1566 struct firmware_cache *fwc = &fw_cache;
1567
1568 devres_for_each_res(dev, fw_name_devm_release,
1569 devm_name_match, &fw_cache,
1570 dev_create_fw_entry, &todo);
1571
1572 list_for_each_entry_safe(fce, fce_next, &todo, list) {
1573 list_del(&fce->list);
1574
1575 spin_lock(&fwc->name_lock);
373304fe
ML
1576 /* only one cache entry for one firmware */
1577 if (!__fw_entry_found(fce->name)) {
373304fe
ML
1578 list_add(&fce->list, &fwc->fw_names);
1579 } else {
1580 free_fw_cache_entry(fce);
1581 fce = NULL;
1582 }
37276a51
ML
1583 spin_unlock(&fwc->name_lock);
1584
373304fe 1585 if (fce)
d28d3882
ML
1586 async_schedule_domain(__async_dev_cache_fw_image,
1587 (void *)fce,
1588 &fw_cache_domain);
37276a51
ML
1589 }
1590}
1591
1592static void __device_uncache_fw_images(void)
1593{
1594 struct firmware_cache *fwc = &fw_cache;
1595 struct fw_cache_entry *fce;
1596
1597 spin_lock(&fwc->name_lock);
1598 while (!list_empty(&fwc->fw_names)) {
1599 fce = list_entry(fwc->fw_names.next,
1600 struct fw_cache_entry, list);
1601 list_del(&fce->list);
1602 spin_unlock(&fwc->name_lock);
1603
1604 uncache_firmware(fce->name);
1605 free_fw_cache_entry(fce);
1606
1607 spin_lock(&fwc->name_lock);
1608 }
1609 spin_unlock(&fwc->name_lock);
1610}
1611
1612/**
1613 * device_cache_fw_images - cache devices' firmware
1614 *
1615 * If one device called request_firmware or its nowait version
1616 * successfully before, the firmware names are recorded into the
1617 * device's devres link list, so device_cache_fw_images can call
1618 * cache_firmware() to cache these firmware images for the device,
1619 * then the device driver can load its firmware easily at a
1620 * time when the system is not ready to complete loading firmware.
1621 */
1622static void device_cache_fw_images(void)
1623{
1624 struct firmware_cache *fwc = &fw_cache;
ffe53f6f 1625 int old_timeout;
37276a51
ML
1626 DEFINE_WAIT(wait);
1627
1628 pr_debug("%s\n", __func__);
1629
373304fe
ML
1630 /* cancel uncache work */
1631 cancel_delayed_work_sync(&fwc->work);
1632
ffe53f6f
ML
1633 /*
1634 * use small loading timeout for caching devices' firmware
1635 * because all these firmware images have been loaded
1636 * successfully at least once, and the system is now ready for
1637 * completing firmware loading. The maximum size of
1638 * firmware in current distributions is about 2M bytes,
1639 * so 10 secs should be enough.
1640 */
1641 old_timeout = loading_timeout;
1642 loading_timeout = 10;
1643
ac39b3ea
ML
1644 mutex_lock(&fw_lock);
1645 fwc->state = FW_LOADER_START_CACHE;
ab6dd8e5 1646 dpm_for_each_dev(NULL, dev_cache_fw_image);
ac39b3ea 1647 mutex_unlock(&fw_lock);
37276a51
ML
1648
1649 /* wait for completion of caching firmware for all devices */
d28d3882 1650 async_synchronize_full_domain(&fw_cache_domain);
ffe53f6f
ML
1651
1652 loading_timeout = old_timeout;
37276a51
ML
1653}
1654
1655/**
1656 * device_uncache_fw_images - uncache devices' firmware
1657 *
1658 * Uncache all firmware images which have been cached successfully
1659 * by device_cache_fw_images earlier.
1660 */
1661static void device_uncache_fw_images(void)
1662{
1663 pr_debug("%s\n", __func__);
1664 __device_uncache_fw_images();
1665}
1666
1667static void device_uncache_fw_images_work(struct work_struct *work)
1668{
1669 device_uncache_fw_images();
1670}
1671
1672/**
1673 * device_uncache_fw_images_delay - uncache devices' firmware
1674 * @delay: number of milliseconds to delay before uncaching device firmware
1675 *
1676 * Uncache all devices' firmware which has been cached successfully
1677 * by device_cache_fw_images, after @delay milliseconds.
1678 */
1679static void device_uncache_fw_images_delay(unsigned long delay)
1680{
bce6618a
SD
1681 queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1682 msecs_to_jiffies(delay));
37276a51
ML
1683}
1684
07646d9c
ML
1685static int fw_pm_notify(struct notifier_block *notify_block,
1686 unsigned long mode, void *unused)
1687{
1688 switch (mode) {
1689 case PM_HIBERNATION_PREPARE:
1690 case PM_SUSPEND_PREPARE:
f8d5b9e9 1691 case PM_RESTORE_PREPARE:
af5bc11e 1692 kill_requests_without_uevent();
07646d9c
ML
1693 device_cache_fw_images();
1694 break;
1695
1696 case PM_POST_SUSPEND:
1697 case PM_POST_HIBERNATION:
1698 case PM_POST_RESTORE:
ac39b3ea
ML
1699 /*
1700 * In case system sleep failed and syscore_suspend was
1701 * not called.
1702 */
1703 mutex_lock(&fw_lock);
1704 fw_cache.state = FW_LOADER_NO_CACHE;
1705 mutex_unlock(&fw_lock);
1706
07646d9c
ML
1707 device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1708 break;
1709 }
1710
1711 return 0;
1712}
07646d9c 1713
ac39b3ea
ML
1714/* stop caching firmware once syscore_suspend is reached */
1715static int fw_suspend(void)
1716{
1717 fw_cache.state = FW_LOADER_NO_CACHE;
1718 return 0;
1719}
1720
1721static struct syscore_ops fw_syscore_ops = {
1722 .suspend = fw_suspend,
1723};
cfe016b1
ML
1724#else
1725static int fw_cache_piggyback_on_request(const char *name)
1726{
1727 return 0;
1728}
1729#endif
ac39b3ea 1730
37276a51
ML
1731static void __init fw_cache_init(void)
1732{
1733 spin_lock_init(&fw_cache.lock);
1734 INIT_LIST_HEAD(&fw_cache.head);
cfe016b1 1735 fw_cache.state = FW_LOADER_NO_CACHE;
37276a51 1736
cfe016b1 1737#ifdef CONFIG_PM_SLEEP
37276a51
ML
1738 spin_lock_init(&fw_cache.name_lock);
1739 INIT_LIST_HEAD(&fw_cache.fw_names);
37276a51 1740
37276a51
ML
1741 INIT_DELAYED_WORK(&fw_cache.work,
1742 device_uncache_fw_images_work);
07646d9c
ML
1743
1744 fw_cache.pm_notify.notifier_call = fw_pm_notify;
1745 register_pm_notifier(&fw_cache.pm_notify);
ac39b3ea
ML
1746
1747 register_syscore_ops(&fw_syscore_ops);
cfe016b1 1748#endif
37276a51
ML
1749}
1750
673fae90 1751static int __init firmware_class_init(void)
1da177e4 1752{
1f2b7959 1753 fw_cache_init();
7b1269f7 1754#ifdef CONFIG_FW_LOADER_USER_HELPER
fe304143 1755 register_reboot_notifier(&fw_shutdown_nb);
673fae90 1756 return class_register(&firmware_class);
7b1269f7
TI
1757#else
1758 return 0;
1759#endif
1da177e4 1760}
673fae90
DT
1761
1762static void __exit firmware_class_exit(void)
1da177e4 1763{
cfe016b1 1764#ifdef CONFIG_PM_SLEEP
ac39b3ea 1765 unregister_syscore_ops(&fw_syscore_ops);
07646d9c 1766 unregister_pm_notifier(&fw_cache.pm_notify);
cfe016b1 1767#endif
7b1269f7 1768#ifdef CONFIG_FW_LOADER_USER_HELPER
fe304143 1769 unregister_reboot_notifier(&fw_shutdown_nb);
1da177e4 1770 class_unregister(&firmware_class);
7b1269f7 1771#endif
1da177e4
LT
1772}
1773
a30a6a2c 1774fs_initcall(firmware_class_init);
1da177e4 1775module_exit(firmware_class_exit);