| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* The industrial I/O core, trigger handling functions |
| 3 | * |
| 4 | * Copyright (c) 2008 Jonathan Cameron |
| 5 | */ |
| 6 | |
| 7 | #include <linux/kernel.h> |
| 8 | #include <linux/idr.h> |
| 9 | #include <linux/err.h> |
| 10 | #include <linux/device.h> |
| 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/list.h> |
| 13 | #include <linux/slab.h> |
| 14 | |
| 15 | #include <linux/iio/iio.h> |
| 16 | #include <linux/iio/iio-opaque.h> |
| 17 | #include <linux/iio/trigger.h> |
| 18 | #include "iio_core.h" |
| 19 | #include "iio_core_trigger.h" |
| 20 | #include <linux/iio/trigger_consumer.h> |
| 21 | |
| 22 | /* RFC - Question of approach |
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
| 26 | * |
| 27 | * Complex simultaneous start requires use of 'hold' functionality |
| 28 | * of the trigger. (not implemented) |
| 29 | * |
| 30 | * Any other suggestions? |
| 31 | */ |
| 32 | |
| 33 | static DEFINE_IDA(iio_trigger_ida); |
| 34 | |
| 35 | /* Single list of all available triggers */ |
| 36 | static LIST_HEAD(iio_trigger_list); |
| 37 | static DEFINE_MUTEX(iio_trigger_list_lock); |
| 38 | |
| 39 | /** |
| 40 | * name_show() - retrieve useful identifying name |
| 41 | * @dev: device associated with the iio_trigger |
| 42 | * @attr: pointer to the device_attribute structure that is |
| 43 | * being processed |
| 44 | * @buf: buffer to print the name into |
| 45 | * |
 * Return: a negative number on failure or the number of characters
 * written on success.
| 48 | */ |
| 49 | static ssize_t name_show(struct device *dev, struct device_attribute *attr, |
| 50 | char *buf) |
| 51 | { |
| 52 | struct iio_trigger *trig = to_iio_trigger(dev); |
| 53 | |
| 54 | return sysfs_emit(buf, "%s\n", trig->name); |
| 55 | } |
| 56 | |
| 57 | static DEVICE_ATTR_RO(name); |
| 58 | |
| 59 | static struct attribute *iio_trig_dev_attrs[] = { |
| 60 | &dev_attr_name.attr, |
| 61 | NULL, |
| 62 | }; |
| 63 | ATTRIBUTE_GROUPS(iio_trig_dev); |
| 64 | |
| 65 | static struct iio_trigger *__iio_trigger_find_by_name(const char *name); |
| 66 | |
| 67 | int iio_trigger_register(struct iio_trigger *trig_info) |
| 68 | { |
| 69 | int ret; |
| 70 | |
| 71 | trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL); |
| 72 | if (trig_info->id < 0) |
| 73 | return trig_info->id; |
| 74 | |
| 75 | /* Set the name used for the sysfs directory etc */ |
| 76 | dev_set_name(&trig_info->dev, "trigger%d", trig_info->id); |
| 77 | |
| 78 | ret = device_add(&trig_info->dev); |
| 79 | if (ret) |
| 80 | goto error_unregister_id; |
| 81 | |
| 82 | /* Add to list of available triggers held by the IIO core */ |
| 83 | mutex_lock(&iio_trigger_list_lock); |
| 84 | if (__iio_trigger_find_by_name(trig_info->name)) { |
| 85 | pr_err("Duplicate trigger name '%s'\n", trig_info->name); |
| 86 | ret = -EEXIST; |
| 87 | goto error_device_del; |
| 88 | } |
| 89 | list_add_tail(&trig_info->list, &iio_trigger_list); |
| 90 | mutex_unlock(&iio_trigger_list_lock); |
| 91 | |
| 92 | return 0; |
| 93 | |
| 94 | error_device_del: |
| 95 | mutex_unlock(&iio_trigger_list_lock); |
| 96 | device_del(&trig_info->dev); |
| 97 | error_unregister_id: |
| 98 | ida_free(&iio_trigger_ida, trig_info->id); |
| 99 | return ret; |
| 100 | } |
| 101 | EXPORT_SYMBOL(iio_trigger_register); |
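
/*
 * A minimal registration sketch for reference; the driver state, ops and
 * probe context below are hypothetical, not part of this file:
 *
 *	trig = iio_trigger_alloc(parent, "%s-dev%d", name, idx);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(trig, my_state);
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 */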
| 102 | |
| 103 | void iio_trigger_unregister(struct iio_trigger *trig_info) |
| 104 | { |
| 105 | mutex_lock(&iio_trigger_list_lock); |
| 106 | list_del(&trig_info->list); |
| 107 | mutex_unlock(&iio_trigger_list_lock); |
| 108 | |
| 109 | ida_free(&iio_trigger_ida, trig_info->id); |
	/*
	 * Possible issue in here: the id is freed before the device is
	 * deleted, so a newly registered trigger could reuse the same
	 * "trigger%d" name while this device is still visible in sysfs.
	 */
| 111 | device_del(&trig_info->dev); |
| 112 | } |
| 113 | EXPORT_SYMBOL(iio_trigger_unregister); |
| 114 | |
| 115 | int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig) |
| 116 | { |
| 117 | struct iio_dev_opaque *iio_dev_opaque; |
| 118 | |
| 119 | if (!indio_dev || !trig) |
| 120 | return -EINVAL; |
| 121 | |
| 122 | iio_dev_opaque = to_iio_dev_opaque(indio_dev); |
| 123 | mutex_lock(&iio_dev_opaque->mlock); |
| 124 | WARN_ON(iio_dev_opaque->trig_readonly); |
| 125 | |
| 126 | indio_dev->trig = iio_trigger_get(trig); |
| 127 | iio_dev_opaque->trig_readonly = true; |
| 128 | mutex_unlock(&iio_dev_opaque->mlock); |
| 129 | |
| 130 | return 0; |
| 131 | } |
| 132 | EXPORT_SYMBOL(iio_trigger_set_immutable); |
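
/*
 * Sketch of a consumer pinning its trigger at probe time (the trigger and
 * device variable names are assumptions):
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 *	ret = iio_trigger_set_immutable(indio_dev, trig);
 *	if (ret)
 *		return ret;
 */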
| 133 | |
| 134 | /* Search for trigger by name, assuming iio_trigger_list_lock held */ |
| 135 | static struct iio_trigger *__iio_trigger_find_by_name(const char *name) |
| 136 | { |
| 137 | struct iio_trigger *iter; |
| 138 | |
| 139 | list_for_each_entry(iter, &iio_trigger_list, list) |
| 140 | if (!strcmp(iter->name, name)) |
| 141 | return iter; |
| 142 | |
| 143 | return NULL; |
| 144 | } |
| 145 | |
| 146 | static struct iio_trigger *iio_trigger_acquire_by_name(const char *name) |
| 147 | { |
| 148 | struct iio_trigger *trig = NULL, *iter; |
| 149 | |
| 150 | mutex_lock(&iio_trigger_list_lock); |
| 151 | list_for_each_entry(iter, &iio_trigger_list, list) |
| 152 | if (sysfs_streq(iter->name, name)) { |
| 153 | trig = iter; |
| 154 | iio_trigger_get(trig); |
| 155 | break; |
| 156 | } |
| 157 | mutex_unlock(&iio_trigger_list_lock); |
| 158 | |
| 159 | return trig; |
| 160 | } |
| 161 | |
| 162 | static void iio_reenable_work_fn(struct work_struct *work) |
| 163 | { |
| 164 | struct iio_trigger *trig = container_of(work, struct iio_trigger, |
| 165 | reenable_work); |
| 166 | |
| 167 | /* |
| 168 | * This 'might' occur after the trigger state is set to disabled - |
| 169 | * in that case the driver should skip reenabling. |
| 170 | */ |
| 171 | trig->ops->reenable(trig); |
| 172 | } |
| 173 | |
| 174 | /* |
| 175 | * In general, reenable callbacks may need to sleep and this path is |
| 176 | * not performance sensitive, so just queue up a work item |
| 177 | * to reneable the trigger for us. |
| 178 | * |
| 179 | * Races that can cause this. |
| 180 | * 1) A handler occurs entirely in interrupt context so the counter |
| 181 | * the final decrement is still in this interrupt. |
| 182 | * 2) The trigger has been removed, but one last interrupt gets through. |
| 183 | * |
| 184 | * For (1) we must call reenable, but not in atomic context. |
| 185 | * For (2) it should be safe to call reenanble, if drivers never blindly |
| 186 | * reenable after state is off. |
| 187 | */ |
| 188 | static void iio_trigger_notify_done_atomic(struct iio_trigger *trig) |
| 189 | { |
| 190 | if (atomic_dec_and_test(&trig->use_count) && trig->ops && |
| 191 | trig->ops->reenable) |
| 192 | schedule_work(&trig->reenable_work); |
| 193 | } |
| 194 | |
| 195 | void iio_trigger_poll(struct iio_trigger *trig) |
| 196 | { |
| 197 | int i; |
| 198 | |
| 199 | if (!atomic_read(&trig->use_count)) { |
| 200 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 201 | |
| 202 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { |
| 203 | if (trig->subirqs[i].enabled) |
| 204 | generic_handle_irq(trig->subirq_base + i); |
| 205 | else |
| 206 | iio_trigger_notify_done_atomic(trig); |
| 207 | } |
| 208 | } |
| 209 | } |
| 210 | EXPORT_SYMBOL(iio_trigger_poll); |
| 211 | |
| 212 | irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private) |
| 213 | { |
| 214 | iio_trigger_poll(private); |
| 215 | return IRQ_HANDLED; |
| 216 | } |
| 217 | EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); |
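
/*
 * Typical use is to wire a device's data-ready interrupt line straight to
 * its trigger; a sketch, with the irq number and device assumed:
 *
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, trig->name, trig);
 *
 * Note the trigger itself must be passed as the dev_id cookie, since it is
 * handed back here as @private.
 */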
| 218 | |
| 219 | void iio_trigger_poll_chained(struct iio_trigger *trig) |
| 220 | { |
| 221 | int i; |
| 222 | |
| 223 | if (!atomic_read(&trig->use_count)) { |
| 224 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 225 | |
| 226 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { |
| 227 | if (trig->subirqs[i].enabled) |
| 228 | handle_nested_irq(trig->subirq_base + i); |
| 229 | else |
| 230 | iio_trigger_notify_done(trig); |
| 231 | } |
| 232 | } |
| 233 | } |
| 234 | EXPORT_SYMBOL(iio_trigger_poll_chained); |
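
/*
 * Where the parent interrupt is itself threaded (e.g. because acknowledging
 * it requires a sleeping bus access), the chained variant is called from the
 * interrupt thread instead; a sketch with assumed names:
 *
 *	static irqreturn_t my_irq_thread(int irq, void *private)
 *	{
 *		iio_trigger_poll_chained(private);
 *		return IRQ_HANDLED;
 *	}
 */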
| 235 | |
| 236 | void iio_trigger_notify_done(struct iio_trigger *trig) |
| 237 | { |
| 238 | if (atomic_dec_and_test(&trig->use_count) && trig->ops && |
| 239 | trig->ops->reenable) |
| 240 | trig->ops->reenable(trig); |
| 241 | } |
| 242 | EXPORT_SYMBOL(iio_trigger_notify_done); |
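
/*
 * Consumers call this at the end of their pollfunc thread once the sample
 * has been handled; a sketch, with the handler assumed to match a typical
 * triggered-buffer driver:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		... read the sample and push it to the buffers ...
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */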
| 243 | |
| 244 | /* Trigger Consumer related functions */ |
| 245 | static int iio_trigger_get_irq(struct iio_trigger *trig) |
| 246 | { |
| 247 | int ret; |
| 248 | |
| 249 | mutex_lock(&trig->pool_lock); |
	/* Find a free consumer slot: an order-0 region, i.e. a single bit */
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
| 253 | mutex_unlock(&trig->pool_lock); |
| 254 | if (ret >= 0) |
| 255 | ret += trig->subirq_base; |
| 256 | |
| 257 | return ret; |
| 258 | } |
| 259 | |
| 260 | static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) |
| 261 | { |
| 262 | mutex_lock(&trig->pool_lock); |
| 263 | clear_bit(irq - trig->subirq_base, trig->pool); |
| 264 | mutex_unlock(&trig->pool_lock); |
| 265 | } |
| 266 | |
/* There is some complexity in here.  With certain triggers (datardy) an
 * acknowledgement may be needed if the pollfuncs do not include the data
 * read for the triggering device.
 * This is not currently handled.  The alternative of not enabling the
 * trigger unless the relevant pollfunc is attached may be the best option.
 */
| 273 | /* Worth protecting against double additions? */ |
| 274 | int iio_trigger_attach_poll_func(struct iio_trigger *trig, |
| 275 | struct iio_poll_func *pf) |
| 276 | { |
| 277 | struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev); |
| 278 | bool notinuse = |
| 279 | bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 280 | int ret = 0; |
| 281 | |
| 282 | /* Prevent the module from being removed whilst attached to a trigger */ |
| 283 | __module_get(iio_dev_opaque->driver_module); |
| 284 | |
| 285 | /* Get irq number */ |
| 286 | pf->irq = iio_trigger_get_irq(trig); |
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
			trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error instead of falling through with ret == 0 */
		ret = pf->irq;
		goto out_put_module;
	}
| 292 | |
| 293 | /* Request irq */ |
| 294 | ret = request_threaded_irq(pf->irq, pf->h, pf->thread, |
| 295 | pf->type, pf->name, |
| 296 | pf); |
| 297 | if (ret < 0) |
| 298 | goto out_put_irq; |
| 299 | |
| 300 | /* Enable trigger in driver */ |
| 301 | if (trig->ops && trig->ops->set_trigger_state && notinuse) { |
| 302 | ret = trig->ops->set_trigger_state(trig, true); |
| 303 | if (ret < 0) |
| 304 | goto out_free_irq; |
| 305 | } |
| 306 | |
| 307 | /* |
| 308 | * Check if we just registered to our own trigger: we determine that |
| 309 | * this is the case if the IIO device and the trigger device share the |
| 310 | * same parent device. |
| 311 | */ |
| 312 | if (pf->indio_dev->dev.parent == trig->dev.parent) |
| 313 | trig->attached_own_device = true; |
| 314 | |
| 315 | return ret; |
| 316 | |
| 317 | out_free_irq: |
| 318 | free_irq(pf->irq, pf); |
| 319 | out_put_irq: |
| 320 | iio_trigger_put_irq(trig, pf->irq); |
| 321 | out_put_module: |
| 322 | module_put(iio_dev_opaque->driver_module); |
| 323 | return ret; |
| 324 | } |
| 325 | |
| 326 | int iio_trigger_detach_poll_func(struct iio_trigger *trig, |
| 327 | struct iio_poll_func *pf) |
| 328 | { |
| 329 | struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev); |
| 330 | bool no_other_users = |
| 331 | bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1; |
| 332 | int ret = 0; |
| 333 | |
| 334 | if (trig->ops && trig->ops->set_trigger_state && no_other_users) { |
| 335 | ret = trig->ops->set_trigger_state(trig, false); |
| 336 | if (ret) |
| 337 | return ret; |
| 338 | } |
| 339 | if (pf->indio_dev->dev.parent == trig->dev.parent) |
| 340 | trig->attached_own_device = false; |
| 341 | iio_trigger_put_irq(trig, pf->irq); |
| 342 | free_irq(pf->irq, pf); |
| 343 | module_put(iio_dev_opaque->driver_module); |
| 344 | |
| 345 | return ret; |
| 346 | } |
| 347 | |
| 348 | irqreturn_t iio_pollfunc_store_time(int irq, void *p) |
| 349 | { |
| 350 | struct iio_poll_func *pf = p; |
| 351 | |
| 352 | pf->timestamp = iio_get_time_ns(pf->indio_dev); |
| 353 | return IRQ_WAKE_THREAD; |
| 354 | } |
| 355 | EXPORT_SYMBOL(iio_pollfunc_store_time); |
| 356 | |
| 357 | struct iio_poll_func |
| 358 | *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), |
| 359 | irqreturn_t (*thread)(int irq, void *p), |
| 360 | int type, |
| 361 | struct iio_dev *indio_dev, |
| 362 | const char *fmt, |
| 363 | ...) |
| 364 | { |
| 365 | va_list vargs; |
| 366 | struct iio_poll_func *pf; |
| 367 | |
| 368 | pf = kmalloc(sizeof(*pf), GFP_KERNEL); |
| 369 | if (!pf) |
| 370 | return NULL; |
| 371 | va_start(vargs, fmt); |
| 372 | pf->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
| 373 | va_end(vargs); |
| 374 | if (pf->name == NULL) { |
| 375 | kfree(pf); |
| 376 | return NULL; |
| 377 | } |
| 378 | pf->h = h; |
| 379 | pf->thread = thread; |
| 380 | pf->type = type; |
| 381 | pf->indio_dev = indio_dev; |
| 382 | |
| 383 | return pf; |
| 384 | } |
| 385 | EXPORT_SYMBOL_GPL(iio_alloc_pollfunc); |
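
/*
 * Most drivers get their pollfunc from iio_triggered_buffer_setup(), but a
 * manual allocation looks roughly like this (the handler name is assumed):
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */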
| 386 | |
| 387 | void iio_dealloc_pollfunc(struct iio_poll_func *pf) |
| 388 | { |
| 389 | kfree(pf->name); |
| 390 | kfree(pf); |
| 391 | } |
| 392 | EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc); |
| 393 | |
| 394 | /** |
| 395 | * current_trigger_show() - trigger consumer sysfs query current trigger |
| 396 | * @dev: device associated with an industrial I/O device |
| 397 | * @attr: pointer to the device_attribute structure that |
| 398 | * is being processed |
| 399 | * @buf: buffer where the current trigger name will be printed into |
| 400 | * |
| 401 | * For trigger consumers the current_trigger interface allows the trigger |
| 402 | * used by the device to be queried. |
| 403 | * |
| 404 | * Return: a negative number on failure, the number of characters written |
| 405 | * on success or 0 if no trigger is available |
| 406 | */ |
| 407 | static ssize_t current_trigger_show(struct device *dev, |
| 408 | struct device_attribute *attr, char *buf) |
| 409 | { |
| 410 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
| 411 | |
| 412 | if (indio_dev->trig) |
| 413 | return sysfs_emit(buf, "%s\n", indio_dev->trig->name); |
| 414 | return 0; |
| 415 | } |
| 416 | |
| 417 | /** |
| 418 | * current_trigger_store() - trigger consumer sysfs set current trigger |
| 419 | * @dev: device associated with an industrial I/O device |
| 420 | * @attr: device attribute that is being processed |
| 421 | * @buf: string buffer that holds the name of the trigger |
| 422 | * @len: length of the trigger name held by buf |
| 423 | * |
| 424 | * For trigger consumers the current_trigger interface allows the trigger |
| 425 | * used for this device to be specified at run time based on the trigger's |
| 426 | * name. |
| 427 | * |
| 428 | * Return: negative error code on failure or length of the buffer |
| 429 | * on success |
| 430 | */ |
| 431 | static ssize_t current_trigger_store(struct device *dev, |
| 432 | struct device_attribute *attr, |
| 433 | const char *buf, size_t len) |
| 434 | { |
| 435 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
| 436 | struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); |
| 437 | struct iio_trigger *oldtrig = indio_dev->trig; |
| 438 | struct iio_trigger *trig; |
| 439 | int ret; |
| 440 | |
| 441 | mutex_lock(&iio_dev_opaque->mlock); |
| 442 | if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { |
| 443 | mutex_unlock(&iio_dev_opaque->mlock); |
| 444 | return -EBUSY; |
| 445 | } |
| 446 | if (iio_dev_opaque->trig_readonly) { |
| 447 | mutex_unlock(&iio_dev_opaque->mlock); |
| 448 | return -EPERM; |
| 449 | } |
| 450 | mutex_unlock(&iio_dev_opaque->mlock); |
| 451 | |
| 452 | trig = iio_trigger_acquire_by_name(buf); |
| 453 | if (oldtrig == trig) { |
| 454 | ret = len; |
| 455 | goto out_trigger_put; |
| 456 | } |
| 457 | |
| 458 | if (trig && indio_dev->info->validate_trigger) { |
| 459 | ret = indio_dev->info->validate_trigger(indio_dev, trig); |
| 460 | if (ret) |
| 461 | goto out_trigger_put; |
| 462 | } |
| 463 | |
| 464 | if (trig && trig->ops && trig->ops->validate_device) { |
| 465 | ret = trig->ops->validate_device(trig, indio_dev); |
| 466 | if (ret) |
| 467 | goto out_trigger_put; |
| 468 | } |
| 469 | |
| 470 | indio_dev->trig = trig; |
| 471 | |
| 472 | if (oldtrig) { |
| 473 | if (indio_dev->modes & INDIO_EVENT_TRIGGERED) |
| 474 | iio_trigger_detach_poll_func(oldtrig, |
| 475 | indio_dev->pollfunc_event); |
| 476 | iio_trigger_put(oldtrig); |
| 477 | } |
| 478 | if (indio_dev->trig) { |
| 479 | if (indio_dev->modes & INDIO_EVENT_TRIGGERED) |
| 480 | iio_trigger_attach_poll_func(indio_dev->trig, |
| 481 | indio_dev->pollfunc_event); |
| 482 | } |
| 483 | |
| 484 | return len; |
| 485 | |
| 486 | out_trigger_put: |
| 487 | if (trig) |
| 488 | iio_trigger_put(trig); |
| 489 | return ret; |
| 490 | } |
| 491 | |
| 492 | static DEVICE_ATTR_RW(current_trigger); |
| 493 | |
| 494 | static struct attribute *iio_trigger_consumer_attrs[] = { |
| 495 | &dev_attr_current_trigger.attr, |
| 496 | NULL, |
| 497 | }; |
| 498 | |
| 499 | static const struct attribute_group iio_trigger_consumer_attr_group = { |
| 500 | .name = "trigger", |
| 501 | .attrs = iio_trigger_consumer_attrs, |
| 502 | }; |
| 503 | |
| 504 | static void iio_trig_release(struct device *device) |
| 505 | { |
| 506 | struct iio_trigger *trig = to_iio_trigger(device); |
| 507 | int i; |
| 508 | |
| 509 | if (trig->subirq_base) { |
| 510 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { |
| 511 | irq_modify_status(trig->subirq_base + i, |
| 512 | IRQ_NOAUTOEN, |
| 513 | IRQ_NOREQUEST | IRQ_NOPROBE); |
| 514 | irq_set_chip(trig->subirq_base + i, |
| 515 | NULL); |
| 516 | irq_set_handler(trig->subirq_base + i, |
| 517 | NULL); |
| 518 | } |
| 519 | |
| 520 | irq_free_descs(trig->subirq_base, |
| 521 | CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 522 | } |
| 523 | kfree(trig->name); |
| 524 | kfree(trig); |
| 525 | } |
| 526 | |
| 527 | static const struct device_type iio_trig_type = { |
| 528 | .release = iio_trig_release, |
| 529 | .groups = iio_trig_dev_groups, |
| 530 | }; |
| 531 | |
| 532 | static void iio_trig_subirqmask(struct irq_data *d) |
| 533 | { |
| 534 | struct irq_chip *chip = irq_data_get_irq_chip(d); |
| 535 | struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); |
| 536 | |
| 537 | trig->subirqs[d->irq - trig->subirq_base].enabled = false; |
| 538 | } |
| 539 | |
| 540 | static void iio_trig_subirqunmask(struct irq_data *d) |
| 541 | { |
| 542 | struct irq_chip *chip = irq_data_get_irq_chip(d); |
| 543 | struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); |
| 544 | |
| 545 | trig->subirqs[d->irq - trig->subirq_base].enabled = true; |
| 546 | } |
| 547 | |
| 548 | static __printf(3, 0) |
| 549 | struct iio_trigger *viio_trigger_alloc(struct device *parent, |
| 550 | struct module *this_mod, |
| 551 | const char *fmt, |
| 552 | va_list vargs) |
| 553 | { |
| 554 | struct iio_trigger *trig; |
| 555 | int i; |
| 556 | |
| 557 | trig = kzalloc(sizeof(*trig), GFP_KERNEL); |
| 558 | if (!trig) |
| 559 | return NULL; |
| 560 | |
| 561 | trig->dev.parent = parent; |
| 562 | trig->dev.type = &iio_trig_type; |
| 563 | trig->dev.bus = &iio_bus_type; |
| 564 | device_initialize(&trig->dev); |
| 565 | INIT_WORK(&trig->reenable_work, iio_reenable_work_fn); |
| 566 | |
| 567 | mutex_init(&trig->pool_lock); |
| 568 | trig->subirq_base = irq_alloc_descs(-1, 0, |
| 569 | CONFIG_IIO_CONSUMERS_PER_TRIGGER, |
| 570 | 0); |
| 571 | if (trig->subirq_base < 0) |
| 572 | goto free_trig; |
| 573 | |
| 574 | trig->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
| 575 | if (trig->name == NULL) |
| 576 | goto free_descs; |
| 577 | |
| 578 | INIT_LIST_HEAD(&trig->list); |
| 579 | |
| 580 | trig->owner = this_mod; |
| 581 | |
| 582 | trig->subirq_chip.name = trig->name; |
| 583 | trig->subirq_chip.irq_mask = &iio_trig_subirqmask; |
| 584 | trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask; |
| 585 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { |
| 586 | irq_set_chip(trig->subirq_base + i, &trig->subirq_chip); |
| 587 | irq_set_handler(trig->subirq_base + i, &handle_simple_irq); |
| 588 | irq_modify_status(trig->subirq_base + i, |
| 589 | IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); |
| 590 | } |
| 591 | |
| 592 | return trig; |
| 593 | |
| 594 | free_descs: |
| 595 | irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
| 596 | free_trig: |
| 597 | kfree(trig); |
| 598 | return NULL; |
| 599 | } |
| 600 | |
| 601 | /** |
| 602 | * __iio_trigger_alloc - Allocate a trigger |
| 603 | * @parent: Device to allocate iio_trigger for |
| 604 | * @this_mod: module allocating the trigger |
| 605 | * @fmt: trigger name format. If it includes format |
| 606 | * specifiers, the additional arguments following |
| 607 | * format are formatted and inserted in the resulting |
| 608 | * string replacing their respective specifiers. |
 *
 * RETURNS:
| 610 | * Pointer to allocated iio_trigger on success, NULL on failure. |
| 611 | */ |
| 612 | struct iio_trigger *__iio_trigger_alloc(struct device *parent, |
| 613 | struct module *this_mod, |
| 614 | const char *fmt, ...) |
| 615 | { |
| 616 | struct iio_trigger *trig; |
| 617 | va_list vargs; |
| 618 | |
| 619 | va_start(vargs, fmt); |
| 620 | trig = viio_trigger_alloc(parent, this_mod, fmt, vargs); |
| 621 | va_end(vargs); |
| 622 | |
| 623 | return trig; |
| 624 | } |
| 625 | EXPORT_SYMBOL(__iio_trigger_alloc); |
| 626 | |
| 627 | void iio_trigger_free(struct iio_trigger *trig) |
| 628 | { |
| 629 | if (trig) |
| 630 | put_device(&trig->dev); |
| 631 | } |
| 632 | EXPORT_SYMBOL(iio_trigger_free); |
| 633 | |
| 634 | static void devm_iio_trigger_release(struct device *dev, void *res) |
| 635 | { |
| 636 | iio_trigger_free(*(struct iio_trigger **)res); |
| 637 | } |
| 638 | |
| 639 | /** |
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:	Device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. An iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
| 653 | */ |
| 654 | struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent, |
| 655 | struct module *this_mod, |
| 656 | const char *fmt, ...) |
| 657 | { |
| 658 | struct iio_trigger **ptr, *trig; |
| 659 | va_list vargs; |
| 660 | |
| 661 | ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr), |
| 662 | GFP_KERNEL); |
| 663 | if (!ptr) |
| 664 | return NULL; |
| 665 | |
| 666 | /* use raw alloc_dr for kmalloc caller tracing */ |
| 667 | va_start(vargs, fmt); |
| 668 | trig = viio_trigger_alloc(parent, this_mod, fmt, vargs); |
| 669 | va_end(vargs); |
| 670 | if (trig) { |
| 671 | *ptr = trig; |
| 672 | devres_add(parent, ptr); |
| 673 | } else { |
| 674 | devres_free(ptr); |
| 675 | } |
| 676 | |
| 677 | return trig; |
| 678 | } |
| 679 | EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc); |
| 680 | |
| 681 | static void devm_iio_trigger_unreg(void *trigger_info) |
| 682 | { |
| 683 | iio_trigger_unregister(trigger_info); |
| 684 | } |
| 685 | |
| 686 | /** |
| 687 | * devm_iio_trigger_register - Resource-managed iio_trigger_register() |
| 688 | * @dev: device this trigger was allocated for |
| 689 | * @trig_info: trigger to register |
| 690 | * |
| 691 | * Managed iio_trigger_register(). The IIO trigger registered with this |
| 692 | * function is automatically unregistered on driver detach. This function |
| 693 | * calls iio_trigger_register() internally. Refer to that function for more |
| 694 | * information. |
| 695 | * |
| 696 | * RETURNS: |
| 697 | * 0 on success, negative error number on failure. |
| 698 | */ |
| 699 | int devm_iio_trigger_register(struct device *dev, |
| 700 | struct iio_trigger *trig_info) |
| 701 | { |
| 702 | int ret; |
| 703 | |
| 704 | ret = iio_trigger_register(trig_info); |
| 705 | if (ret) |
| 706 | return ret; |
| 707 | |
| 708 | return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info); |
| 709 | } |
| 710 | EXPORT_SYMBOL_GPL(devm_iio_trigger_register); |
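
/*
 * Combined with __devm_iio_trigger_alloc() this gives a fully managed probe
 * path; a sketch, with the ops and name format arguments assumed:
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trigger_ops;
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */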
| 711 | |
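/**
 * iio_trigger_using_own - Check if the device is using its own trigger
 * @indio_dev: device to check
 *
 * Return: true if the trigger attached to @indio_dev shares the same parent
 * device, i.e. the device is triggering itself.
 */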
| 712 | bool iio_trigger_using_own(struct iio_dev *indio_dev) |
| 713 | { |
| 714 | return indio_dev->trig->attached_own_device; |
| 715 | } |
| 716 | EXPORT_SYMBOL(iio_trigger_using_own); |
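
/*
 * A pollfunc may use this to pick a cheaper read path when the device
 * triggered itself; a sketch with assumed helpers:
 *
 *	if (iio_trigger_using_own(indio_dev))
 *		ret = my_read_latched_sample(st, buf);
 *	else
 *		ret = my_read_sample(st, buf);
 */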
| 717 | |
| 718 | /** |
| 719 | * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to |
| 720 | * the same device |
| 721 | * @trig: The IIO trigger to check |
| 722 | * @indio_dev: the IIO device to check |
| 723 | * |
| 724 | * This function can be used as the validate_device callback for triggers that |
| 725 | * can only be attached to their own device. |
| 726 | * |
| 727 | * Return: 0 if both the trigger and the IIO device belong to the same |
| 728 | * device, -EINVAL otherwise. |
| 729 | */ |
| 730 | int iio_trigger_validate_own_device(struct iio_trigger *trig, |
| 731 | struct iio_dev *indio_dev) |
| 732 | { |
| 733 | if (indio_dev->dev.parent != trig->dev.parent) |
| 734 | return -EINVAL; |
| 735 | return 0; |
| 736 | } |
| 737 | EXPORT_SYMBOL(iio_trigger_validate_own_device); |
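
/*
 * A sketch of trigger ops wiring this in as the validate_device callback
 * (the ops structure and state handler names are assumptions):
 *
 *	static const struct iio_trigger_ops my_trigger_ops = {
 *		.set_trigger_state = my_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */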
| 738 | |
| 739 | int iio_device_register_trigger_consumer(struct iio_dev *indio_dev) |
| 740 | { |
| 741 | return iio_device_register_sysfs_group(indio_dev, |
| 742 | &iio_trigger_consumer_attr_group); |
| 743 | } |
| 744 | |
| 745 | void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) |
| 746 | { |
| 747 | /* Clean up an associated but not attached trigger reference */ |
| 748 | if (indio_dev->trig) |
| 749 | iio_trigger_put(indio_dev->trig); |
| 750 | } |