Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[linux-block.git] / include / linux / host1x.h
CommitLineData
16216333 1/* SPDX-License-Identifier: GPL-2.0-or-later */
6579324a 2/*
6579324a 3 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
6579324a
TB
4 */
5
6#ifndef __LINUX_HOST1X_H
7#define __LINUX_HOST1X_H
8
776dc384 9#include <linux/device.h>
c6aeaf56 10#include <linux/dma-direction.h>
c24973ed 11#include <linux/dma-fence.h>
1f39b1df 12#include <linux/spinlock.h>
35d747a8
TR
13#include <linux/types.h>
14
/*
 * enum host1x_class - class IDs of the engines reachable through host1x
 *
 * These values are used in SETCLASS requests (see &struct host1x_job.class)
 * to select which hardware unit a channel's command stream is directed at.
 */
enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};
24
struct host1x;
struct host1x_client;
struct iommu_group;

/* Query the DMA mask of the given host1x instance. */
u64 host1x_get_dma_mask(struct host1x *host1x);
1f39b1df
TR
/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};
45
46static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
47{
48 INIT_LIST_HEAD(&cache->mappings);
49 mutex_init(&cache->lock);
50}
51
/*
 * Tear down a buffer object cache. The mappings list is expected to be
 * empty by this point; only the mutex needs destroying.
 */
static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
57
466749f1
TR
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 *
 * All callbacks are optional in principle; each returns 0 on success or a
 * negative error code — NOTE(review): inferred from kernel convention,
 * confirm against the host1x bus implementation.
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
75
466749f1
TR
/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
112
35d747a8
TR
/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

/**
 * struct host1x_bo_mapping - device mapping of a host1x buffer object
 * @ref: reference count for this mapping
 * @attach: DMA-BUF attachment, if the buffer was imported (NULL otherwise —
 *          TODO confirm against the pin implementations)
 * @direction: DMA data direction this buffer was mapped with
 * @list: entry in the buffer object's list of mappings
 * @bo: buffer object that this is a mapping of
 * @sgt: scatter-gather table describing the mapped pages
 * @chunks: number of chunks in the mapping — presumably the number of
 *          DMA segments; verify against users
 * @dev: device that this buffer is mapped for
 * @phys: DMA address of the mapping
 * @size: size of the mapping in bytes
 * @cache: cache that this mapping is tracked in, or NULL if uncached
 * @entry: entry in the cache's list of mappings
 */
struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};
135
1f39b1df
TR
/* Convert an embedded &kref back to its containing &struct host1x_bo_mapping. */
static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
140
35d747a8
TR
/**
 * struct host1x_bo_ops - host1x buffer object operations
 * @get: take a reference to the buffer object and return it
 * @put: release a reference to the buffer object
 * @pin: map the buffer for DMA to @dev in direction @dir, returning the
 *       resulting mapping
 * @unpin: release a mapping previously created by @pin
 * @mmap: map the buffer into CPU-accessible memory and return the address
 * @munmap: undo a previous @mmap at @addr
 */
struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};
150
/**
 * struct host1x_bo - host1x buffer object
 * @ops: operations implemented by the backing driver
 * @mappings: list of device mappings of this buffer object
 * @lock: protects @mappings
 */
struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};
156
157static inline void host1x_bo_init(struct host1x_bo *bo,
158 const struct host1x_bo_ops *ops)
159{
1f39b1df
TR
160 INIT_LIST_HEAD(&bo->mappings);
161 spin_lock_init(&bo->lock);
35d747a8
TR
162 bo->ops = ops;
163}
164
165static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
166{
167 return bo->ops->get(bo);
168}
169
170static inline void host1x_bo_put(struct host1x_bo *bo)
171{
172 bo->ops->put(bo);
173}
174
1f39b1df
TR
/*
 * Map @bo for DMA by @dev in direction @dir. If @cache is non-NULL, the
 * resulting mapping is tracked (and may be reused from) that cache.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);
35d747a8
TR
179
180static inline void *host1x_bo_mmap(struct host1x_bo *bo)
181{
182 return bo->ops->mmap(bo);
183}
184
185static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
186{
187 bo->ops->munmap(bo, addr);
188}
189
35d747a8
TR
/*
 * host1x syncpoints
 */

/* Syncpoint increments are issued by the client, not by host1x. */
#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
/* Syncpoint has an associated wait base. */
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

/* Lookup and reference management. */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
/* Accessors for the syncpoint ID and (cached vs. hardware) values. */
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
/* Increment the syncpoint, or reserve @incrs future increments. */
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
/* Wait for the syncpoint to reach @thresh; the final value is stored in @value. */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
/* Allocation and release of syncpoints. */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

/* DMA fences backed by syncpoint thresholds. */
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout);
void host1x_fence_cancel(struct dma_fence *fence);
687db220 228
35d747a8
TR
/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

/* Channel acquisition and reference management. */
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
/* Submit a fully prepared job to its channel. */
int host1x_job_submit(struct host1x_job *job);

/*
 * host1x job
 */

/* Access flags for relocated buffers. */
#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)
248
/*
 * struct host1x_reloc - buffer address relocation
 *
 * Describes a patch site inside a command buffer (@cmdbuf) that must be
 * rewritten with the DMA address of @target once buffers are pinned.
 * @shift and @flags qualify the patched value — presumably a right-shift
 * applied to the address and HOST1X_RELOC_{READ,WRITE} access flags;
 * verify against the job pinning code.
 */
struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};
261
/*
 * struct host1x_job - a unit of work (command streams plus the buffers and
 * syncpoint bookkeeping they need) submitted to a host1x channel.
 */
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	/* DMA addresses resolved during pinning (gathers and relocations) */
	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point, number of increments and end value related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion fence for job tracking */
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};
343
/* Allocate a job for channel @ch with room for the given command/reloc counts. */
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
/* Append a gather (@words words at @offset within @bo) to the job. */
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
/* Append a syncpoint wait (@id reaching @thresh) to the job's command stream. */
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
/* Reference management and buffer pin/unpin around submission. */
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
776dc384
TR
/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};
381
f4c5cf88
TR
/* Convert an embedded &struct device_driver back to its &struct host1x_driver. */
static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}
387
int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

/* Convenience wrapper that passes the calling module as the owner. */
#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
776dc384
TR
/**
 * struct host1x_device - host1x logical device
 * @driver: host1x driver bound to this device
 * @list: list node for the device
 * @dev: device structure
 * @subdevs_lock: protects the subdevice lists
 * @subdevs: list of subdevices still waiting to be probed — TODO confirm
 *           against the host1x bus code
 * @active: list of subdevices that have been activated — TODO confirm
 * @clients_lock: protects @clients
 * @clients: list of host1x clients attached to this device
 * @registered: whether the device has been registered
 * @dma_parms: DMA parameters for this device
 */
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};
411
/* Convert an embedded &struct device back to its &struct host1x_device. */
static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}
416
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

/*
 * The static lock_class_key gives each call site its own lockdep class for
 * the client's mutex.
 */
#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

void host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
450
4de6a2d6
TR
struct tegra_mipi_device;

/* Acquire/release a MIPI device handle for the pads described by @np. */
struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
/* Power management and pad calibration — presumably MIPI D-PHY calibration
 * via the Tegra MIPI CAL block; confirm against the host1x MIPI driver. */
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
4de6a2d6 460
8aa5bcb6
MP
/* host1x memory contexts */

/**
 * struct host1x_memory_context - memory context used for engine isolation
 * @host: host1x instance the context belongs to
 * @ref: reference count
 * @owner: PID of the process that owns this context
 * @dev: context device
 * @dma_mask: DMA mask for @dev
 * @stream_id: stream ID programmed into engines using this context
 */
struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							  struct device *dev,
							  struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
/*
 * NOTE(review): without IOMMU support the stub returns NULL rather than an
 * ERR_PTR, so callers must treat NULL as "no context available" — confirm
 * this matches how callers check the result.
 */
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									struct device *dev,
									struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
496
6579324a 497#endif