/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}

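/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * since cache entries are never evicted automatically, a user is expected to
 * pair host1x_bo_cache_init() with host1x_bo_cache_destroy() and to release
 * every mapping it created in between (via host1x_bo_unpin()). The foo_*
 * names below are hypothetical.
 *
 *	struct foo {
 *		struct host1x_bo_cache cache;
 *	};
 *
 *	static void foo_start(struct foo *foo)
 *	{
 *		host1x_bo_cache_init(&foo->cache);
 *	}
 *
 *	static void foo_stop(struct foo *foo)
 *	{
 *		host1x_bo_cache_destroy(&foo->cache);
 *	}
 */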
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};

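/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * an engine driver typically embeds a struct host1x_client in its private
 * structure and provides init/exit callbacks that acquire and release
 * per-client resources such as a syncpoint. All foo_* identifiers are
 * invented for illustration.
 *
 *	struct foo_engine {
 *		struct host1x_client client;
 *		struct host1x_syncpt *syncpt;
 *	};
 *
 *	static int foo_init(struct host1x_client *client)
 *	{
 *		struct foo_engine *foo = container_of(client, struct foo_engine, client);
 *
 *		foo->syncpt = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *		if (!foo->syncpt)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 *
 *	static int foo_exit(struct host1x_client *client)
 *	{
 *		struct foo_engine *foo = container_of(client, struct foo_engine, client);
 *
 *		host1x_syncpt_put(foo->syncpt);
 *
 *		return 0;
 *	}
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_init,
 *		.exit = foo_exit,
 *	};
 */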
/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}

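/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * a driver-specific buffer object embeds struct host1x_bo, initializes it
 * with its host1x_bo_ops via host1x_bo_init(), and is then mapped for DMA
 * through host1x_bo_pin()/host1x_bo_unpin(), optionally going through a
 * host1x_bo_cache. The foo_* callbacks are assumed to exist elsewhere, and
 * the error handling assumes an ERR_PTR()-style return from the pin path.
 *
 *	struct foo_bo {
 *		struct host1x_bo base;
 *	};
 *
 *	static const struct host1x_bo_ops foo_bo_ops = {
 *		.get = foo_bo_get,
 *		.put = foo_bo_put,
 *		.pin = foo_bo_pin,
 *		.unpin = foo_bo_unpin,
 *		.mmap = foo_bo_mmap,
 *		.munmap = foo_bo_munmap,
 *	};
 *
 *	static int foo_map_for_dma(struct host1x_client *client, struct foo_bo *fbo)
 *	{
 *		struct host1x_bo_mapping *map;
 *
 *		host1x_bo_init(&fbo->base, &foo_bo_ops);
 *
 *		map = host1x_bo_pin(client->dev, &fbo->base, DMA_TO_DEVICE,
 *				    &client->cache);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *
 *		host1x_bo_unpin(map);
 *
 *		return 0;
 *	}
 */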
/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);

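/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * a submission typically reserves room on a syncpoint with
 * host1x_syncpt_incr_max() and then waits for the resulting threshold to be
 * reached, or wraps the same threshold in a DMA fence via
 * host1x_fence_create(). msecs_to_jiffies() is the usual kernel helper.
 *
 *	static int foo_run_and_wait(struct host1x_syncpt *sp)
 *	{
 *		u32 threshold, value;
 *
 *		threshold = host1x_syncpt_incr_max(sp, 1);
 *
 *		(submit work that increments the syncpoint once)
 *
 *		return host1x_syncpt_wait(sp, threshold, msecs_to_jiffies(100),
 *					  &value);
 *	}
 */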
/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);

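/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * channels are requested on behalf of a client, reference counted with
 * host1x_channel_get()/host1x_channel_put(), and can be stopped before the
 * final reference is dropped.
 *
 *	client->channel = host1x_channel_request(client);
 *	if (!client->channel)
 *		return -ENOMEM;
 *
 *	(submit jobs to client->channel)
 *
 *	host1x_channel_stop(client->channel);
 *	host1x_channel_put(client->channel);
 */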
/*
 * host1x job
 */

#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);

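/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * the usual job life cycle is allocate, describe the work (gathers, waits,
 * syncpoint increments), pin, submit and drop the local reference. All
 * identifiers below are placeholders.
 *
 *	static int foo_submit(struct host1x_client *client,
 *			      struct host1x_channel *channel,
 *			      struct host1x_syncpt *sp,
 *			      struct host1x_bo *cmdbuf, unsigned int words)
 *	{
 *		struct host1x_job *job;
 *		int err;
 *
 *		job = host1x_job_alloc(channel, 1, 0, false);
 *		if (!job)
 *			return -ENOMEM;
 *
 *		job->client = client;
 *		job->syncpt = sp;
 *		job->syncpt_incrs = 1;
 *
 *		host1x_job_add_gather(job, cmdbuf, words, 0);
 *
 *		err = host1x_job_pin(job, client->dev);
 *		if (!err)
 *			err = host1x_job_submit(job);
 *
 *		host1x_job_put(job);
 *
 *		return err;
 *	}
 */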
/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)

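/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * a logical host1x driver lists the OF compatibles of the subdevices it
 * wants to bind against and registers itself with host1x_driver_register().
 * The compatible string and the foo_* callbacks are placeholders.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "vendor,foo-engine" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.subdevs = foo_subdevs,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	host1x_driver_register(&foo_driver);
 *
 *	(and on module unload)
 *
 *	host1x_driver_unregister(&foo_driver);
 */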
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

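/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * as the comment above describes, a caller that wants to set up the client
 * before making it visible to the core initializes and registers in two
 * steps. foo and foo_client_ops are hypothetical.
 *
 *	host1x_client_init(&foo->client);
 *	foo->client.ops = &foo_client_ops;
 *	foo->client.dev = dev;
 *
 *	err = __host1x_client_register(&foo->client);
 *	if (err < 0)
 *		host1x_client_exit(&foo->client);
 */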
int host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);

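/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * a DSI/CSI driver typically requests a MIPI handle for its device node,
 * enables the calibration logic and brackets link bring-up with the
 * start/finish calibration calls. Error handling is omitted.
 *
 *	mipi = tegra_mipi_request(dev, dev->of_node);
 *
 *	tegra_mipi_enable(mipi);
 *	tegra_mipi_start_calibration(mipi);
 *
 *	(bring up the MIPI link)
 *
 *	tegra_mipi_finish_calibration(mipi);
 *	tegra_mipi_disable(mipi);
 *
 *	tegra_mipi_free(mipi);
 */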
/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									 struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif

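/*
 * Example (editor's illustrative sketch, not part of the upstream header):
 * when context isolation is available, a per-process memory context can be
 * attached to a job so that the engine's stream ID is programmed for that
 * context; without one, the job's fallback stream ID is used instead. For
 * brevity this sketch treats an allocation failure like disabled isolation.
 * get_task_pid() comes from <linux/pid.h>; FOO_FALLBACK_STREAMID is a
 * placeholder.
 *
 *	ctx = host1x_memory_context_alloc(host, get_task_pid(current, PIDTYPE_TGID));
 *	if (!IS_ERR_OR_NULL(ctx))
 *		job->memory_context = ctx;
 *	else
 *		job->engine_fallback_streamid = FOO_FALLBACK_STREAMID;
 *
 *	(submit and wait for the job)
 *
 *	if (job->memory_context)
 *		host1x_memory_context_put(job->memory_context);
 */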
#endif