drm/i915: convert INTEL_DISPLAY_ENABLED() into a function
drivers/gpu/drm/i915/display/intel_hotplug.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <linux/kernel.h>
25
26 #include "i915_drv.h"
27 #include "i915_irq.h"
28 #include "intel_display_power.h"
29 #include "intel_display_types.h"
30 #include "intel_hotplug.h"
31 #include "intel_hotplug_irq.h"
32
33 /**
34  * DOC: Hotplug
35  *
36  * Simply put, hotplug occurs when a display is connected to or disconnected
37  * from the system. However, there may be adapters and docking stations and
38  * Display Port short pulses and MST devices involved, complicating matters.
39  *
40  * Hotplug in i915 is handled at many different levels of abstraction.
41  *
42  * The platform dependent interrupt handling code in i915_irq.c enables,
43  * disables, and does preliminary handling of the interrupts. The interrupt
44  * handlers gather the hotplug detect (HPD) information from relevant registers
45  * into a platform independent mask of hotplug pins that have fired.
46  *
47  * The platform independent interrupt handler intel_hpd_irq_handler() in
48  * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
49  * further processing to appropriate bottom halves (Display Port specific and
50  * regular hotplug).
51  *
52  * The Display Port work function i915_digport_work_func() calls into
53  * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
54  * pulses, with failures and non-MST long pulses triggering regular hotplug
55  * processing on the connector.
56  *
57  * The regular hotplug work function i915_hotplug_work_func() calls connector
58  * detect hooks, and, if connector status changes, triggers sending of hotplug
59  * uevent to userspace via drm_kms_helper_hotplug_event().
60  *
61  * Finally, userspace is responsible for triggering a modeset upon receiving
62  * the hotplug uevent, disabling or enabling the crtc as needed.
63  *
64  * The hotplug interrupt storm detection and mitigation code keeps track of the
65  * number of interrupts per hotplug pin over a period of time, and if the number
66  * of interrupts exceeds a certain threshold, the interrupt is disabled for a
67  * while before being re-enabled. The intention is to mitigate issues arising
68  * from broken hardware triggering massive amounts of interrupts and grinding
69  * the system to a halt.
70  *
71  * The current implementation expects that a hotplug interrupt storm will not
72  * be seen when a display port sink is connected. Hence, on platforms whose DP
73  * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
74  * performed (it was never expected to be disabled in the first place ;) ).
75  * This is specific to DP sinks handled by that routine; any other display,
76  * such as HDMI or DVI, enabled on the same port will get the proper logic
77  * since it goes through i915_hotplug_work_func(), where this is handled.
78  */
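/*
 * As a rough sketch (simplified from the description above), the processing
 * chain looks like this:
 *
 *   platform irq handler (i915_irq.c)
 *     -> intel_hpd_irq_handler()        storm detection and mitigation
 *        -> i915_digport_work_func()    DP short pulses and DP MST long pulses
 *        -> i915_hotplug_work_func()    connector detect, hotplug uevent
 *           -> drm_kms_helper_hotplug_event()
 *              -> userspace modeset
 */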
79
80 /**
81  * intel_hpd_pin_default - return default pin associated with certain port.
82  * @dev_priv: private driver data pointer
83  * @port: the hpd port to get associated pin
84  *
85  * It is only valid for and used by digital port encoders.
86  *
87  * Return: pin that is associated with @port.
88  */
89 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
90                                    enum port port)
91 {
92         return HPD_PORT_A + port - PORT_A;
93 }
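/*
 * A minimal usage sketch (hypothetical call site, not part of this file): an
 * encoder init path can derive its default HPD pin from its port, e.g. PORT_B
 * maps to HPD_PORT_B:
 *
 *	encoder->hpd_pin = intel_hpd_pin_default(dev_priv, PORT_B);
 */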
94
95 /* Threshold == 5 for long IRQs, 50 for short */
96 #define HPD_STORM_DEFAULT_THRESHOLD     50
97
98 #define HPD_STORM_DETECT_PERIOD         1000
99 #define HPD_STORM_REENABLE_DELAY        (2 * 60 * 1000)
100 #define HPD_RETRY_DELAY                 1000
101
102 static enum hpd_pin
103 intel_connector_hpd_pin(struct intel_connector *connector)
104 {
105         struct intel_encoder *encoder = intel_attached_encoder(connector);
106
107         /*
108          * MST connectors get their encoder attached dynamically
109          * so we need to make sure we have an encoder here. But since
110          * MST encoders have their hpd_pin set to HPD_NONE we don't
111          * have to special case them beyond that.
112          */
113         return encoder ? encoder->hpd_pin : HPD_NONE;
114 }
115
116 /**
117  * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
118  * @dev_priv: private driver data pointer
119  * @pin: the pin to gather stats on
120  * @long_hpd: whether the HPD IRQ was long or short
121  *
122  * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
123  * storms. Only the pin specific stats and state are changed, the caller is
124  * responsible for further action.
125  *
126  * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
127  * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
128  * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 towards this threshold, and
129  * short IRQs count as +1. If this threshold is exceeded, it's considered an
130  * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
131  *
132  * By default, most systems will only count long IRQs towards
133  * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
134  * suffer from short IRQ storms and must also track these. Because short IRQ
135  * storms are naturally caused by sideband interactions with DP MST devices,
136  * short IRQ detection is only enabled for systems without DP MST support.
137  * Systems which are new enough to support DP MST are far less likely to
138  * suffer from IRQ storms at all, so this is fine.
139  *
140  * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
141  * and should only be adjusted for automated hotplug testing.
142  *
143  * Return: true if an IRQ storm was detected on @pin.
144  */
145 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
146                                        enum hpd_pin pin, bool long_hpd)
147 {
148         struct intel_hotplug *hpd = &dev_priv->display.hotplug;
149         unsigned long start = hpd->stats[pin].last_jiffies;
150         unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
151         const int increment = long_hpd ? 10 : 1;
152         const int threshold = hpd->hpd_storm_threshold;
153         bool storm = false;
154
155         if (!threshold ||
156             (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
157                 return false;
158
159         if (!time_in_range(jiffies, start, end)) {
160                 hpd->stats[pin].last_jiffies = jiffies;
161                 hpd->stats[pin].count = 0;
162         }
163
164         hpd->stats[pin].count += increment;
165         if (hpd->stats[pin].count > threshold) {
166                 hpd->stats[pin].state = HPD_MARK_DISABLED;
167                 drm_dbg_kms(&dev_priv->drm,
168                             "HPD interrupt storm detected on PIN %d\n", pin);
169                 storm = true;
170         } else {
171                 drm_dbg_kms(&dev_priv->drm,
172                             "Received HPD interrupt on PIN %d - cnt: %d\n",
173                               pin,
174                               hpd->stats[pin].count);
175         }
176
177         return storm;
178 }
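/*
 * Worked example of the accounting above, assuming the default threshold of
 * 50: a long IRQ adds 10 to the pin's count and a short IRQ adds 1, so within
 * one HPD_STORM_DETECT_PERIOD the 6th long pulse (count 60 > 50), or the 51st
 * short pulse (count 51 > 50, and only if short storm detection is enabled),
 * marks the pin HPD_MARK_DISABLED.
 */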
179
180 static void
181 intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
182 {
183         struct drm_connector_list_iter conn_iter;
184         struct intel_connector *connector;
185         bool hpd_disabled = false;
186
187         lockdep_assert_held(&dev_priv->irq_lock);
188
189         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
190         for_each_intel_connector_iter(connector, &conn_iter) {
191                 enum hpd_pin pin;
192
193                 if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
194                         continue;
195
196                 pin = intel_connector_hpd_pin(connector);
197                 if (pin == HPD_NONE ||
198                     dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
199                         continue;
200
201                 drm_info(&dev_priv->drm,
202                          "HPD interrupt storm detected on connector %s: "
203                          "switching from hotplug detection to polling\n",
204                          connector->base.name);
205
206                 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
207                 connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
208                         DRM_CONNECTOR_POLL_DISCONNECT;
209                 hpd_disabled = true;
210         }
211         drm_connector_list_iter_end(&conn_iter);
212
213         /* Enable polling and queue hotplug re-enabling. */
214         if (hpd_disabled) {
215                 drm_kms_helper_poll_reschedule(&dev_priv->drm);
216                 mod_delayed_work(dev_priv->unordered_wq,
217                                  &dev_priv->display.hotplug.reenable_work,
218                                  msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
219         }
220 }
221
222 static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
223 {
224         struct drm_i915_private *dev_priv =
225                 container_of(work, typeof(*dev_priv),
226                              display.hotplug.reenable_work.work);
227         struct drm_connector_list_iter conn_iter;
228         struct intel_connector *connector;
229         intel_wakeref_t wakeref;
230         enum hpd_pin pin;
231
232         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
233
234         spin_lock_irq(&dev_priv->irq_lock);
235
236         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
237         for_each_intel_connector_iter(connector, &conn_iter) {
238                 pin = intel_connector_hpd_pin(connector);
239                 if (pin == HPD_NONE ||
240                     dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
241                         continue;
242
243                 if (connector->base.polled != connector->polled)
244                         drm_dbg(&dev_priv->drm,
245                                 "Reenabling HPD on connector %s\n",
246                                 connector->base.name);
247                 connector->base.polled = connector->polled;
248         }
249         drm_connector_list_iter_end(&conn_iter);
250
251         for_each_hpd_pin(pin) {
252                 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
253                         dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
254         }
255
256         intel_hpd_irq_setup(dev_priv);
257
258         spin_unlock_irq(&dev_priv->irq_lock);
259
260         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
261 }
262
263 static enum intel_hotplug_state
264 intel_hotplug_detect_connector(struct intel_connector *connector)
265 {
266         struct drm_device *dev = connector->base.dev;
267         enum drm_connector_status old_status;
268         u64 old_epoch_counter;
269         int status;
270         bool ret = false;
271
272         drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
273         old_status = connector->base.status;
274         old_epoch_counter = connector->base.epoch_counter;
275
276         status = drm_helper_probe_detect(&connector->base, NULL, false);
277         if (!connector->base.force)
278                 connector->base.status = status;
279
280         if (old_epoch_counter != connector->base.epoch_counter)
281                 ret = true;
282
283         if (ret) {
284                 drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
285                             connector->base.base.id,
286                             connector->base.name,
287                             drm_get_connector_status_name(old_status),
288                             drm_get_connector_status_name(connector->base.status),
289                             old_epoch_counter,
290                             connector->base.epoch_counter);
291                 return INTEL_HOTPLUG_CHANGED;
292         }
293         return INTEL_HOTPLUG_UNCHANGED;
294 }
295
296 enum intel_hotplug_state
297 intel_encoder_hotplug(struct intel_encoder *encoder,
298                       struct intel_connector *connector)
299 {
300         return intel_hotplug_detect_connector(connector);
301 }
302
303 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
304 {
305         return intel_encoder_is_dig_port(encoder) &&
306                 enc_to_dig_port(encoder)->hpd_pulse != NULL;
307 }
308
309 static void i915_digport_work_func(struct work_struct *work)
310 {
311         struct drm_i915_private *dev_priv =
312                 container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
313         u32 long_port_mask, short_port_mask;
314         struct intel_encoder *encoder;
315         u32 old_bits = 0;
316
317         spin_lock_irq(&dev_priv->irq_lock);
318         long_port_mask = dev_priv->display.hotplug.long_port_mask;
319         dev_priv->display.hotplug.long_port_mask = 0;
320         short_port_mask = dev_priv->display.hotplug.short_port_mask;
321         dev_priv->display.hotplug.short_port_mask = 0;
322         spin_unlock_irq(&dev_priv->irq_lock);
323
324         for_each_intel_encoder(&dev_priv->drm, encoder) {
325                 struct intel_digital_port *dig_port;
326                 enum port port = encoder->port;
327                 bool long_hpd, short_hpd;
328                 enum irqreturn ret;
329
330                 if (!intel_encoder_has_hpd_pulse(encoder))
331                         continue;
332
333                 long_hpd = long_port_mask & BIT(port);
334                 short_hpd = short_port_mask & BIT(port);
335
336                 if (!long_hpd && !short_hpd)
337                         continue;
338
339                 dig_port = enc_to_dig_port(encoder);
340
341                 ret = dig_port->hpd_pulse(dig_port, long_hpd);
342                 if (ret == IRQ_NONE) {
343                         /* fall back to old school hpd */
344                         old_bits |= BIT(encoder->hpd_pin);
345                 }
346         }
347
348         if (old_bits) {
349                 spin_lock_irq(&dev_priv->irq_lock);
350                 dev_priv->display.hotplug.event_bits |= old_bits;
351                 spin_unlock_irq(&dev_priv->irq_lock);
352                 queue_delayed_work(dev_priv->unordered_wq,
353                                    &dev_priv->display.hotplug.hotplug_work, 0);
354         }
355 }
356
357 /**
358  * intel_hpd_trigger_irq - trigger an hpd irq event for a port
359  * @dig_port: digital port
360  *
361  * Trigger an HPD interrupt event for the given port, emulating a short pulse
362  * generated by the sink, and schedule the dig port work to handle it.
363  */
364 void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
365 {
366         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
367
368         spin_lock_irq(&i915->irq_lock);
369         i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
370         spin_unlock_irq(&i915->irq_lock);
371
372         queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
373 }
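/*
 * A minimal usage sketch (hypothetical call site): DP code that wants the
 * short pulse handling re-run for a port, without the sink actually toggling
 * its HPD line, can simply do:
 *
 *	intel_hpd_trigger_irq(enc_to_dig_port(encoder));
 */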
374
375 /*
376  * Handle hotplug events outside the interrupt handler proper.
377  */
378 static void i915_hotplug_work_func(struct work_struct *work)
379 {
380         struct drm_i915_private *dev_priv =
381                 container_of(work, struct drm_i915_private,
382                              display.hotplug.hotplug_work.work);
383         struct drm_connector_list_iter conn_iter;
384         struct intel_connector *connector;
385         u32 changed = 0, retry = 0;
386         u32 hpd_event_bits;
387         u32 hpd_retry_bits;
388         struct drm_connector *first_changed_connector = NULL;
389         int changed_connectors = 0;
390
391         mutex_lock(&dev_priv->drm.mode_config.mutex);
392         drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
393
394         spin_lock_irq(&dev_priv->irq_lock);
395
396         hpd_event_bits = dev_priv->display.hotplug.event_bits;
397         dev_priv->display.hotplug.event_bits = 0;
398         hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
399         dev_priv->display.hotplug.retry_bits = 0;
400
401         /* Enable polling for connectors which had HPD IRQ storms */
402         intel_hpd_irq_storm_switch_to_polling(dev_priv);
403
404         spin_unlock_irq(&dev_priv->irq_lock);
405
406         /* Skip calling encoder hotplug handlers if ignore_long_hpd is set */
407         if (dev_priv->display.hotplug.ignore_long_hpd) {
408                 drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
409                 mutex_unlock(&dev_priv->drm.mode_config.mutex);
410                 return;
411         }
412
413         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
414         for_each_intel_connector_iter(connector, &conn_iter) {
415                 enum hpd_pin pin;
416                 u32 hpd_bit;
417
418                 pin = intel_connector_hpd_pin(connector);
419                 if (pin == HPD_NONE)
420                         continue;
421
422                 hpd_bit = BIT(pin);
423                 if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
424                         struct intel_encoder *encoder =
425                                 intel_attached_encoder(connector);
426
427                         if (hpd_event_bits & hpd_bit)
428                                 connector->hotplug_retries = 0;
429                         else
430                                 connector->hotplug_retries++;
431
432                         drm_dbg_kms(&dev_priv->drm,
433                                     "Connector %s (pin %i) received hotplug event. (retry %d)\n",
434                                     connector->base.name, pin,
435                                     connector->hotplug_retries);
436
437                         switch (encoder->hotplug(encoder, connector)) {
438                         case INTEL_HOTPLUG_UNCHANGED:
439                                 break;
440                         case INTEL_HOTPLUG_CHANGED:
441                                 changed |= hpd_bit;
442                                 changed_connectors++;
443                                 if (!first_changed_connector) {
444                                         drm_connector_get(&connector->base);
445                                         first_changed_connector = &connector->base;
446                                 }
447                                 break;
448                         case INTEL_HOTPLUG_RETRY:
449                                 retry |= hpd_bit;
450                                 break;
451                         }
452                 }
453         }
454         drm_connector_list_iter_end(&conn_iter);
455         mutex_unlock(&dev_priv->drm.mode_config.mutex);
456
457         if (changed_connectors == 1)
458                 drm_kms_helper_connector_hotplug_event(first_changed_connector);
459         else if (changed_connectors > 0)
460                 drm_kms_helper_hotplug_event(&dev_priv->drm);
461
462         if (first_changed_connector)
463                 drm_connector_put(first_changed_connector);
464
465         /* Remove shared HPD pins that have changed */
466         retry &= ~changed;
467         if (retry) {
468                 spin_lock_irq(&dev_priv->irq_lock);
469                 dev_priv->display.hotplug.retry_bits |= retry;
470                 spin_unlock_irq(&dev_priv->irq_lock);
471
472                 mod_delayed_work(dev_priv->unordered_wq,
473                                  &dev_priv->display.hotplug.hotplug_work,
474                                  msecs_to_jiffies(HPD_RETRY_DELAY));
475         }
476 }
477
478
479 /**
480  * intel_hpd_irq_handler - main hotplug irq handler
481  * @dev_priv: drm_i915_private
482  * @pin_mask: a mask of hpd pins that have triggered the irq
483  * @long_mask: a mask of hpd pins that may be long hpd pulses
484  *
485  * This is the main hotplug irq handler for all platforms. The platform specific
486  * irq handlers call the platform specific hotplug irq handlers, which read and
487  * decode the appropriate registers into bitmasks about hpd pins that have
488  * triggered (@pin_mask), and which of those pins may be long pulses
489  * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
490  * is not a digital port.
491  *
492  * Here, we do hotplug irq storm detection and mitigation, and pass further
493  * processing to appropriate bottom halves.
494  */
495 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
496                            u32 pin_mask, u32 long_mask)
497 {
498         struct intel_encoder *encoder;
499         bool storm_detected = false;
500         bool queue_dig = false, queue_hp = false;
501         u32 long_hpd_pulse_mask = 0;
502         u32 short_hpd_pulse_mask = 0;
503         enum hpd_pin pin;
504
505         if (!pin_mask)
506                 return;
507
508         spin_lock(&dev_priv->irq_lock);
509
510         /*
511          * Determine whether ->hpd_pulse() exists for each pin, and
512          * whether we have a short or a long pulse. This is needed
513          * as each pin may have up to two encoders (HDMI and DP) and
514          * only one of them (DP) will have ->hpd_pulse().
515          */
516         for_each_intel_encoder(&dev_priv->drm, encoder) {
517                 enum port port = encoder->port;
518                 bool long_hpd;
519
520                 pin = encoder->hpd_pin;
521                 if (!(BIT(pin) & pin_mask))
522                         continue;
523
524                 if (!intel_encoder_has_hpd_pulse(encoder))
525                         continue;
526
527                 long_hpd = long_mask & BIT(pin);
528
529                 drm_dbg(&dev_priv->drm,
530                         "digital hpd on [ENCODER:%d:%s] - %s\n",
531                         encoder->base.base.id, encoder->base.name,
532                         long_hpd ? "long" : "short");
533                 queue_dig = true;
534
535                 if (long_hpd) {
536                         long_hpd_pulse_mask |= BIT(pin);
537                         dev_priv->display.hotplug.long_port_mask |= BIT(port);
538                 } else {
539                         short_hpd_pulse_mask |= BIT(pin);
540                         dev_priv->display.hotplug.short_port_mask |= BIT(port);
541                 }
542         }
543
544         /* Now process each pin just once */
545         for_each_hpd_pin(pin) {
546                 bool long_hpd;
547
548                 if (!(BIT(pin) & pin_mask))
549                         continue;
550
551                 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
552                         /*
553                          * On GMCH platforms the interrupt mask bits only
554                          * prevent irq generation, not the setting of the
555                          * hotplug bits themselves. So only WARN about unexpected
556                          * interrupts on saner platforms.
557                          */
558                         drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
559                                       "Received HPD interrupt on pin %d although disabled\n",
560                                       pin);
561                         continue;
562                 }
563
564                 if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
565                         continue;
566
567                 /*
568                  * Delegate to ->hpd_pulse() if one of the encoders for this
569                  * pin has it, otherwise let the hotplug_work deal with this
570                  * pin directly.
571                  */
572                 if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
573                         long_hpd = long_hpd_pulse_mask & BIT(pin);
574                 } else {
575                         dev_priv->display.hotplug.event_bits |= BIT(pin);
576                         long_hpd = true;
577                         queue_hp = true;
578                 }
579
580                 if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
581                         dev_priv->display.hotplug.event_bits &= ~BIT(pin);
582                         storm_detected = true;
583                         queue_hp = true;
584                 }
585         }
586
587         /*
588          * Disable any IRQs that storms were detected on. Polling enablement
589          * happens later in our hotplug work.
590          */
591         if (storm_detected)
592                 intel_hpd_irq_setup(dev_priv);
593         spin_unlock(&dev_priv->irq_lock);
594
595         /*
596          * Our hotplug handler can grab modeset locks (by calling down into the
597          * fb helpers). Hence it must not be run on our own dev_priv->wq work
598          * queue, because otherwise the flush_work in the pageflip code will
599          * deadlock.
600          */
601         if (queue_dig)
602                 queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
603         if (queue_hp)
604                 queue_delayed_work(dev_priv->unordered_wq,
605                                    &dev_priv->display.hotplug.hotplug_work, 0);
606 }
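/*
 * Minimal sketch (simplified and hypothetical; the real register decoding
 * lives in the platform specific code) of how an irq handler feeds this
 * function:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	if (hotplug_status & PORTB_HOTPLUG_INT_STATUS) {
 *		pin_mask |= BIT(HPD_PORT_B);
 *		long_mask |= BIT(HPD_PORT_B);	// assuming a long pulse
 *	}
 *
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */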
607
608 /**
609  * intel_hpd_init - initializes and enables hpd support
610  * @dev_priv: i915 device instance
611  *
612  * This function enables the hotplug support. It requires that interrupts have
613  * already been enabled with intel_irq_init_hw(). From this point on hotplug and
614  * poll request can run concurrently to other code, so locking rules must be
615  * obeyed.
616  *
617  * This is a separate step from interrupt enabling to simplify the locking rules
618  * in the driver load and resume code.
619  *
620  * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
621  */
622 void intel_hpd_init(struct drm_i915_private *dev_priv)
623 {
624         int i;
625
626         if (!HAS_DISPLAY(dev_priv))
627                 return;
628
629         for_each_hpd_pin(i) {
630                 dev_priv->display.hotplug.stats[i].count = 0;
631                 dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
632         }
633
634         /*
635          * Interrupt setup is already guaranteed to be single-threaded, this is
636          * just to make the assert_spin_locked checks happy.
637          */
638         spin_lock_irq(&dev_priv->irq_lock);
639         intel_hpd_irq_setup(dev_priv);
640         spin_unlock_irq(&dev_priv->irq_lock);
641 }
642
643 static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
644 {
645         struct drm_connector_list_iter conn_iter;
646         struct intel_connector *connector;
647         struct intel_connector *first_changed_connector = NULL;
648         int changed = 0;
649
650         mutex_lock(&i915->drm.mode_config.mutex);
651
652         if (!i915->drm.mode_config.poll_enabled)
653                 goto out;
654
655         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
656         for_each_intel_connector_iter(connector, &conn_iter) {
657                 if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
658                         continue;
659
660                 if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
661                         continue;
662
663                 changed++;
664
665                 if (changed == 1) {
666                         drm_connector_get(&connector->base);
667                         first_changed_connector = connector;
668                 }
669         }
670         drm_connector_list_iter_end(&conn_iter);
671
672 out:
673         mutex_unlock(&i915->drm.mode_config.mutex);
674
675         if (!changed)
676                 return;
677
678         if (changed == 1)
679                 drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
680         else
681                 drm_kms_helper_hotplug_event(&i915->drm);
682
683         drm_connector_put(&first_changed_connector->base);
684 }
685
686 static void i915_hpd_poll_init_work(struct work_struct *work)
687 {
688         struct drm_i915_private *dev_priv =
689                 container_of(work, struct drm_i915_private,
690                              display.hotplug.poll_init_work);
691         struct drm_connector_list_iter conn_iter;
692         struct intel_connector *connector;
693         intel_wakeref_t wakeref;
694         bool enabled;
695
696         mutex_lock(&dev_priv->drm.mode_config.mutex);
697
698         enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
699         /*
700          * Prevent taking a power reference from this sequence of
701          * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
702          * connector detect which would requeue i915_hpd_poll_init_work()
703          * and so risk an endless loop of this same sequence.
704          */
705         if (!enabled) {
706                 wakeref = intel_display_power_get(dev_priv,
707                                                   POWER_DOMAIN_DISPLAY_CORE);
708                 drm_WARN_ON(&dev_priv->drm,
709                             READ_ONCE(dev_priv->display.hotplug.poll_enabled));
710                 cancel_work(&dev_priv->display.hotplug.poll_init_work);
711         }
712
713         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
714         for_each_intel_connector_iter(connector, &conn_iter) {
715                 enum hpd_pin pin;
716
717                 pin = intel_connector_hpd_pin(connector);
718                 if (pin == HPD_NONE)
719                         continue;
720
721                 connector->base.polled = connector->polled;
722
723                 if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
724                         connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
725                                 DRM_CONNECTOR_POLL_DISCONNECT;
726         }
727         drm_connector_list_iter_end(&conn_iter);
728
729         if (enabled)
730                 drm_kms_helper_poll_reschedule(&dev_priv->drm);
731
732         mutex_unlock(&dev_priv->drm.mode_config.mutex);
733
734         /*
735          * We might have missed hotplug events that happened while we were
736          * in the middle of disabling polling.
737          */
738         if (!enabled) {
739                 i915_hpd_poll_detect_connectors(dev_priv);
740
741                 intel_display_power_put(dev_priv,
742                                         POWER_DOMAIN_DISPLAY_CORE,
743                                         wakeref);
744         }
745 }
746
747 /**
748  * intel_hpd_poll_enable - enable polling for connectors with hpd
749  * @dev_priv: i915 device instance
750  *
751  * This function enables polling for all connectors which support HPD.
752  * Under certain conditions HPD may not be functional. On most Intel GPUs,
753  * this happens when we enter runtime suspend.
754  * On Valleyview and Cherryview systems, this also happens when we shut off all
755  * of the powerwells.
756  *
757  * Since this function can get called in contexts where we're already holding
758  * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
759  * worker.
760  *
761  * Also see: intel_hpd_init() and intel_hpd_poll_disable().
762  */
763 void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
764 {
765         if (!HAS_DISPLAY(dev_priv) ||
766             !intel_display_device_enabled(dev_priv))
767                 return;
768
769         WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
770
771         /*
772          * separate worker.
773          * There's also no issue if we race here, since we always reschedule
774          * this worker anyway.
775          * this worker anyway
776          */
777         queue_work(dev_priv->unordered_wq,
778                    &dev_priv->display.hotplug.poll_init_work);
779 }
780
781 /**
782  * intel_hpd_poll_disable - disable polling for connectors with hpd
783  * @dev_priv: i915 device instance
784  *
785  * This function disables polling for all connectors which support HPD.
786  * Under certain conditions HPD may not be functional. On most Intel GPUs,
787  * this happens when we enter runtime suspend.
788  * On Valleyview and Cherryview systems, this also happens when we shut off all
789  * of the powerwells.
790  *
791  * Since this function can get called in contexts where we're already holding
792  * dev->mode_config.mutex, the actual disabling of polling is done in a
793  * separate worker.
794  *
795  * Also used during driver init to initialize connector->polled
796  * appropriately for all connectors.
797  *
798  * Also see: intel_hpd_init() and intel_hpd_poll_enable().
799  */
800 void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
801 {
802         if (!HAS_DISPLAY(dev_priv))
803                 return;
804
805         WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
806         queue_work(dev_priv->unordered_wq,
807                    &dev_priv->display.hotplug.poll_init_work);
808 }
809
810 void intel_hpd_init_early(struct drm_i915_private *i915)
811 {
812         INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
813                           i915_hotplug_work_func);
814         INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
815         INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
816         INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
817                           intel_hpd_irq_storm_reenable_work);
818
819         i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
820         /* If we have MST support, we want to avoid doing short HPD IRQ storm
821          * detection, as short HPD storms will occur as a natural part of
822          * sideband messaging with MST.
823          * On older platforms however, IRQ storms can occur with both long and
824          * short pulses, as seen on some G4x systems.
825          */
826         i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
827 }
828
829 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
830 {
831         if (!HAS_DISPLAY(dev_priv))
832                 return;
833
834         spin_lock_irq(&dev_priv->irq_lock);
835
836         dev_priv->display.hotplug.long_port_mask = 0;
837         dev_priv->display.hotplug.short_port_mask = 0;
838         dev_priv->display.hotplug.event_bits = 0;
839         dev_priv->display.hotplug.retry_bits = 0;
840
841         spin_unlock_irq(&dev_priv->irq_lock);
842
843         cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
844         cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
845         cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
846         cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
847 }
848
849 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
850 {
851         bool ret = false;
852
853         if (pin == HPD_NONE)
854                 return false;
855
856         spin_lock_irq(&dev_priv->irq_lock);
857         if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
858                 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
859                 ret = true;
860         }
861         spin_unlock_irq(&dev_priv->irq_lock);
862
863         return ret;
864 }
865
866 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
867 {
868         if (pin == HPD_NONE)
869                 return;
870
871         spin_lock_irq(&dev_priv->irq_lock);
872         dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
873         spin_unlock_irq(&dev_priv->irq_lock);
874 }
875
876 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
877 {
878         struct drm_i915_private *dev_priv = m->private;
879         struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
880
881         /* Synchronize with everything first in case there's been an HPD
882          * storm that we haven't finished handling in the kernel yet
883          */
884         intel_synchronize_irq(dev_priv);
885         flush_work(&dev_priv->display.hotplug.dig_port_work);
886         flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
887
888         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
889         seq_printf(m, "Detected: %s\n",
890                    str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
891
892         return 0;
893 }
894
895 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
896                                         const char __user *ubuf, size_t len,
897                                         loff_t *offp)
898 {
899         struct seq_file *m = file->private_data;
900         struct drm_i915_private *dev_priv = m->private;
901         struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
902         unsigned int new_threshold;
903         int i;
904         char *newline;
905         char tmp[16];
906
907         if (len >= sizeof(tmp))
908                 return -EINVAL;
909
910         if (copy_from_user(tmp, ubuf, len))
911                 return -EFAULT;
912
913         tmp[len] = '\0';
914
915         /* Strip newline, if any */
916         newline = strchr(tmp, '\n');
917         if (newline)
918                 *newline = '\0';
919
920         if (strcmp(tmp, "reset") == 0)
921                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
922         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
923                 return -EINVAL;
924
925         if (new_threshold > 0)
926                 drm_dbg_kms(&dev_priv->drm,
927                             "Setting HPD storm detection threshold to %d\n",
928                             new_threshold);
929         else
930                 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
931
932         spin_lock_irq(&dev_priv->irq_lock);
933         hotplug->hpd_storm_threshold = new_threshold;
934         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
935         for_each_hpd_pin(i)
936                 hotplug->stats[i].count = 0;
937         spin_unlock_irq(&dev_priv->irq_lock);
938
939         /* Re-enable hpd immediately if we were in an irq storm */
940         flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
941
942         return len;
943 }
944
945 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
946 {
947         return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
948 }
949
950 static const struct file_operations i915_hpd_storm_ctl_fops = {
951         .owner = THIS_MODULE,
952         .open = i915_hpd_storm_ctl_open,
953         .read = seq_read,
954         .llseek = seq_lseek,
955         .release = single_release,
956         .write = i915_hpd_storm_ctl_write
957 };
958
959 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
960 {
961         struct drm_i915_private *dev_priv = m->private;
962
963         seq_printf(m, "Enabled: %s\n",
964                    str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
965
966         return 0;
967 }
968
969 static int
970 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
971 {
972         return single_open(file, i915_hpd_short_storm_ctl_show,
973                            inode->i_private);
974 }
975
976 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
977                                               const char __user *ubuf,
978                                               size_t len, loff_t *offp)
979 {
980         struct seq_file *m = file->private_data;
981         struct drm_i915_private *dev_priv = m->private;
982         struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
983         char *newline;
984         char tmp[16];
985         int i;
986         bool new_state;
987
988         if (len >= sizeof(tmp))
989                 return -EINVAL;
990
991         if (copy_from_user(tmp, ubuf, len))
992                 return -EFAULT;
993
994         tmp[len] = '\0';
995
996         /* Strip newline, if any */
997         newline = strchr(tmp, '\n');
998         if (newline)
999                 *newline = '\0';
1000
1001         /* Reset to the "default" state for this system */
1002         if (strcmp(tmp, "reset") == 0)
1003                 new_state = !HAS_DP_MST(dev_priv);
1004         else if (kstrtobool(tmp, &new_state) != 0)
1005                 return -EINVAL;
1006
1007         drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1008                     new_state ? "En" : "Dis");
1009
1010         spin_lock_irq(&dev_priv->irq_lock);
1011         hotplug->hpd_short_storm_enabled = new_state;
1012         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
1013         for_each_hpd_pin(i)
1014                 hotplug->stats[i].count = 0;
1015         spin_unlock_irq(&dev_priv->irq_lock);
1016
1017         /* Re-enable hpd immediately if we were in an irq storm */
1018         flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
1019
1020         return len;
1021 }
1022
1023 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1024         .owner = THIS_MODULE,
1025         .open = i915_hpd_short_storm_ctl_open,
1026         .read = seq_read,
1027         .llseek = seq_lseek,
1028         .release = single_release,
1029         .write = i915_hpd_short_storm_ctl_write,
1030 };
1031
1032 void intel_hpd_debugfs_register(struct drm_i915_private *i915)
1033 {
1034         struct drm_minor *minor = i915->drm.primary;
1035
1036         debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
1037                             i915, &i915_hpd_storm_ctl_fops);
1038         debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
1039                             i915, &i915_hpd_short_storm_ctl_fops);
1040         debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
1041                             &i915->display.hotplug.ignore_long_hpd);
1042 }
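/*
 * Usage note (assuming the usual debugfs mount point and DRM minor 0): the
 * files registered above are meant for automated hotplug testing, e.g.:
 *
 *	echo 5     > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */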