1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Implementation of primary alsa driver code base for Intel HD Audio.
6 * Copyright(c) 2004 Intel Corporation. All rights reserved.
8 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9 * PeiSen Hou <pshou@realtek.com.tw>
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
21 /* for art-tsc conversion */
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include "hda_controller.h"
29 #define CREATE_TRACE_POINTS
30 #include "hda_controller_trace.h"
32 /* DSP lock helpers */
33 #define dsp_lock(dev) snd_hdac_dsp_lock(azx_stream(dev))
34 #define dsp_unlock(dev) snd_hdac_dsp_unlock(azx_stream(dev))
35 #define dsp_is_locked(dev) snd_hdac_stream_is_locked(azx_stream(dev))
/* Reserve a free controller DMA stream engine for @substream and return its
 * azx_dev wrapper. NOTE(review): this excerpt omits intervening lines
 * (braces, NULL-check on 's'); code lines below are kept verbatim.
 */
37 /* assign a stream for the PCM */
38 static inline struct azx_dev *
39 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
41 struct hdac_stream *s;
43 s = snd_hdac_stream_assign(azx_bus(chip), substream);
46 return stream_to_azx_dev(s);
/* Give the DMA stream engine back to the bus-level pool (inverse of
 * azx_assign_device()). */
49 /* release the assigned stream */
50 static inline void azx_release_device(struct azx_dev *azx_dev)
52 snd_hdac_stream_release(azx_stream(azx_dev));
/* Map a PCM substream to its codec-side hda_pcm_stream descriptor,
 * indexed by direction (playback/capture). */
55 static inline struct hda_pcm_stream *
56 to_hda_pcm_stream(struct snd_pcm_substream *substream)
58 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
59 return &apcm->info->stream[substream->stream];
/* Adjust a link-time timestamp (ns) by the codec-internal delay reported via
 * the optional ops.get_delay callback.  Capture adds the delay, playback
 * subtracts it (clamped at 0).  NOTE(review): excerpt elides some lines
 * (e.g. the early return when get_delay is absent); code kept verbatim.
 */
62 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
65 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
66 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
67 u64 codec_frames, codec_nsecs;
69 if (!hinfo->ops.get_delay)
72 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
/* frames -> ns at the stream's sample rate */
73 codec_nsecs = div_u64(codec_frames * 1000000000LL,
74 substream->runtime->rate);
76 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
77 return nsec + codec_nsecs;
79 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
/* PCM .close callback: release the DMA engine, call the codec close op,
 * drop the codec power reference, and release the PCM refcount — all under
 * chip->open_mutex (mirrors azx_pcm_open's locking). */
86 static int azx_pcm_close(struct snd_pcm_substream *substream)
88 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
89 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
90 struct azx *chip = apcm->chip;
91 struct azx_dev *azx_dev = get_azx_dev(substream);
93 trace_azx_pcm_close(chip, azx_dev);
94 mutex_lock(&chip->open_mutex);
95 azx_release_device(azx_dev);
97 hinfo->ops.close(hinfo, apcm->codec, substream);
98 snd_hda_power_down(apcm->codec);
99 mutex_unlock(&chip->open_mutex);
100 snd_hda_codec_pcm_put(apcm->info);
/* PCM .hw_params callback: invalidate any cached stream parameters so they
 * are recomputed in prepare, then allocate the DMA buffer.
 * NOTE(review): the dsp_is_locked() error branch body is elided in this
 * excerpt — presumably it bails out when the stream is in DSP-loading use.
 */
104 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
105 struct snd_pcm_hw_params *hw_params)
107 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
108 struct azx *chip = apcm->chip;
109 struct azx_dev *azx_dev = get_azx_dev(substream);
112 trace_azx_pcm_hw_params(chip, azx_dev);
114 if (dsp_is_locked(azx_dev)) {
/* force reprogramming of bufsize/period/format in the next prepare */
119 azx_dev->core.bufsize = 0;
120 azx_dev->core.period_bytes = 0;
121 azx_dev->core.format_val = 0;
122 ret = snd_pcm_lib_malloc_pages(substream,
123 params_buffer_bytes(hw_params));
/* PCM .hw_free callback: reset the stream registers/BDL (unless the stream
 * is locked by the DSP loader), let the codec clean up, free the DMA buffer
 * and clear the 'prepared' flag. */
130 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
132 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
133 struct azx_dev *azx_dev = get_azx_dev(substream);
134 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
137 /* reset BDL address */
139 if (!dsp_is_locked(azx_dev))
140 snd_hdac_stream_cleanup(azx_stream(azx_dev));
142 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
144 err = snd_pcm_lib_free_pages(substream);
145 azx_stream(azx_dev)->prepared = 0;
/* PCM .prepare callback: compute the SD format value from the runtime
 * parameters, program the controller stream registers, then let the codec
 * set up its converters with the (possibly workaround-adjusted) stream tag.
 * NOTE(review): several lines are elided in this excerpt (error returns,
 * extra snd_hdac_calc_stream_format() arguments, spdif handling).
 */
150 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
152 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
153 struct azx *chip = apcm->chip;
154 struct azx_dev *azx_dev = get_azx_dev(substream);
155 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
156 struct snd_pcm_runtime *runtime = substream->runtime;
157 unsigned int format_val, stream_tag;
159 struct hda_spdif_out *spdif =
160 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
161 unsigned short ctls = spdif ? spdif->ctls : 0;
163 trace_azx_pcm_prepare(chip, azx_dev);
165 if (dsp_is_locked(azx_dev)) {
170 snd_hdac_stream_reset(azx_stream(azx_dev));
171 format_val = snd_hdac_calc_stream_format(runtime->rate,
177 dev_err(chip->card->dev,
178 "invalid format_val, rate=%d, ch=%d, format=%d\n",
179 runtime->rate, runtime->channels, runtime->format);
184 err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
188 snd_hdac_stream_setup(azx_stream(azx_dev));
190 stream_tag = azx_dev->core.stream_tag;
191 /* CA-IBG chips need the playback stream starting from 1 */
192 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
193 stream_tag > chip->capture_streams)
194 stream_tag -= chip->capture_streams;
195 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
196 azx_dev->core.format_val, substream);
200 azx_stream(azx_dev)->prepared = 1;
/* PCM .trigger callback: start/stop all substreams linked in the same group
 * synchronously, using the controller's SSYNC register (old or new location
 * depending on driver caps).  Sequence: set SSYNC bits, start/stop each
 * stream's DMA under the register lock, wait for them to sync outside the
 * lock, then clear SSYNC and (on start) init timecounters.
 * NOTE(review): the switch-case bodies setting 'start' are elided in this
 * excerpt.
 */
205 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
207 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
208 struct azx *chip = apcm->chip;
209 struct hdac_bus *bus = azx_bus(chip);
210 struct azx_dev *azx_dev;
211 struct snd_pcm_substream *s;
212 struct hdac_stream *hstr;
217 azx_dev = get_azx_dev(substream);
218 trace_azx_pcm_trigger(chip, azx_dev, cmd);
220 hstr = azx_stream(azx_dev);
221 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
222 sync_reg = AZX_REG_OLD_SSYNC;
224 sync_reg = AZX_REG_SSYNC;
226 if (dsp_is_locked(azx_dev) || !hstr->prepared)
230 case SNDRV_PCM_TRIGGER_START:
231 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
232 case SNDRV_PCM_TRIGGER_RESUME:
235 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
236 case SNDRV_PCM_TRIGGER_SUSPEND:
237 case SNDRV_PCM_TRIGGER_STOP:
/* collect the stream-index bitmap of all linked substreams on this card */
244 snd_pcm_group_for_each_entry(s, substream) {
245 if (s->pcm->card != substream->pcm->card)
247 azx_dev = get_azx_dev(s);
248 sbits |= 1 << azx_dev->core.index;
249 snd_pcm_trigger_done(s, substream);
252 spin_lock(&bus->reg_lock);
254 /* first, set SYNC bits of corresponding streams */
255 snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);
257 snd_pcm_group_for_each_entry(s, substream) {
258 if (s->pcm->card != substream->pcm->card)
260 azx_dev = get_azx_dev(s);
/* mark for the position-check workaround on the first IRQs */
262 azx_dev->insufficient = 1;
263 snd_hdac_stream_start(azx_stream(azx_dev), true);
265 snd_hdac_stream_stop(azx_stream(azx_dev));
268 spin_unlock(&bus->reg_lock);
/* wait (unlocked) until all streams reach the requested state */
270 snd_hdac_stream_sync(hstr, start, sbits);
272 spin_lock(&bus->reg_lock);
273 /* reset SYNC bits */
274 snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
276 snd_hdac_stream_timecounter_init(hstr, sbits);
277 spin_unlock(&bus->reg_lock);
/* Read the current stream position from the LPIB register. */
281 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
283 return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
285 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
/* Read the current stream position from the DMA position buffer. */
287 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
289 return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
291 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
/* Return the current hardware position in bytes, using the per-direction
 * chip->get_position callback when set (falls back to the position buffer),
 * and update runtime->delay from the chip- and codec-level delay callbacks.
 * NOTE(review): excerpt elides the wrap-to-0 handling after the bufsize
 * check and the frame conversion of the codec delay.
 */
293 unsigned int azx_get_position(struct azx *chip,
294 struct azx_dev *azx_dev)
296 struct snd_pcm_substream *substream = azx_dev->core.substream;
298 int stream = substream->stream;
301 if (chip->get_position[stream])
302 pos = chip->get_position[stream](chip, azx_dev);
303 else /* use the position buffer as default */
304 pos = azx_get_pos_posbuf(chip, azx_dev);
306 if (pos >= azx_dev->core.bufsize)
309 if (substream->runtime) {
310 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
311 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
313 if (chip->get_delay[stream])
314 delay += chip->get_delay[stream](chip, azx_dev, pos);
315 if (hinfo->ops.get_delay)
316 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
318 substream->runtime->delay = delay;
321 trace_azx_get_position(chip, azx_dev, pos, delay);
324 EXPORT_SYMBOL_GPL(azx_get_position);
/* PCM .pointer callback: byte position from azx_get_position() converted
 * to frames. */
326 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
328 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
329 struct azx *chip = apcm->chip;
330 struct azx_dev *azx_dev = get_azx_dev(substream);
331 return bytes_to_frames(substream->runtime,
332 azx_get_position(chip, azx_dev));
336 * azx_scale64: Scale base by mult/div while not overflowing sanely
338 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
340 * The timestamps for a 48kHz stream can overflow after (2^64/10^9)/48K which
341 * is about 384307 ie ~4.5 days.
343 * This scales the calculation so that overflow will happen but after 2^64 /
344 * 48000 secs, which is pretty large!
347 * base may overflow, but since there isn’t any additional division
348 * performed on base it’s OK
349 * rem can’t overflow because both are 32-bit values
/* Scale base*num/den without premature 64-bit overflow (see the comment
 * block above).  NOTE(review): the multiply/recombine lines and the return
 * are elided in this excerpt; the visible code only splits off 'rem'.
 */
353 static u64 azx_scale64(u64 base, u32 num, u32 den)
357 rem = do_div(base, den);
/* Cross-timestamp helper (Intel GTS capable controllers): latch link wall
 * clock, link position and ART/TSC counters atomically in hardware via the
 * GTSCC capture mechanism, retrying when the wall clock is too close to a
 * frame rollover, then convert the link counter to a device ktime and the
 * ART value to a system counter value.
 * NOTE(review): excerpt elides loop/brace structure, timeout handling and
 * some masking lines; code lines kept verbatim.
 */
367 static int azx_get_sync_time(ktime_t *device,
368 struct system_counterval_t *system, void *ctx)
370 struct snd_pcm_substream *substream = ctx;
371 struct azx_dev *azx_dev = get_azx_dev(substream);
372 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
373 struct azx *chip = apcm->chip;
374 struct snd_pcm_runtime *runtime;
375 u64 ll_counter, ll_counter_l, ll_counter_h;
376 u64 tsc_counter, tsc_counter_l, tsc_counter_h;
377 u32 wallclk_ctr, wallclk_cycles;
383 runtime = substream->runtime;
385 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
390 /* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
393 dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
394 (azx_dev->core.stream_tag - 1);
395 snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
397 /* Enable the capture */
398 snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
401 if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
409 dev_err(chip->card->dev, "GTSCC capture Timedout!\n");
413 /* Read wall clock counter */
414 wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
416 /* Read TSC counter */
417 tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
418 tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
420 /* Read Link counter */
421 ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
422 ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
424 /* Ack: registers read done */
425 snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
427 tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
430 ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
431 wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
434 * An error occurs near frame "rollover". The clocks in
435 * frame value indicates whether this error may have
436 * occurred. Here we use the value of 10 i.e.,
437 * HDA_MAX_CYCLE_OFFSET
439 if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
440 && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
444 * Sleep before we read again, else we may again get
445 * value near to MAX_CYCLE. Try to sleep for different
446 * amount of time so we don't hit the same number again
448 udelay(retry_count++);
450 } while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
452 if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
453 dev_err_ratelimited(chip->card->dev,
454 "Error in WALFCC cycle count\n");
/* link counter (frames) -> ns, plus sub-frame cycles */
458 *device = ns_to_ktime(azx_scale64(ll_counter,
459 NSEC_PER_SEC, runtime->rate));
460 *device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
461 ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
463 *system = convert_art_to_tsc(tsc_counter);
/* Stub when cross-timestamping support is not built in — presumably the
 * #else branch of the CONFIG gate; the body (error return) is elided in
 * this excerpt. */
469 static int azx_get_sync_time(ktime_t *device,
470 struct system_counterval_t *system, void *ctx)
/* Obtain a correlated device/system timestamp pair via the kernel's
 * cross-timestamp infrastructure, using azx_get_sync_time as the device
 * clock reader. */
476 static int azx_get_crosststamp(struct snd_pcm_substream *substream,
477 struct system_device_crosststamp *xtstamp)
479 return get_device_system_crosststamp(azx_get_sync_time,
480 substream, NULL, xtstamp);
/* True when the runtime advertises synchronized link timestamps AND the
 * user asked for that timestamp type.  NOTE(review): the 'return true'/
 * 'return false' lines are elided in this excerpt. */
483 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
484 struct snd_pcm_audio_tstamp_config *ts)
486 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
487 if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
/* PCM .get_time_info callback: report an audio timestamp either from the
 * legacy per-stream wall-clock timecounter (LINK type; /3 converts the
 * 72 MHz-scaled counter to ns — see "can be optimized" note) or from a
 * synchronized device/system cross-timestamp, falling back to the DEFAULT
 * type otherwise.
 * NOTE(review): some branch/return lines are elided in this excerpt.
 */
493 static int azx_get_time_info(struct snd_pcm_substream *substream,
494 struct timespec *system_ts, struct timespec *audio_ts,
495 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
496 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
498 struct azx_dev *azx_dev = get_azx_dev(substream);
499 struct snd_pcm_runtime *runtime = substream->runtime;
500 struct system_device_crosststamp xtstamp;
504 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
505 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
507 snd_pcm_gettime(substream->runtime, system_ts);
509 nsec = timecounter_read(&azx_dev->core.tc);
510 nsec = div_u64(nsec, 3); /* can be optimized */
511 if (audio_tstamp_config->report_delay)
512 nsec = azx_adjust_codec_delay(substream, nsec);
514 *audio_ts = ns_to_timespec(nsec);
516 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
517 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
518 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
520 } else if (is_link_time_supported(runtime, audio_tstamp_config)) {
522 ret = azx_get_crosststamp(substream, &xtstamp);
/* pick the system clock flavor the user configured */
526 switch (runtime->tstamp_type) {
527 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
530 case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
531 *system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
535 *system_ts = ktime_to_timespec(xtstamp.sys_realtime);
540 *audio_ts = ktime_to_timespec(xtstamp.device);
542 audio_tstamp_report->actual_type =
543 SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
544 audio_tstamp_report->accuracy_report = 1;
545 /* 24 MHz WallClock == 42ns resolution */
546 audio_tstamp_report->accuracy = 42;
549 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
/* Baseline hardware capability template copied into each runtime at open;
 * formats/rates/channels are then overridden from the codec's hinfo. */
555 static struct snd_pcm_hardware azx_pcm_hw = {
556 .info = (SNDRV_PCM_INFO_MMAP |
557 SNDRV_PCM_INFO_INTERLEAVED |
558 SNDRV_PCM_INFO_BLOCK_TRANSFER |
559 SNDRV_PCM_INFO_MMAP_VALID |
560 /* No full-resume yet implemented */
561 /* SNDRV_PCM_INFO_RESUME |*/
562 SNDRV_PCM_INFO_PAUSE |
563 SNDRV_PCM_INFO_SYNC_START |
564 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
565 SNDRV_PCM_INFO_HAS_LINK_ATIME |
566 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
567 .formats = SNDRV_PCM_FMTBIT_S16_LE,
568 .rates = SNDRV_PCM_RATE_48000,
573 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
574 .period_bytes_min = 128,
575 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
577 .periods_max = AZX_MAX_FRAG,
/* PCM .open callback: grab a DMA engine under open_mutex, populate the
 * runtime hw caps from the template plus the codec's hinfo, apply hw
 * constraints (integer periods, buffer-time cap for wall-clock wrap,
 * alignment steps), power up the codec and call its open op, then sanity-
 * check the resulting caps.  Error paths unwind in reverse order.
 * NOTE(review): several lines (error labels, constraint arguments) are
 * elided in this excerpt.
 */
581 static int azx_pcm_open(struct snd_pcm_substream *substream)
583 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
584 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
585 struct azx *chip = apcm->chip;
586 struct azx_dev *azx_dev;
587 struct snd_pcm_runtime *runtime = substream->runtime;
591 snd_hda_codec_pcm_get(apcm->info);
592 mutex_lock(&chip->open_mutex);
593 azx_dev = azx_assign_device(chip, substream);
594 trace_azx_pcm_open(chip, azx_dev);
595 if (azx_dev == NULL) {
599 runtime->private_data = azx_dev;
601 runtime->hw = azx_pcm_hw;
602 if (chip->gts_present)
603 runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
604 runtime->hw.channels_min = hinfo->channels_min;
605 runtime->hw.channels_max = hinfo->channels_max;
606 runtime->hw.formats = hinfo->formats;
607 runtime->hw.rates = hinfo->rates;
608 snd_pcm_limit_hw_rates(runtime);
609 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
611 /* avoid wrap-around with wall-clock */
612 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
616 /* by some reason, the playback stream stalls on PulseAudio with
617 * tsched=1 when a capture stream triggers. Until we figure out the
618 * real cause, disable tsched mode by telling the PCM info flag.
620 if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
621 runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
623 if (chip->align_buffer_size)
624 /* constrain buffer sizes to be multiple of 128
625 bytes. This is more efficient in terms of memory
626 access but isn't required by the HDA spec and
627 prevents users from specifying exact period/buffer
628 sizes. For example for 44.1kHz, a period size set
629 to 20ms will be rounded to 19.59ms. */
632 /* Don't enforce steps on buffer sizes, still need to
633 be multiple of 4 bytes (HDA spec). Tested on Intel
634 HDA controllers, may not work on all devices where
635 option needs to be disabled */
638 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
640 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
642 snd_hda_power_up(apcm->codec);
644 err = hinfo->ops.open(hinfo, apcm->codec, substream);
648 azx_release_device(azx_dev);
/* re-limit: the codec open op may have changed the rates */
651 snd_pcm_limit_hw_rates(runtime);
653 if (snd_BUG_ON(!runtime->hw.channels_min) ||
654 snd_BUG_ON(!runtime->hw.channels_max) ||
655 snd_BUG_ON(!runtime->hw.formats) ||
656 snd_BUG_ON(!runtime->hw.rates)) {
657 azx_release_device(azx_dev);
658 if (hinfo->ops.close)
659 hinfo->ops.close(hinfo, apcm->codec, substream);
664 /* disable LINK_ATIME timestamps for capture streams
665 until we figure out how to handle digital inputs */
666 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
667 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
668 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
671 snd_pcm_set_sync(substream);
672 mutex_unlock(&chip->open_mutex);
/* error unwind: drop codec power, mutex and PCM refcount */
676 snd_hda_power_down(apcm->codec);
678 mutex_unlock(&chip->open_mutex);
679 snd_hda_codec_pcm_put(apcm->info);
/* PCM .mmap callback: give the chip ops a chance to tweak the VMA (e.g.
 * cache attributes), then use the default ALSA mmap. */
683 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
684 struct vm_area_struct *area)
686 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
687 struct azx *chip = apcm->chip;
688 if (chip->ops->pcm_mmap_prepare)
689 chip->ops->pcm_mmap_prepare(substream, area);
690 return snd_pcm_lib_default_mmap(substream, area);
/* ALSA PCM callback table shared by all azx streams. */
693 static const struct snd_pcm_ops azx_pcm_ops = {
694 .open = azx_pcm_open,
695 .close = azx_pcm_close,
696 .ioctl = snd_pcm_lib_ioctl,
697 .hw_params = azx_pcm_hw_params,
698 .hw_free = azx_pcm_hw_free,
699 .prepare = azx_pcm_prepare,
700 .trigger = azx_pcm_trigger,
701 .pointer = azx_pcm_pointer,
702 .get_time_info = azx_get_time_info,
703 .mmap = azx_pcm_mmap,
704 .page = snd_pcm_sgbuf_ops_page,
/* private_free hook: unlink the azx_pcm from the chip list and detach it
 * from the codec's hda_pcm (kfree presumably elided in this excerpt). */
707 static void azx_pcm_free(struct snd_pcm *pcm)
709 struct azx_pcm *apcm = pcm->private_data;
711 list_del(&apcm->list);
712 apcm->info->pcm = NULL;
/* Create an ALSA PCM device for the given codec PCM descriptor: reject
 * duplicate device numbers, allocate the snd_pcm and azx_pcm wrapper, wire
 * up ops per direction and preallocate SG buffers (uncached SG variant when
 * the elided condition applies — presumably !snoop).
 * NOTE(review): error-return lines and some assignments are elided in this
 * excerpt.
 */
717 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
719 int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
720 struct hda_pcm *cpcm)
722 struct hdac_bus *bus = &_bus->core;
723 struct azx *chip = bus_to_azx(bus);
725 struct azx_pcm *apcm;
726 int pcm_dev = cpcm->device;
729 int type = SNDRV_DMA_TYPE_DEV_SG;
731 list_for_each_entry(apcm, &chip->pcm_list, list) {
732 if (apcm->pcm->device == pcm_dev) {
733 dev_err(chip->card->dev, "PCM %d already exists\n",
738 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
739 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
740 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
744 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
745 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
747 snd_device_free(chip->card, pcm);
754 pcm->private_data = apcm;
755 pcm->private_free = azx_pcm_free;
756 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
757 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
758 list_add_tail(&apcm->list, &chip->pcm_list);
760 for (s = 0; s < 2; s++) {
761 if (cpcm->stream[s].substreams)
762 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
764 /* buffer pre-allocation */
765 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
766 if (size > MAX_PREALLOC_SIZE)
767 size = MAX_PREALLOC_SIZE;
769 type = SNDRV_DMA_TYPE_DEV_UC_SG;
770 snd_pcm_lib_preallocate_pages_for_all(pcm, type,
772 size, MAX_PREALLOC_SIZE);
/* Extract the codec address (bits 31:28) from a verb; the out-of-range
 * branch body (clamp/warn) is elided in this excerpt. */
776 static unsigned int azx_command_addr(u32 cmd)
778 unsigned int addr = cmd >> 28;
780 if (addr >= AZX_MAX_CODECS) {
/* Wait (up to ~1s) for a RIRB response for codec @addr, polling the RIRB
 * manually when in polling mode.  On timeout, escalate through a ladder of
 * recovery steps: one-shot poll, permanent polling mode, MSI disable, bus
 * reset (-EAGAIN for retry), and finally fall back to single-command mode.
 * NOTE(review): several control-flow lines (returns, loop exits, poll
 * bookkeeping) are elided in this excerpt.
 */
788 /* receive a response */
789 static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
792 struct azx *chip = bus_to_azx(bus);
793 struct hda_bus *hbus = &chip->bus;
794 unsigned long timeout;
795 unsigned long loopcounter;
799 timeout = jiffies + msecs_to_jiffies(1000);
801 for (loopcounter = 0;; loopcounter++) {
802 spin_lock_irq(&bus->reg_lock);
803 if (bus->polling_mode || do_poll)
804 snd_hdac_bus_update_rirb(bus);
805 if (!bus->rirb.cmds[addr]) {
809 *res = bus->rirb.res[addr]; /* the last value */
810 spin_unlock_irq(&bus->reg_lock);
813 spin_unlock_irq(&bus->reg_lock);
814 if (time_after(jiffies, timeout))
/* back off once the busy-wait has gone on long enough */
816 if (hbus->needs_damn_long_delay || loopcounter > 3000)
817 msleep(2); /* temporary workaround */
824 if (hbus->no_response_fallback)
827 if (!bus->polling_mode && bus->poll_count < 2) {
828 dev_dbg(chip->card->dev,
829 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
830 bus->last_cmd[addr]);
837 if (!bus->polling_mode) {
838 dev_warn(chip->card->dev,
839 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
840 bus->last_cmd[addr]);
841 bus->polling_mode = 1;
846 dev_warn(chip->card->dev,
847 "No response from codec, disabling MSI: last cmd=0x%08x\n",
848 bus->last_cmd[addr]);
849 if (chip->ops->disable_msi_reset_irq &&
850 chip->ops->disable_msi_reset_irq(chip) < 0)
856 /* If this critical timeout happens during the codec probing
857 * phase, this is likely an access to a non-existing codec
858 * slot. Better to return an error and reset the system.
863 /* no fallback mechanism? */
864 if (!chip->fallback_to_single_cmd)
867 /* a fatal communication error; need either to reset or to fallback
868 * to the single_cmd mode
870 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
871 hbus->response_reset = 1;
872 return -EAGAIN; /* give a chance to retry */
875 dev_err(chip->card->dev,
876 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
877 bus->last_cmd[addr]);
878 chip->single_cmd = 1;
879 hbus->response_reset = 0;
880 snd_hdac_bus_stop_cmd_io(bus);
885 * Use the single immediate command instead of CORB/RIRB for simplicity
887 * Note: according to Intel, this is not preferred use. The command was
888 * intended for the BIOS only, and may get confused with unsolicited
889 * responses. So, we shouldn't use it for normal operation from the
891 * I left the codes, however, for debugging/testing purposes.
/* Poll the immediate-command IRS register for a valid response; on success
 * stash it in rirb.res[addr].  The surrounding wait loop and timeout return
 * are elided in this excerpt. */
894 /* receive a response */
895 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
900 /* check IRV busy bit */
901 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
902 /* reuse rirb.res as the response return value */
903 azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
908 if (printk_ratelimit())
909 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
910 azx_readw(chip, IRS));
/* mark the slot as "no response" */
911 azx_bus(chip)->rirb.res[addr] = -1;
/* Send a verb via the immediate-command interface (BIOS-style fallback):
 * wait for ICB to clear, clear IRV, write the verb and kick ICB, then wait
 * for the response.  Retry loop and timeout return are elided in this
 * excerpt. */
916 static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
918 struct azx *chip = bus_to_azx(bus);
919 unsigned int addr = azx_command_addr(val);
922 bus->last_cmd[azx_command_addr(val)] = val;
924 /* check ICB busy bit */
925 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
926 /* Clear IRV valid bit */
927 azx_writew(chip, IRS, azx_readw(chip, IRS) |
929 azx_writel(chip, IC, val);
930 azx_writew(chip, IRS, azx_readw(chip, IRS) |
932 return azx_single_wait_for_response(chip, addr);
936 if (printk_ratelimit())
937 dev_dbg(chip->card->dev,
938 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
939 azx_readw(chip, IRS), val);
/* Return the response previously stored by azx_single_wait_for_response(). */
943 /* receive a response */
944 static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
948 *res = bus->rirb.res[addr];
953 * The below are the main callbacks from hda_codec.
955 * They are just the skeleton to call sub-callbacks according to the
956 * current setting of chip->single_cmd.
/* Bus .command op: dispatch to immediate-command or CORB path depending on
 * chip->single_cmd. */
960 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
962 struct azx *chip = bus_to_azx(bus);
966 if (chip->single_cmd)
967 return azx_single_send_cmd(bus, val);
969 return snd_hdac_bus_send_cmd(bus, val);
/* Bus .get_response op: dispatch to immediate-command or RIRB path
 * depending on chip->single_cmd. */
973 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
976 struct azx *chip = bus_to_azx(bus);
980 if (chip->single_cmd)
981 return azx_single_get_response(bus, addr, res);
983 return azx_rirb_get_response(bus, addr, res);
/* hdac core bus ops: verb send / response receive entry points. */
986 static const struct hdac_bus_ops bus_core_ops = {
987 .command = azx_send_cmd,
988 .get_response = azx_get_response,
991 #ifdef CONFIG_SND_HDA_DSP_LOADER
993 * DSP loading code (e.g. for CA0132)
/* Return the first playback stream, used as the DSP firmware-loading DMA
 * channel (e.g. for CA0132). */
996 /* use the first stream for loading DSP */
997 static struct azx_dev *
998 azx_get_dsp_loader_dev(struct azx *chip)
1000 struct hdac_bus *bus = azx_bus(chip);
1001 struct hdac_stream *s;
1003 list_for_each_entry(s, &bus->stream_list, list)
1004 if (s->index == chip->playback_index_offset)
1005 return stream_to_azx_dev(s);
/* Borrow the DSP-loader stream: save its state, lock it, and prepare a DMA
 * buffer of @byte_size for the firmware; on failure restore the saved
 * state.  NOTE(review): the lock/busy checks between the visible lines are
 * elided in this excerpt. */
1010 int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
1011 unsigned int byte_size,
1012 struct snd_dma_buffer *bufp)
1014 struct hdac_bus *bus = &codec->bus->core;
1015 struct azx *chip = bus_to_azx(bus);
1016 struct azx_dev *azx_dev;
1017 struct hdac_stream *hstr;
1021 azx_dev = azx_get_dsp_loader_dev(chip);
1022 hstr = azx_stream(azx_dev);
1023 spin_lock_irq(&bus->reg_lock);
/* snapshot so the stream can be restored after DSP loading */
1025 chip->saved_azx_dev = *azx_dev;
1028 spin_unlock_irq(&bus->reg_lock);
1030 err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
1032 spin_lock_irq(&bus->reg_lock);
1034 *azx_dev = chip->saved_azx_dev;
1035 spin_unlock_irq(&bus->reg_lock);
1042 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
/* Start/stop the DSP-loading DMA transfer on the loader stream. */
1044 void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
1046 struct hdac_bus *bus = &codec->bus->core;
1047 struct azx *chip = bus_to_azx(bus);
1048 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1050 snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
1052 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
/* Undo snd_hda_codec_load_dsp_prepare(): free the firmware DMA buffer,
 * restore the saved stream state and unlock the loader stream.  No-op if
 * nothing was prepared (no buffer or stream not locked). */
1054 void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
1055 struct snd_dma_buffer *dmab)
1057 struct hdac_bus *bus = &codec->bus->core;
1058 struct azx *chip = bus_to_azx(bus);
1059 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1060 struct hdac_stream *hstr = azx_stream(azx_dev);
1062 if (!dmab->area || !hstr->locked)
1065 snd_hdac_dsp_cleanup(hstr, dmab);
1066 spin_lock_irq(&bus->reg_lock);
1068 *azx_dev = chip->saved_azx_dev;
1069 hstr->locked = false;
1070 spin_unlock_irq(&bus->reg_lock);
1072 EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1073 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1076 * reset and start the controller registers
/* Reset and (re)start the controller; applies the Conexant RINTCNT quirk
 * after a successful chip init. */
1078 void azx_init_chip(struct azx *chip, bool full_reset)
1080 if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
1081 /* correct RINTCNT for CXT */
1082 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1083 azx_writew(chip, RINTCNT, 0xc0);
1086 EXPORT_SYMBOL_GPL(azx_init_chip);
/* Stop DMA on every stream registered on the bus. */
1088 void azx_stop_all_streams(struct azx *chip)
1090 struct hdac_bus *bus = azx_bus(chip);
1091 struct hdac_stream *s;
1093 list_for_each_entry(s, &bus->stream_list, list)
1094 snd_hdac_stream_stop(s);
1096 EXPORT_SYMBOL_GPL(azx_stop_all_streams);
/* Shut the controller down (CORB/RIRB, interrupts, link) via the hdac core. */
1098 void azx_stop_chip(struct azx *chip)
1100 snd_hdac_bus_stop_chip(azx_bus(chip));
1102 EXPORT_SYMBOL_GPL(azx_stop_chip);
/* Per-stream IRQ handler: if the position is valid (or no checker is set),
 * report a period elapsed.  Drops bus->reg_lock around the PCM callback to
 * avoid lock recursion; caller holds the lock on entry and exit. */
1107 static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
1109 struct azx *chip = bus_to_azx(bus);
1110 struct azx_dev *azx_dev = stream_to_azx_dev(s);
1112 /* check whether this IRQ is really acceptable */
1113 if (!chip->ops->position_check ||
1114 chip->ops->position_check(chip, azx_dev)) {
1115 spin_unlock(&bus->reg_lock);
1116 snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
1117 spin_lock(&bus->reg_lock);
/* Main interrupt handler: bail out while runtime-suspended, then loop (max
 * 10 iterations to avoid an IRQ storm) handling stream interrupts and RIRB
 * responses; the CXT workaround branch around the RIRB update is elided in
 * this excerpt. */
1121 irqreturn_t azx_interrupt(int irq, void *dev_id)
1123 struct azx *chip = dev_id;
1124 struct hdac_bus *bus = azx_bus(chip);
1126 bool active, handled = false;
1127 int repeat = 0; /* count for avoiding endless loop */
1130 if (azx_has_pm_runtime(chip))
1131 if (!pm_runtime_active(chip->card->dev))
1135 spin_lock(&bus->reg_lock);
/* 0xffffffff indicates the device is gone (e.g. surprise removal) */
1141 status = azx_readl(chip, INTSTS);
1142 if (status == 0 || status == 0xffffffff)
1147 if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
1150 /* clear rirb int */
1151 status = azx_readb(chip, RIRBSTS);
1152 if (status & RIRB_INT_MASK) {
1154 if (status & RIRB_INT_RESPONSE) {
1155 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1157 snd_hdac_bus_update_rirb(bus);
1159 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1161 } while (active && ++repeat < 10);
1164 spin_unlock(&bus->reg_lock);
1166 return IRQ_RETVAL(handled);
1168 EXPORT_SYMBOL_GPL(azx_interrupt);
/* Probe codec slot @addr by reading its vendor-ID parameter from the root
 * node under the bus command mutex; -1 response or send error means no
 * codec there. */
1175 * Probe the given codec address
1177 static int probe_codec(struct azx *chip, int addr)
1179 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1180 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1181 struct hdac_bus *bus = azx_bus(chip);
1183 unsigned int res = -1;
1185 mutex_lock(&bus->cmd_mutex);
1187 azx_send_cmd(bus, cmd);
1188 err = azx_get_response(bus, addr, &res);
1190 mutex_unlock(&bus->cmd_mutex);
1191 if (err < 0 || res == -1)
1193 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
/* Full bus recovery: stop and re-init the controller, then reset all
 * codecs once the chip is initialized.  NOTE(review): the in_reset
 * set/clear bookkeeping lines are elided in this excerpt. */
1197 void snd_hda_bus_reset(struct hda_bus *bus)
1199 struct azx *chip = bus_to_azx(&bus->core);
1202 azx_stop_chip(chip);
1203 azx_init_chip(chip, true);
1204 if (bus->core.chip_init)
1205 snd_hda_bus_reset_codecs(bus);
/* Initialize the hda_bus wrapper around the hdac core bus and translate
 * driver_caps flags into core-bus options (posbuf use, BDL position adjust,
 * CORBRP self-clear, 4K BDLE boundary, synced writes for AMD). */
1209 /* HD-audio bus initialization */
1210 int azx_bus_init(struct azx *chip, const char *model)
1212 struct hda_bus *bus = &chip->bus;
1215 err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
1219 bus->card = chip->card;
1220 mutex_init(&bus->prepare_mutex);
1221 bus->pci = chip->pci;
1222 bus->modelname = model;
1223 bus->mixer_assigned = -1;
1224 bus->core.snoop = azx_snoop(chip);
/* LPIB-only position reporting doesn't need the DMA position buffer */
1225 if (chip->get_position[0] != azx_get_pos_lpib ||
1226 chip->get_position[1] != azx_get_pos_lpib)
1227 bus->core.use_posbuf = true;
1228 bus->core.bdl_pos_adj = chip->bdl_pos_adj;
1229 if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
1230 bus->core.corbrp_self_clear = true;
1232 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
1233 bus->core.align_bdle_4k = true;
1235 /* AMD chipsets often cause the communication stalls upon certain
1236 * sequence like the pin-detection. It seems that forcing the synced
1237 * access works around the stall. Grrr...
1239 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1240 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1241 bus->core.sync_write = 1;
1242 bus->allow_bus_reset = 1;
1247 EXPORT_SYMBOL_GPL(azx_bus_init);
/* Two-pass codec discovery: first probe every enabled slot (disabling any
 * that fails and resetting the controller, since a bad probe can wedge the
 * chip), then instantiate hda_codec objects for the surviving slots.
 * Returns an error if no codec could be initialized.
 * NOTE(review): loop braces and the codecs counter lines are elided in
 * this excerpt. */
1250 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1252 struct hdac_bus *bus = azx_bus(chip);
1257 max_slots = AZX_DEFAULT_CODECS;
1259 /* First try to probe all given codec slots */
1260 for (c = 0; c < max_slots; c++) {
1261 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1262 if (probe_codec(chip, c) < 0) {
1263 /* Some BIOSen give you wrong codec addresses
1266 dev_warn(chip->card->dev,
1267 "Codec #%d probe error; disabling it...\n", c);
1268 bus->codec_mask &= ~(1 << c);
1269 /* More badly, accessing to a non-existing
1270 * codec often screws up the controller chip,
1271 * and disturbs the further communications.
1272 * Thus if an error occurs during probing,
1273 * better to reset the controller chip to
1274 * get back to the sanity state.
1276 azx_stop_chip(chip);
1277 azx_init_chip(chip, true);
1282 /* Then create codec instances */
1283 for (c = 0; c < max_slots; c++) {
1284 if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1285 struct hda_codec *codec;
1286 err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
1289 codec->jackpoll_interval = chip->jackpoll_interval;
1290 codec->beep_mode = chip->beep_mode;
1295 dev_err(chip->card->dev, "no codecs initialized\n");
1300 EXPORT_SYMBOL_GPL(azx_probe_codecs);
/* Run snd_hda_codec_configure() on every codec; iterates with the _safe
 * variant because a failing codec deregisters itself from the list.
 * Returns an error (elided line) when no codec remains on the bus. */
1302 /* configure each codec instance */
1303 int azx_codec_configure(struct azx *chip)
1305 struct hda_codec *codec, *next;
1307 /* use _safe version here since snd_hda_codec_configure() deregisters
1308 * the device upon error and deletes itself from the bus list.
1310 list_for_each_codec_safe(codec, next, &chip->bus) {
1311 snd_hda_codec_configure(codec);
1314 if (!azx_bus(chip)->num_codecs)
1318 EXPORT_SYMBOL_GPL(azx_codec_configure);
/* Classify stream @index as capture when it falls inside the capture index
 * window, otherwise playback. */
1320 static int stream_direction(struct azx *chip, unsigned char index)
1322 if (index >= chip->capture_index_offset &&
1323 index < chip->capture_index_offset + chip->capture_streams)
1324 return SNDRV_PCM_STREAM_CAPTURE;
1325 return SNDRV_PCM_STREAM_PLAYBACK;
/* Allocate and register one azx_dev per hardware stream, assigning each a
 * direction and a stream tag (per-direction tags 1..15 when
 * AZX_DCAPS_SEPARATE_STREAM_TAG is set; the default tag computation line
 * is elided in this excerpt). */
1328 /* initialize SD streams */
1329 int azx_init_streams(struct azx *chip)
1332 int stream_tags[2] = { 0, 0 };
1334 /* initialize each stream (aka device)
1335 * assign the starting bdl address to each stream (device)
1338 for (i = 0; i < chip->num_streams; i++) {
1339 struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
1345 dir = stream_direction(chip, i);
1346 /* stream tag must be unique throughout
1347 * the stream direction group,
1348 * valid values 1...15
1349 * use separate stream tag if the flag
1350 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
1352 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1353 tag = ++stream_tags[dir];
1356 snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
1362 EXPORT_SYMBOL_GPL(azx_init_streams);
/* Tear down all streams: pop each entry off the bus stream list and free
 * the containing azx_dev (list_del presumably elided in this excerpt). */
1364 void azx_free_streams(struct azx *chip)
1366 struct hdac_bus *bus = azx_bus(chip);
1367 struct hdac_stream *s;
1369 while (!list_empty(&bus->stream_list)) {
1370 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1372 kfree(stream_to_azx_dev(s));
1375 EXPORT_SYMBOL_GPL(azx_free_streams);