3 * Implementation of primary alsa driver code base for Intel HD Audio.
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_controller.h"
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
38 /* DSP lock helpers */
39 #ifdef CONFIG_SND_HDA_DSP_LOADER
/* With the DSP loader enabled, DSP firmware download and normal PCM use
 * of a stream are serialized through the per-stream dsp_mutex.
 */
40 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43 #define dsp_is_locked(dev) ((dev)->locked)
/* NOTE(review): the #else marker is elided in this extract; the stubs
 * below are the no-op !CONFIG_SND_HDA_DSP_LOADER variants.
 */
45 #define dsp_lock_init(dev) do {} while (0)
46 #define dsp_lock(dev) do {} while (0)
47 #define dsp_unlock(dev) do {} while (0)
48 #define dsp_is_locked(dev) 0
52 * AZX stream operations.
/* Enable the per-stream interrupt bit in INTCTL and start the stream DMA. */
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
59 * Before stream start, initialize parameter
/* mark that no data has been delivered yet (used by position checks) */
61 azx_dev->insufficient = 1;
/* enable SIE (stream interrupt enable) for this stream index */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index))
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 azx_stream_clear(chip, azx_dev);
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
/* Toggle the SRST bit to reset the stream hardware: assert reset, wait
 * (bounded by a countdown) for the hw to report it, deassert, then wait
 * for the hw to leave the reset state.
 */
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
97 azx_stream_clear(chip, azx_dev);
/* assert stream reset */
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
/* wait until the hardware reports the reset bit as set */
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
/* deassert the reset bit */
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
122 * set up the SD for streaming
/* Program one stream descriptor (SD) for streaming: stream tag, cyclic
 * buffer length, sample format, BDL base addresses, the DMA position
 * buffer enable, and finally the per-descriptor interrupt enables.
 */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
/* non-snoop DMA mode wants the traffic-priority bit set */
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
154 /* enable the position buffer */
/* only needed when at least one direction does not use pure LPIB reads */
155 if (chip->get_position[0] != azx_get_pos_lpib ||
156 chip->get_position[1] != azx_get_pos_lpib) {
157 if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
169 /* assign a stream for the PCM */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
/* pick the stream index range matching the substream direction */
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
/* prefer a free stream previously used with the same key so a reopened
 * substream gets the same hardware stream back; otherwise remember any
 * free candidate (order depends on AZX_DCAPS_REVERSE_ASSIGN)
 */
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
192 azx_dev->assigned_key = key;
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
205 res->assigned_key = key;
211 /* release the assigned stream */
/* NOTE(review): the function body is elided in this extract; it
 * presumably just marks the azx_dev as no longer opened — confirm
 * against the full source.
 */
212 static inline void azx_release_device(struct azx_dev *azx_dev)
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
224 return azx_readl(chip, WALLCLK);
/* (Re)initialize the timecounter that converts the 24 MHz WALLCLK
 * counter into nanoseconds for link audio timestamps.  When @force is
 * set, the counter's last-cycle value is overridden with @last so that
 * grouped (synchronized) streams share one start cycle.
 */
227 static void azx_timecounter_init(struct snd_pcm_substream *substream,
228 bool force, cycle_t last)
230 struct azx_dev *azx_dev = get_azx_dev(substream);
231 struct timecounter *tc = &azx_dev->azx_tc;
232 struct cyclecounter *cc = &azx_dev->azx_cc;
235 cc->read = azx_cc_read;
236 cc->mask = CLOCKSOURCE_MASK(32);
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for a decent precision, however
245 * overflows occur after about 4 hours or less, not an option.
248 cc->mult = 125; /* saturation after 195 years */
251 nsec = 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc, cc, nsec);
255 * force timecounter to use predefined value,
256 * used for synchronized starts
258 tc->cycle_last = last;
261 static inline struct hda_pcm_stream *
262 to_hda_pcm_stream(struct snd_pcm_substream *substream)
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 return &apcm->info->stream[substream->stream];
/* Fold the codec-reported delay into a link timestamp: the delay is
 * converted from frames to ns at the runtime rate, then added for
 * capture streams and subtracted (clamped at zero) for playback.
 */
268 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
271 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
272 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
273 u64 codec_frames, codec_nsecs;
/* no codec-side delay callback: nothing to adjust */
275 if (!hinfo->ops.get_delay)
278 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
279 codec_nsecs = div_u64(codec_frames * 1000000000LL,
280 substream->runtime->rate);
282 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
283 return nsec + codec_nsecs;
285 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
/* Append BDL entries describing the buffer range [ofs, ofs + size) to
 * the stream's buffer descriptor list.  On chips limited by
 * AZX_DCAPS_4K_BDLE_BOUNDARY a chunk is clipped so that no single entry
 * crosses a 4 KiB boundary.  Returns the updated offset (loop and
 * error-return lines are elided in this extract).
 */
291 static int setup_bdle(struct azx *chip,
292 struct snd_dma_buffer *dmab,
293 struct azx_dev *azx_dev, u32 **bdlp,
294 int ofs, int size, int with_ioc)
302 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
305 addr = snd_sgbuf_get_addr(dmab, ofs);
306 /* program the address field of the BDL entry */
307 bdl[0] = cpu_to_le32((u32)addr);
308 bdl[1] = cpu_to_le32(upper_32_bits(addr));
309 /* program the size field of the BDL entry */
310 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
311 /* one BDLE cannot cross 4K boundary on CTHDA chips */
312 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
313 u32 remain = 0x1000 - (ofs & 0xfff);
317 bdl[2] = cpu_to_le32(chunk);
318 /* program the IOC to enable interrupt
319 * only when the whole fragment is processed
/* size reaches 0 when this entry completes the fragment */
322 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
/* Build the BDL for the whole cyclic buffer: optionally insert an
 * interrupt-position adjustment chunk (bdl_pos_adj) ahead of the first
 * period, then emit one set of entries per period, raising IOC only
 * when period wakeups are enabled.
 */
334 static int azx_setup_periods(struct azx *chip,
335 struct snd_pcm_substream *substream,
336 struct azx_dev *azx_dev)
339 int i, ofs, periods, period_bytes;
342 /* reset BDL address */
343 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
344 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
346 period_bytes = azx_dev->period_bytes;
347 periods = azx_dev->bufsize / period_bytes;
349 /* program the initial BDL entries */
350 bdl = (u32 *)azx_dev->bdl.area;
/* bdl_pos_adj is a per-device module option array */
354 if (chip->bdl_pos_adj)
355 pos_adj = chip->bdl_pos_adj[chip->dev_index];
356 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
357 struct snd_pcm_runtime *runtime = substream->runtime;
358 int pos_align = pos_adj;
/* scale the 48 kHz-based adjustment to the runtime rate, rounding up */
359 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
363 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
365 pos_adj = frames_to_bytes(runtime, pos_adj);
366 if (pos_adj >= period_bytes) {
367 dev_warn(chip->card->dev,"Too big adjustment %d\n",
371 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
373 &bdl, ofs, pos_adj, true);
380 for (i = 0; i < periods; i++) {
/* the last period absorbs the bytes taken by the adjustment chunk */
381 if (i == periods - 1 && pos_adj)
382 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
384 period_bytes - pos_adj, 0);
386 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
389 !azx_dev->no_period_wakeup);
396 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
397 azx_dev->bufsize, period_bytes);
/* PCM close callback: detach the substream from its azx_dev under the
 * register lock, release the stream assignment, close the codec side
 * and drop the power reference taken at open time.
 */
405 static int azx_pcm_close(struct snd_pcm_substream *substream)
407 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
408 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
409 struct azx *chip = apcm->chip;
410 struct azx_dev *azx_dev = get_azx_dev(substream);
413 mutex_lock(&chip->open_mutex);
414 spin_lock_irqsave(&chip->reg_lock, flags);
415 azx_dev->substream = NULL;
416 azx_dev->running = 0;
417 spin_unlock_irqrestore(&chip->reg_lock, flags);
418 azx_release_device(azx_dev);
419 hinfo->ops.close(hinfo, apcm->codec, substream);
420 snd_hda_power_down(apcm->codec);
421 mutex_unlock(&chip->open_mutex);
/* PCM hw_params callback: allocate the stream's DMA buffer pages via
 * the chip ops; refused while the DSP loader holds the stream locked.
 */
425 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
426 struct snd_pcm_hw_params *hw_params)
428 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
429 struct azx *chip = apcm->chip;
432 dsp_lock(get_azx_dev(substream));
/* stream is owned by the DSP loader: cannot set up PCM buffers now */
433 if (dsp_is_locked(get_azx_dev(substream))) {
438 ret = chip->ops->substream_alloc_pages(chip, substream,
439 params_buffer_bytes(hw_params));
441 dsp_unlock(get_azx_dev(substream));
/* PCM hw_free callback: clear the SD registers and cached stream
 * parameters (skipped while the DSP loader owns the stream), clean up
 * the codec side and free the DMA buffer pages.
 */
445 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
447 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
448 struct azx_dev *azx_dev = get_azx_dev(substream);
449 struct azx *chip = apcm->chip;
450 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
453 /* reset BDL address */
455 if (!dsp_is_locked(azx_dev)) {
456 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
457 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
458 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
/* forget cached parameters so the next prepare reprograms everything */
459 azx_dev->bufsize = 0;
460 azx_dev->period_bytes = 0;
461 azx_dev->format_val = 0;
464 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
466 err = chip->ops->substream_free_pages(chip, substream);
467 azx_dev->prepared = 0;
/* PCM prepare callback: reset the stream, compute the HDA stream format
 * value, rebuild the BDL if buffer/period/format changed, program the
 * stream descriptor and prepare the codec with the (possibly remapped)
 * stream tag.
 */
472 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
474 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
475 struct azx *chip = apcm->chip;
476 struct azx_dev *azx_dev = get_azx_dev(substream);
477 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
478 struct snd_pcm_runtime *runtime = substream->runtime;
479 unsigned int bufsize, period_bytes, format_val, stream_tag;
481 struct hda_spdif_out *spdif =
482 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
483 unsigned short ctls = spdif ? spdif->ctls : 0;
/* DSP loader owns the stream: refuse to prepare */
486 if (dsp_is_locked(azx_dev)) {
491 azx_stream_reset(chip, azx_dev);
492 format_val = snd_hda_calc_stream_format(apcm->codec,
499 dev_err(chip->card->dev,
500 "invalid format_val, rate=%d, ch=%d, format=%d\n",
501 runtime->rate, runtime->channels, runtime->format);
506 bufsize = snd_pcm_lib_buffer_bytes(substream);
507 period_bytes = snd_pcm_lib_period_bytes(substream);
509 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
510 bufsize, format_val);
/* rebuild the BDL only when the relevant parameters changed */
512 if (bufsize != azx_dev->bufsize ||
513 period_bytes != azx_dev->period_bytes ||
514 format_val != azx_dev->format_val ||
515 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
516 azx_dev->bufsize = bufsize;
517 azx_dev->period_bytes = period_bytes;
518 azx_dev->format_val = format_val;
519 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
520 err = azx_setup_periods(chip, substream, azx_dev);
525 /* when LPIB delay correction gives a small negative value,
526 * we ignore it; currently set the threshold statically to
529 if (runtime->period_size > 64)
530 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
532 azx_dev->delay_negative_threshold = 0;
534 /* wallclk has 24Mhz clock source */
535 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
536 runtime->rate) * 1000);
537 azx_setup_controller(chip, azx_dev);
/* hw FIFO size is only meaningful for playback streams */
538 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
540 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
542 azx_dev->fifo_size = 0;
544 stream_tag = azx_dev->stream_tag;
545 /* CA-IBG chips need the playback stream starting from 1 */
546 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
547 stream_tag > chip->capture_streams
548 stream_tag -= chip->capture_streams;
549 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
550 azx_dev->format_val, substream);
554 azx_dev->prepared = 1;
/* PCM trigger callback: start or stop the whole linked stream group.
 * SYNC/SSYNC bits are set first so grouped streams start on the same
 * link frame, the streams are started/stopped under the register lock,
 * then the code busy-waits for FIFO-ready (start) or RUN-clear (stop)
 * before releasing the SYNC bits and (re)arming the timecounters.
 */
559 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
561 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
562 struct azx *chip = apcm->chip;
563 struct azx_dev *azx_dev;
564 struct snd_pcm_substream *s;
565 int rstart = 0, start, nsync = 0, sbits = 0;
568 azx_dev = get_azx_dev(substream);
569 trace_azx_pcm_trigger(chip, azx_dev, cmd);
/* refuse triggers while the DSP loader owns the stream or before
 * a successful prepare
 */
571 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
575 case SNDRV_PCM_TRIGGER_START:
577 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
578 case SNDRV_PCM_TRIGGER_RESUME:
581 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
582 case SNDRV_PCM_TRIGGER_SUSPEND:
583 case SNDRV_PCM_TRIGGER_STOP:
/* collect the stream index bits of all group members on this card */
590 snd_pcm_group_for_each_entry(s, substream) {
591 if (s->pcm->card != substream->pcm->card)
593 azx_dev = get_azx_dev(s);
594 sbits |= 1 << azx_dev->index;
596 snd_pcm_trigger_done(s, substream);
599 spin_lock(&chip->reg_lock);
601 /* first, set SYNC bits of corresponding streams */
602 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
603 azx_writel(chip, OLD_SSYNC,
604 azx_readl(chip, OLD_SSYNC) | sbits);
606 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
608 snd_pcm_group_for_each_entry(s, substream) {
609 if (s->pcm->card != substream->pcm->card)
611 azx_dev = get_azx_dev(s);
613 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
/* on resume-like restarts, back-date the start wall clock */
615 azx_dev->start_wallclk -=
616 azx_dev->period_wallclk;
617 azx_stream_start(chip, azx_dev);
619 azx_stream_stop(chip, azx_dev);
621 azx_dev->running = start;
623 spin_unlock(&chip->reg_lock);
625 /* wait until all FIFOs get ready */
626 for (timeout = 5000; timeout; timeout--) {
628 snd_pcm_group_for_each_entry(s, substream) {
629 if (s->pcm->card != substream->pcm->card)
631 azx_dev = get_azx_dev(s);
632 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
641 /* wait until all RUN bits are cleared */
642 for (timeout = 5000; timeout; timeout--) {
644 snd_pcm_group_for_each_entry(s, substream) {
645 if (s->pcm->card != substream->pcm->card)
647 azx_dev = get_azx_dev(s);
648 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
657 spin_lock(&chip->reg_lock);
658 /* reset SYNC bits */
659 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
660 azx_writel(chip, OLD_SSYNC,
661 azx_readl(chip, OLD_SSYNC) & ~sbits);
663 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
665 azx_timecounter_init(substream, 0, 0);
666 snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
667 substream->runtime->trigger_tstamp_latched = true;
672 /* same start cycle for master and group */
673 azx_dev = get_azx_dev(substream);
674 cycle_last = azx_dev->azx_tc.cycle_last;
676 snd_pcm_group_for_each_entry(s, substream) {
677 if (s->pcm->card != substream->pcm->card)
679 azx_timecounter_init(s, 1, cycle_last);
683 spin_unlock(&chip->reg_lock);
687 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
689 return azx_sd_readl(chip, azx_dev, SD_LPIB);
691 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
693 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
695 return le32_to_cpu(*azx_dev->posbuf);
697 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
/* Return the current DMA position in bytes, using the per-direction
 * position callback (or the position buffer by default), and report an
 * estimated total delay via runtime->delay (controller and optional
 * codec contributions).
 */
699 unsigned int azx_get_position(struct azx *chip,
700 struct azx_dev *azx_dev)
702 struct snd_pcm_substream *substream = azx_dev->substream;
704 int stream = substream->stream;
707 if (chip->get_position[stream])
708 pos = chip->get_position[stream](chip, azx_dev);
709 else /* use the position buffer as default */
710 pos = azx_get_pos_posbuf(chip, azx_dev);
/* clamp bogus values reported outside the ring buffer */
712 if (pos >= azx_dev->bufsize)
715 if (substream->runtime) {
716 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
717 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
719 if (chip->get_delay[stream])
720 delay += chip->get_delay[stream](chip, azx_dev, pos);
721 if (hinfo->ops.get_delay)
722 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
724 substream->runtime->delay = delay;
727 trace_azx_get_position(chip, azx_dev, pos, delay);
730 EXPORT_SYMBOL_GPL(azx_get_position);
732 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
734 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
735 struct azx *chip = apcm->chip;
736 struct azx_dev *azx_dev = get_azx_dev(substream);
737 return bytes_to_frames(substream->runtime,
738 azx_get_position(chip, azx_dev));
/* .get_time_info callback: when the caller requests a LINK timestamp
 * and the hardware advertises one, derive it from the WALLCLK
 * timecounter; otherwise fall back to the default timestamp type.
 */
741 static int azx_get_time_info(struct snd_pcm_substream *substream,
742 struct timespec *system_ts, struct timespec *audio_ts,
743 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
744 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
746 struct azx_dev *azx_dev = get_azx_dev(substream);
749 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
750 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
752 snd_pcm_gettime(substream->runtime, system_ts);
754 nsec = timecounter_read(&azx_dev->azx_tc);
/* the counter accumulates 125-scaled ticks; the final /3 completes
 * the 24 MHz -> ns (125/3) conversion
 */
755 nsec = div_u64(nsec, 3); /* can be optimized */
756 if (audio_tstamp_config->report_delay)
757 nsec = azx_adjust_codec_delay(substream, nsec);
759 *audio_ts = ns_to_timespec(nsec);
761 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
762 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
763 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
766 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
/* Default PCM hardware capabilities; the per-codec fields (formats,
 * rates, channel counts) are overridden in azx_pcm_open().
 */
771 static struct snd_pcm_hardware azx_pcm_hw = {
772 .info = (SNDRV_PCM_INFO_MMAP |
773 SNDRV_PCM_INFO_INTERLEAVED |
774 SNDRV_PCM_INFO_BLOCK_TRANSFER |
775 SNDRV_PCM_INFO_MMAP_VALID |
776 /* No full-resume yet implemented */
777 /* SNDRV_PCM_INFO_RESUME |*/
778 SNDRV_PCM_INFO_PAUSE |
779 SNDRV_PCM_INFO_SYNC_START |
780 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
781 SNDRV_PCM_INFO_HAS_LINK_ATIME |
782 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
783 .formats = SNDRV_PCM_FMTBIT_S16_LE,
784 .rates = SNDRV_PCM_RATE_48000,
789 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
790 .period_bytes_min = 128,
791 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
793 .periods_max = AZX_MAX_FRAG,
/* PCM open callback: assign a hardware stream, merge the codec's
 * capabilities into the runtime hw description, apply buffer/period
 * constraints, open the codec side and attach the azx_dev to the
 * substream.
 */
797 static int azx_pcm_open(struct snd_pcm_substream *substream)
799 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
800 struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
801 struct azx *chip = apcm->chip;
802 struct azx_dev *azx_dev;
803 struct snd_pcm_runtime *runtime = substream->runtime;
808 mutex_lock(&chip->open_mutex);
809 azx_dev = azx_assign_device(chip, substream);
/* no free hardware stream available */
810 if (azx_dev == NULL) {
811 mutex_unlock(&chip->open_mutex);
814 runtime->hw = azx_pcm_hw;
815 runtime->hw.channels_min = hinfo->channels_min;
816 runtime->hw.channels_max = hinfo->channels_max;
817 runtime->hw.formats = hinfo->formats;
818 runtime->hw.rates = hinfo->rates;
819 snd_pcm_limit_hw_rates(runtime);
820 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
822 /* avoid wrap-around with wall-clock */
823 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
827 if (chip->align_buffer_size)
828 /* constrain buffer sizes to be multiple of 128
829 bytes. This is more efficient in terms of memory
830 access but isn't required by the HDA spec and
831 prevents users from specifying exact period/buffer
832 sizes. For example for 44.1kHz, a period size set
833 to 20ms will be rounded to 19.59ms. */
836 /* Don't enforce steps on buffer sizes, still need to
837 be multiple of 4 bytes (HDA spec). Tested on Intel
838 HDA controllers, may not work on all devices where
839 option needs to be disabled */
842 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
844 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
846 snd_hda_power_up(apcm->codec);
847 err = hinfo->ops.open(hinfo, apcm->codec, substream);
/* codec open failed: undo the stream assignment and power-up */
849 azx_release_device(azx_dev);
850 snd_hda_power_down(apcm->codec);
851 mutex_unlock(&chip->open_mutex);
854 snd_pcm_limit_hw_rates(runtime);
/* sanity check: the codec must leave at least one valid setting */
856 if (snd_BUG_ON(!runtime->hw.channels_min) ||
857 snd_BUG_ON(!runtime->hw.channels_max) ||
858 snd_BUG_ON(!runtime->hw.formats) ||
859 snd_BUG_ON(!runtime->hw.rates)) {
860 azx_release_device(azx_dev);
861 hinfo->ops.close(hinfo, apcm->codec, substream);
862 snd_hda_power_down(apcm->codec);
863 mutex_unlock(&chip->open_mutex);
867 /* disable LINK_ATIME timestamps for capture streams
868 until we figure out how to handle digital inputs */
869 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
870 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
871 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
874 spin_lock_irqsave(&chip->reg_lock, flags);
875 azx_dev->substream = substream;
876 azx_dev->running = 0;
877 spin_unlock_irqrestore(&chip->reg_lock, flags);
879 runtime->private_data = azx_dev;
880 snd_pcm_set_sync(substream);
881 mutex_unlock(&chip->open_mutex);
885 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
886 struct vm_area_struct *area)
888 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
889 struct azx *chip = apcm->chip;
890 if (chip->ops->pcm_mmap_prepare)
891 chip->ops->pcm_mmap_prepare(substream, area);
892 return snd_pcm_lib_default_mmap(substream, area);
/* PCM operation table wired into every azx stream via snd_pcm_set_ops() */
895 static struct snd_pcm_ops azx_pcm_ops = {
896 .open = azx_pcm_open,
897 .close = azx_pcm_close,
898 .ioctl = snd_pcm_lib_ioctl,
899 .hw_params = azx_pcm_hw_params,
900 .hw_free = azx_pcm_hw_free,
901 .prepare = azx_pcm_prepare,
902 .trigger = azx_pcm_trigger,
903 .pointer = azx_pcm_pointer,
904 .get_time_info = azx_get_time_info,
905 .mmap = azx_pcm_mmap,
906 .page = snd_pcm_sgbuf_ops_page,
/* private_free callback for the PCM: unlink the azx_pcm from the chip's
 * pcm_list and detach it from the hda_pcm info (the kfree of apcm is in
 * the elided tail of this function — confirm upstream).
 */
909 static void azx_pcm_free(struct snd_pcm *pcm)
911 struct azx_pcm *apcm = pcm->private_data;
913 list_del(&apcm->list);
914 apcm->info->pcm = NULL;
919 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
/* Create a new ALSA PCM device for the codec's hda_pcm: reject duplicate
 * device numbers, allocate the azx_pcm wrapper, hook up the ops for the
 * directions that have substreams, and preallocate SG buffers.
 */
921 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
922 struct hda_pcm *cpcm)
924 struct azx *chip = bus->private_data;
926 struct azx_pcm *apcm;
927 int pcm_dev = cpcm->device;
/* refuse to create two PCMs with the same device number */
931 list_for_each_entry(apcm, &chip->pcm_list, list) {
932 if (apcm->pcm->device == pcm_dev) {
933 dev_err(chip->card->dev, "PCM %d already exists\n",
938 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
939 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
940 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
944 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
945 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
952 pcm->private_data = apcm;
953 pcm->private_free = azx_pcm_free;
954 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
955 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
956 list_add_tail(&apcm->list, &chip->pcm_list);
/* s = 0/1 covers playback and capture directions */
958 for (s = 0; s < 2; s++) {
959 if (cpcm->stream[s].substreams)
960 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
962 /* buffer pre-allocation */
963 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
964 if (size > MAX_PREALLOC_SIZE)
965 size = MAX_PREALLOC_SIZE;
966 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
968 size, MAX_PREALLOC_SIZE);
973 * CORB / RIRB interface
975 static int azx_alloc_cmd_io(struct azx *chip)
977 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
978 return chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
979 PAGE_SIZE, &chip->rb);
981 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
/* Program and start the CORB (command outbound) and RIRB (response
 * inbound) ring buffers: base addresses from the shared DMA page, sizes
 * of 256 entries, pointer resets (with a polled CORBRP reset handshake
 * unless the chip self-clears), then enable the DMA engines.
 */
983 static void azx_init_cmd_io(struct azx *chip)
987 spin_lock_irq(&chip->reg_lock);
/* CORB occupies the first half of the shared DMA page */
989 chip->corb.addr = chip->rb.addr;
990 chip->corb.buf = (u32 *)chip->rb.area;
991 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
992 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
994 /* set the corb size to 256 entries (ULI requires explicitly) */
995 azx_writeb(chip, CORBSIZE, 0x02);
996 /* set the corb write pointer to 0 */
997 azx_writew(chip, CORBWP, 0);
999 /* reset the corb hw read pointer */
1000 azx_writew(chip, CORBRP, AZX_CORBRP_RST);
1001 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
/* wait for the hw to latch the reset bit */
1002 for (timeout = 1000; timeout > 0; timeout--) {
1003 if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
1008 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1009 azx_readw(chip, CORBRP));
/* clear the reset bit and wait for it to read back as zero */
1011 azx_writew(chip, CORBRP, 0);
1012 for (timeout = 1000; timeout > 0; timeout--) {
1013 if (azx_readw(chip, CORBRP) == 0)
1018 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1019 azx_readw(chip, CORBRP));
1022 /* enable corb dma */
1023 azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
/* RIRB occupies the second half of the shared DMA page */
1026 chip->rirb.addr = chip->rb.addr + 2048;
1027 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1028 chip->rirb.wp = chip->rirb.rp = 0;
1029 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1030 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1031 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1033 /* set the rirb size to 256 entries (ULI requires explicitly) */
1034 azx_writeb(chip, RIRBSIZE, 0x02);
1035 /* reset the rirb hw write pointer */
1036 azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1037 /* set N=1, get RIRB response interrupt for new entry */
1038 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1039 azx_writew(chip, RINTCNT, 0xc0);
1041 azx_writew(chip, RINTCNT, 1);
1042 /* enable rirb dma and response irq */
1043 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1044 spin_unlock_irq(&chip->reg_lock);
1046 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1048 static void azx_free_cmd_io(struct azx *chip)
1050 spin_lock_irq(&chip->reg_lock);
1051 /* disable ringbuffer DMAs */
1052 azx_writeb(chip, RIRBCTL, 0);
1053 azx_writeb(chip, CORBCTL, 0);
1054 spin_unlock_irq(&chip->reg_lock);
1056 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
/* Extract the codec address from the top nibble of a verb.  The
 * out-of-range handling and return statement are elided in this
 * extract; presumably an invalid address is reported and clamped —
 * confirm against the full source.
 */
1058 static unsigned int azx_command_addr(u32 cmd)
1060 unsigned int addr = cmd >> 28;
1062 if (addr >= AZX_MAX_CODECS) {
1070 /* send a command */
/* Queue one verb into the CORB under the register lock: read the write
 * pointer, check for a dead controller and for a full ring (against
 * CORBRP), then store the verb and advance CORBWP.  Also bumps the
 * outstanding-command counter used by the RIRB handler.
 */
1071 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1073 struct azx *chip = bus->private_data;
1074 unsigned int addr = azx_command_addr(val);
1075 unsigned int wp, rp;
1077 spin_lock_irq(&chip->reg_lock);
1079 /* add command to corb */
1080 wp = azx_readw(chip, CORBWP);
1082 /* something wrong, controller likely turned to D3 */
1083 spin_unlock_irq(&chip->reg_lock);
1087 wp %= AZX_MAX_CORB_ENTRIES;
1089 rp = azx_readw(chip, CORBRP);
1091 /* oops, it's full */
1092 spin_unlock_irq(&chip->reg_lock);
/* record that a response is now pending from this codec address */
1096 chip->rirb.cmds[addr]++;
1097 chip->corb.buf[wp] = cpu_to_le32(val);
1098 azx_writew(chip, CORBWP, wp);
1100 spin_unlock_irq(&chip->reg_lock);
1105 #define AZX_RIRB_EX_UNSOL_EV (1<<4)
1107 /* retrieve RIRB entry - called from interrupt handler */
/* Drain new RIRB entries up to the hardware write pointer: each 8-byte
 * entry carries the response and an extended word with the codec
 * address and the unsolicited-event flag.  Solicited responses are
 * stored per-codec and decrement the outstanding-command counter;
 * spurious ones are logged (rate-limited).
 */
1108 static void azx_update_rirb(struct azx *chip)
1110 unsigned int rp, wp;
1114 wp = azx_readw(chip, RIRBWP);
1116 /* something wrong, controller likely turned to D3 */
/* no new entries since the last run */
1120 if (wp == chip->rirb.wp)
1124 while (chip->rirb.rp != wp) {
1126 chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1128 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1129 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1130 res = le32_to_cpu(chip->rirb.buf[rp]);
1131 addr = res_ex & 0xf;
1132 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1133 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1137 } else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1138 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1139 else if (chip->rirb.cmds[addr]) {
1140 chip->rirb.res[addr] = res;
1142 chip->rirb.cmds[addr]--;
1143 } else if (printk_ratelimit()) {
1144 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1146 chip->last_cmd[addr]);
1151 /* receive a response */
/* Wait (up to ~1 s) for the codec's RIRB response.  On timeout this
 * escalates through a ladder of workarounds: one-shot polling, then
 * permanent polling mode, then disabling MSI, then (outside probing and
 * bus-reset) falling back to single_cmd mode with CORB/RIRB released.
 */
1152 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1155 struct azx *chip = bus->private_data;
1156 unsigned long timeout;
1157 unsigned long loopcounter;
1161 timeout = jiffies + msecs_to_jiffies(1000);
1163 for (loopcounter = 0;; loopcounter++) {
/* in polling mode the RIRB is drained here instead of in the IRQ */
1164 if (chip->polling_mode || do_poll) {
1165 spin_lock_irq(&chip->reg_lock);
1166 azx_update_rirb(chip);
1167 spin_unlock_irq(&chip->reg_lock);
1169 if (!chip->rirb.cmds[addr]) {
1171 bus->rirb_error = 0;
1174 chip->poll_count = 0;
1175 return chip->rirb.res[addr]; /* the last value */
1177 if (time_after(jiffies, timeout))
1179 if (bus->needs_damn_long_delay || loopcounter > 3000)
1180 msleep(2); /* temporary workaround */
1187 if (!bus->no_response_fallback)
1190 if (!chip->polling_mode && chip->poll_count < 2) {
1191 dev_dbg(chip->card->dev,
1192 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1193 chip->last_cmd[addr]);
1200 if (!chip->polling_mode) {
1201 dev_warn(chip->card->dev,
1202 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1203 chip->last_cmd[addr]);
1204 chip->polling_mode = 1;
1209 dev_warn(chip->card->dev,
1210 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1211 chip->last_cmd[addr]);
/* FIXME(review): the first operand calls disable_msi_reset_irq(chip)
 * instead of testing the chip->ops->disable_msi_reset_irq pointer for
 * NULL, so the callback runs twice (and a NULL callback would crash).
 * Upstream checks the pointer first — confirm and fix.
 */
1212 if (chip->ops->disable_msi_reset_irq(chip) &&
1213 chip->ops->disable_msi_reset_irq(chip) < 0) {
1214 bus->rirb_error = 1;
1220 if (chip->probing) {
1221 /* If this critical timeout happens during the codec probing
1222 * phase, this is likely an access to a non-existing codec
1223 * slot. Better to return an error and reset the system.
1228 /* a fatal communication error; need either to reset or to fallback
1229 * to the single_cmd mode
1231 bus->rirb_error = 1;
1232 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1233 bus->response_reset = 1;
1234 return -1; /* give a chance to retry */
1237 dev_err(chip->card->dev,
1238 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1239 chip->last_cmd[addr]);
1240 chip->single_cmd = 1;
1241 bus->response_reset = 0;
1242 /* release CORB/RIRB */
1243 azx_free_cmd_io(chip);
1244 /* disable unsolicited responses */
1245 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1250 * Use the single immediate command instead of CORB/RIRB for simplicity
1252 * Note: according to Intel, this is not preferred use. The command was
1253 * intended for the BIOS only, and may get confused with unsolicited
1254 * responses. So, we shouldn't use it for normal operation from the
1256 * I left the codes, however, for debugging/testing purposes.
1259 /* receive a response */
/*
 * azx_single_wait_for_response - poll for a codec response in
 * single-command (immediate command register) mode.
 *
 * Spins on the IRS register until the response-valid bit is set, then
 * latches the response word from IR into chip->rirb.res[addr] so that
 * azx_single_get_response() can return it later.  On timeout the slot
 * is set to -1 and the timeout is logged (rate-limited).
 *
 * NOTE(review): the polling-loop and delay lines are not visible in
 * this excerpt; the timeout bound is assumed from the visible structure.
 */
1260 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1265 /* check IRV busy bit */
1266 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1267 /* reuse rirb.res as the response return value */
1268 chip->rirb.res[addr] = azx_readl(chip, IR);
/* timeout path: log (rate-limited) and store -1 as "no response" */
1273 if (printk_ratelimit())
1274 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1275 azx_readw(chip, IRS));
1276 chip->rirb.res[addr] = -1;
1280 /* send a command */
/*
 * azx_single_send_cmd - send one verb via the immediate command
 * registers (single-command mode, bypassing CORB/RIRB).
 *
 * Waits for the ICB busy bit to clear, clears the response-valid bit,
 * writes the verb to IC, kicks off the command via IRS, and then
 * synchronously waits for the response.  Logs (rate-limited) on
 * send timeout.
 */
1281 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1283 struct azx *chip = bus->private_data;
1284 unsigned int addr = azx_command_addr(val);
1287 bus->rirb_error = 0;
1289 /* check ICB busy bit */
1290 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1291 /* Clear IRV valid bit */
1292 azx_writew(chip, IRS, azx_readw(chip, IRS) |
/* write the verb and set the busy bit to start the transfer */
1294 azx_writel(chip, IC, val);
1295 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1297 return azx_single_wait_for_response(chip, addr);
/* busy bit never cleared: report the timeout (rate-limited) */
1301 if (printk_ratelimit())
1302 dev_dbg(chip->card->dev,
1303 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1304 azx_readw(chip, IRS), val);
1308 /* receive a response */
/*
 * azx_single_get_response - return the response previously latched by
 * azx_single_wait_for_response() for the given codec address.
 */
1309 static unsigned int azx_single_get_response(struct hda_bus *bus,
1312 struct azx *chip = bus->private_data;
1313 return chip->rirb.res[addr];
1317 * The below are the main callbacks from hda_codec.
1319 * They are just the skeleton to call sub-callbacks according to the
1320 * current setting of chip->single_cmd.
1323 /* send a command */
/*
 * azx_send_cmd - top-level "send a verb" callback from hda_codec.
 *
 * Records the verb in last_cmd[] (referenced by the timeout
 * diagnostics in the response path), then dispatches to either the
 * immediate-command path or the CORB ring depending on
 * chip->single_cmd.
 */
1324 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1326 struct azx *chip = bus->private_data;
/* remember the last verb per codec address for timeout messages */
1330 chip->last_cmd[azx_command_addr(val)] = val;
1331 if (chip->single_cmd)
1332 return azx_single_send_cmd(bus, val);
1334 return azx_corb_send_cmd(bus, val);
1336 EXPORT_SYMBOL_GPL(azx_send_cmd);
1338 /* get a response */
/*
 * azx_get_response - top-level "get a response" callback from
 * hda_codec; dispatches to the single-command or RIRB reader
 * according to chip->single_cmd.
 */
1339 static unsigned int azx_get_response(struct hda_bus *bus,
1342 struct azx *chip = bus->private_data;
1345 if (chip->single_cmd)
1346 return azx_single_get_response(bus, addr);
1348 return azx_rirb_get_response(bus, addr);
1350 EXPORT_SYMBOL_GPL(azx_get_response);
1352 #ifdef CONFIG_SND_HDA_DSP_LOADER
1354 * DSP loading code (e.g. for CA0132)
1357 /* use the first stream for loading DSP */
/*
 * azx_get_dsp_loader_dev - pick the stream used for DSP firmware
 * download: the first playback stream (playback_index_offset).
 */
1358 static struct azx_dev *
1359 azx_get_dsp_loader_dev(struct azx *chip)
1361 return &chip->azx_dev[chip->playback_index_offset];
/*
 * azx_load_dsp_prepare - set up a stream for DSP firmware download
 * (e.g. CA0132).
 *
 * Claims the loader stream under reg_lock (failing if it is running
 * or already locked), saves its state into chip->saved_azx_dev for
 * later restore, allocates an SG DMA buffer of byte_size, programs a
 * single-entry BDL, and configures the controller.  Returns the
 * stream tag on success; on error the DMA buffer is freed and the
 * saved stream state restored.
 *
 * NOTE(review): several lines (error returns, bdl declaration,
 * labels) are not visible in this excerpt.
 */
1364 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1365 unsigned int byte_size,
1366 struct snd_dma_buffer *bufp)
1369 struct azx *chip = bus->private_data;
1370 struct azx_dev *azx_dev;
1373 azx_dev = azx_get_dsp_loader_dev(chip);
/* claim the stream exclusively; bail out if it is in use */
1376 spin_lock_irq(&chip->reg_lock);
1377 if (azx_dev->running || azx_dev->locked) {
1378 spin_unlock_irq(&chip->reg_lock);
/* snapshot the stream state so normal PCM use can be restored */
1382 azx_dev->prepared = 0;
1383 chip->saved_azx_dev = *azx_dev;
1384 azx_dev->locked = 1;
1385 spin_unlock_irq(&chip->reg_lock);
1387 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1392 azx_dev->bufsize = byte_size;
1393 azx_dev->period_bytes = byte_size;
1394 azx_dev->format_val = format;
1396 azx_stream_reset(chip, azx_dev);
1398 /* reset BDL address */
1399 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0)
1400 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
/* program a single BDL entry covering the whole firmware buffer */
1403 bdl = (u32 *)azx_dev->bdl.area;
1404 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1408 azx_setup_controller(chip, azx_dev);
1409 dsp_unlock(azx_dev);
1410 return azx_dev->stream_tag;
/* error path: release the buffer and restore the saved stream */
1413 chip->ops->dma_free_pages(chip, bufp);
1415 spin_lock_irq(&chip->reg_lock);
1416 if (azx_dev->opened)
1417 *azx_dev = chip->saved_azx_dev;
1418 azx_dev->locked = 0;
1419 spin_unlock_irq(&chip->reg_lock);
1421 dsp_unlock(azx_dev);
/*
 * azx_load_dsp_trigger - start or stop the DMA of the DSP-loader
 * stream and track the running state.
 */
1425 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1427 struct azx *chip = bus->private_data;
1428 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1431 azx_stream_start(chip, azx_dev);
1433 azx_stream_stop(chip, azx_dev);
1434 azx_dev->running = start;
/*
 * azx_load_dsp_cleanup - tear down after DSP firmware download.
 *
 * No-op unless the buffer was allocated and the loader stream is
 * still locked.  Clears the stream registers and bookkeeping, frees
 * the DMA buffer, restores the saved stream state (if the stream is
 * opened by a PCM user) and releases the lock.
 */
1437 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1438 struct snd_dma_buffer *dmab)
1440 struct azx *chip = bus->private_data;
1441 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1443 if (!dmab->area || !azx_dev->locked)
1447 /* reset BDL address */
1448 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1449 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1450 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1451 azx_dev->bufsize = 0;
1452 azx_dev->period_bytes = 0;
1453 azx_dev->format_val = 0;
1455 chip->ops->dma_free_pages(chip, dmab);
/* restore the stream state saved by azx_load_dsp_prepare() */
1458 spin_lock_irq(&chip->reg_lock);
1459 if (azx_dev->opened)
1460 *azx_dev = chip->saved_azx_dev;
1461 azx_dev->locked = 0;
1462 spin_unlock_irq(&chip->reg_lock);
1463 dsp_unlock(azx_dev);
1465 #endif /* CONFIG_SND_HDA_DSP_LOADER */
/*
 * azx_alloc_stream_pages - allocate the DMA areas the controller
 * needs: one BDL per stream, the shared position buffer (8 bytes per
 * stream), and the CORB/RIRB command ring.
 *
 * Also initializes the per-stream DSP mutex.  Returns 0 on success or
 * a negative error from the allocator (error-return lines not visible
 * in this excerpt).
 */
1467 int azx_alloc_stream_pages(struct azx *chip)
1471 for (i = 0; i < chip->num_streams; i++) {
1472 dsp_lock_init(&chip->azx_dev[i]);
1473 /* allocate memory for the BDL for each stream */
1474 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1476 &chip->azx_dev[i].bdl);
1480 /* allocate memory for the position buffer */
1481 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1482 chip->num_streams * 8, &chip->posbuf);
1486 /* allocate CORB/RIRB */
1487 err = azx_alloc_cmd_io(chip);
1492 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
/*
 * azx_free_stream_pages - release everything allocated by
 * azx_alloc_stream_pages(): per-stream BDLs, the CORB/RIRB ring
 * buffer (chip->rb) and the position buffer.  Each area is freed only
 * if it was actually allocated, so this is safe on partial-init
 * failure paths.
 */
1494 void azx_free_stream_pages(struct azx *chip)
1497 if (chip->azx_dev) {
1498 for (i = 0; i < chip->num_streams; i++)
1499 if (chip->azx_dev[i].bdl.area)
1500 chip->ops->dma_free_pages(
1501 chip, &chip->azx_dev[i].bdl);
1504 chip->ops->dma_free_pages(chip, &chip->rb);
1505 if (chip->posbuf.area)
1506 chip->ops->dma_free_pages(chip, &chip->posbuf);
1508 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1511 * Lowlevel interface
1514 /* enter link reset */
/*
 * azx_enter_link_reset - put the controller into link reset by
 * clearing GCTL.CRST, then poll (up to 100ms, sleeping 0.5-1ms per
 * iteration) until the hardware reports the reset bit cleared.
 */
1515 void azx_enter_link_reset(struct azx *chip)
1517 unsigned long timeout;
1519 /* reset controller */
1520 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1522 timeout = jiffies + msecs_to_jiffies(100);
1523 while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1524 time_before(jiffies, timeout))
1525 usleep_range(500, 1000);
1527 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1529 /* exit link reset */
/*
 * azx_exit_link_reset - take the controller out of link reset by
 * setting GCTL.CRST, then poll (up to 100ms) until GCTL reads back
 * non-zero.
 *
 * NOTE(review): the loop tests the whole GCTL byte rather than the
 * CRST bit specifically; this matches the era's upstream code but is
 * worth confirming against the HDA spec if touched.
 */
1530 static void azx_exit_link_reset(struct azx *chip)
1532 unsigned long timeout;
1534 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1536 timeout = jiffies + msecs_to_jiffies(100);
1537 while (!azx_readb(chip, GCTL) &&
1538 time_before(jiffies, timeout))
1539 usleep_range(500, 1000);
1542 /* reset codec link */
/*
 * azx_reset - full codec-link reset sequence.
 *
 * Clears STATESTS, pulses the link through reset (enter + settle
 * delay per HDA spec rev 0.9 sec 5.5.1 + exit + codec init delay),
 * verifies the controller came back, re-enables unsolicited responses
 * (unless in single_cmd mode) and, on first run, latches the codec
 * presence mask from STATESTS.
 *
 * NOTE(review): the full_reset short-circuit and return statements
 * are not visible in this excerpt.
 */
1543 static int azx_reset(struct azx *chip, bool full_reset)
1548 /* clear STATESTS */
1549 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1551 /* reset controller */
1552 azx_enter_link_reset(chip);
1554 /* delay for >= 100us for codec PLL to settle per spec
1555 * Rev 0.9 section 5.5.1
1557 usleep_range(500, 1000);
1559 /* Bring controller out of reset */
1560 azx_exit_link_reset(chip);
1562 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1563 usleep_range(1000, 1200);
1566 /* check to see if controller is ready */
1567 if (!azx_readb(chip, GCTL)) {
1568 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1572 /* Accept unsolicited responses */
1573 if (!chip->single_cmd)
1574 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
/* detect codecs: STATESTS bits are set for present codec slots */
1578 if (!chip->codec_mask) {
1579 chip->codec_mask = azx_readw(chip, STATESTS);
1580 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1587 /* enable interrupts */
/*
 * azx_int_enable - enable controller-level interrupt generation
 * (controller interrupt enable + global interrupt enable in INTCTL).
 */
1588 static void azx_int_enable(struct azx *chip)
1590 /* enable controller CIE and GIE */
1591 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1592 AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1595 /* disable interrupts */
/*
 * azx_int_disable - mask all interrupt sources: per-stream descriptor
 * interrupts, all stream-interrupt-enable (SIE) bits, and finally the
 * controller/global enables in INTCTL.
 */
1596 static void azx_int_disable(struct azx *chip)
1600 /* disable interrupts in stream descriptor */
1601 for (i = 0; i < chip->num_streams; i++) {
1602 struct azx_dev *azx_dev = &chip->azx_dev[i];
1603 azx_sd_writeb(chip, azx_dev, SD_CTL,
1604 azx_sd_readb(chip, azx_dev, SD_CTL) &
1608 /* disable SIE for all streams */
1609 azx_writeb(chip, INTCTL, 0);
1611 /* disable controller CIE and GIE */
1612 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1613 ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1616 /* clear interrupts */
/*
 * azx_int_clear - acknowledge (write-1-to-clear) all pending
 * interrupt status: per-stream SD_STS, STATESTS, RIRBSTS and the
 * global INTSTS.
 */
1617 static void azx_int_clear(struct azx *chip)
1621 /* clear stream status */
1622 for (i = 0; i < chip->num_streams; i++) {
1623 struct azx_dev *azx_dev = &chip->azx_dev[i];
1624 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1627 /* clear STATESTS */
1628 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1630 /* clear rirb status */
1631 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1633 /* clear int status */
1634 azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1638 * reset and start the controller registers
/*
 * azx_init_chip - bring the controller to an operational state:
 * reset the link, clear/enable interrupts, set up CORB/RIRB (unless
 * in single_cmd mode) and program the DMA position buffer base.
 * Idempotent: returns early if already initialized.
 */
1640 void azx_init_chip(struct azx *chip, bool full_reset)
1642 if (chip->initialized)
1645 /* reset controller */
1646 azx_reset(chip, full_reset);
1648 /* initialize interrupts */
1649 azx_int_clear(chip);
1650 azx_int_enable(chip);
1652 /* initialize the codec command I/O */
1653 if (!chip->single_cmd)
1654 azx_init_cmd_io(chip);
1656 /* program the position buffer */
1657 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1658 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1660 chip->initialized = 1;
1662 EXPORT_SYMBOL_GPL(azx_init_chip);
/*
 * azx_stop_chip - inverse of azx_init_chip(): disable and clear
 * interrupts, tear down CORB/RIRB and clear the position-buffer base
 * registers.  No-op if the chip was never initialized.
 */
1664 void azx_stop_chip(struct azx *chip)
1666 if (!chip->initialized)
1669 /* disable interrupts */
1670 azx_int_disable(chip);
1671 azx_int_clear(chip);
1673 /* disable CORB/RIRB */
1674 azx_free_cmd_io(chip);
1676 /* disable position buffer */
1677 azx_writel(chip, DPLBASE, 0);
1678 azx_writel(chip, DPUBASE, 0);
1680 chip->initialized = 0;
1682 EXPORT_SYMBOL_GPL(azx_stop_chip);
/*
 * azx_interrupt - main interrupt handler.
 *
 * Bails out when runtime-suspended or when the device is disabled, or
 * when INTSTS reads 0 / 0xffffffff (not our IRQ, or device gone).
 * For each stream whose status bit is set, acknowledges SD_STS and
 * signals a period elapse to ALSA (dropping reg_lock around
 * snd_pcm_period_elapsed() to avoid lock recursion), after first
 * letting the chip-specific position_check veto spurious IRQs.
 * Finally acknowledges RIRB interrupts, reading new responses with an
 * optional pre-delay for quirky chips (AZX_DCAPS_RIRB_PRE_DELAY).
 */
1687 irqreturn_t azx_interrupt(int irq, void *dev_id)
1689 struct azx *chip = dev_id;
1690 struct azx_dev *azx_dev;
/* ignore IRQs while runtime-suspended */
1696 if (azx_has_pm_runtime(chip))
1697 if (!pm_runtime_active(chip->card->dev))
1701 spin_lock(&chip->reg_lock);
1703 if (chip->disabled) {
1704 spin_unlock(&chip->reg_lock);
/* 0xffffffff typically means the device dropped off the bus */
1708 status = azx_readl(chip, INTSTS);
1709 if (status == 0 || status == 0xffffffff) {
1710 spin_unlock(&chip->reg_lock);
1714 for (i = 0; i < chip->num_streams; i++) {
1715 azx_dev = &chip->azx_dev[i];
1716 if (status & azx_dev->sd_int_sta_mask) {
1717 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1718 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1719 if (!azx_dev->substream || !azx_dev->running ||
1720 !(sd_status & SD_INT_COMPLETE))
1722 /* check whether this IRQ is really acceptable */
1723 if (!chip->ops->position_check ||
1724 chip->ops->position_check(chip, azx_dev)) {
/* drop the lock around the ALSA callback */
1725 spin_unlock(&chip->reg_lock);
1726 snd_pcm_period_elapsed(azx_dev->substream);
1727 spin_lock(&chip->reg_lock);
1732 /* clear rirb int */
1733 status = azx_readb(chip, RIRBSTS);
1734 if (status & RIRB_INT_MASK) {
1735 if (status & RIRB_INT_RESPONSE) {
1736 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1738 azx_update_rirb(chip);
1740 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1743 spin_unlock(&chip->reg_lock);
1747 EXPORT_SYMBOL_GPL(azx_interrupt);
1754 * Probe the given codec address
/*
 * probe_codec - check whether a codec responds at the given address
 * by reading its vendor-ID parameter on the root node, serialized via
 * the bus command mutex.  Logs success; the error return on a missing
 * response is not visible in this excerpt.
 */
1756 static int probe_codec(struct azx *chip, int addr)
1758 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1759 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1762 mutex_lock(&chip->bus->cmd_mutex);
1764 azx_send_cmd(chip->bus, cmd);
1765 res = azx_get_response(chip->bus, addr);
1767 mutex_unlock(&chip->bus->cmd_mutex);
1770 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
/*
 * azx_bus_reset - bus_ops callback: fully stop and re-init the
 * controller, then (if the re-init succeeded) tell the codec layer to
 * re-sync its state.
 */
1774 static void azx_bus_reset(struct hda_bus *bus)
1776 struct azx *chip = bus->private_data;
1779 azx_stop_chip(chip);
1780 azx_init_chip(chip, true);
1781 if (chip->initialized)
1782 snd_hda_bus_reset(chip->bus);
/*
 * get_jackpoll_interval - translate the per-card jackpoll_ms module
 * parameter into jiffies; 0 (polling disabled) when unset, and values
 * outside 50..60000 ms are rejected with a warning.
 *
 * NOTE(review): the return statements between the visible lines are
 * missing from this excerpt, which is why the dev_warn() appears to
 * follow the msecs_to_jiffies() conversion.
 */
1786 static int get_jackpoll_interval(struct azx *chip)
1791 if (!chip->jackpoll_ms)
1794 i = chip->jackpoll_ms[chip->dev_index];
1797 if (i < 50 || i > 60000)
1800 j = msecs_to_jiffies(i);
1802 dev_warn(chip->card->dev,
1803 "jackpoll_ms value out of range: %d\n", i);
/*
 * bus_ops - callbacks handed to the hda_codec core: verb send/receive,
 * PCM attachment, bus reset, and (when the DSP loader is built in)
 * the firmware-download hooks.
 */
1807 static struct hda_bus_ops bus_ops = {
1808 .command = azx_send_cmd,
1809 .get_response = azx_get_response,
1810 .attach_pcm = azx_attach_pcm_stream,
1811 .bus_reset = azx_bus_reset,
1812 #ifdef CONFIG_SND_HDA_DSP_LOADER
1813 .load_dsp_prepare = azx_load_dsp_prepare,
1814 .load_dsp_trigger = azx_load_dsp_trigger,
1815 .load_dsp_cleanup = azx_load_dsp_cleanup,
1819 /* HD-audio bus initialization */
/*
 * azx_bus_create - allocate the hda_bus, wire it to this chip and
 * apply chipset quirks: long RIRB delays (AZX_DCAPS_RIRB_DELAY) and
 * synced register writes for AMD chipsets that stall otherwise
 * (AZX_DCAPS_SYNC_WRITE).
 */
1820 int azx_bus_create(struct azx *chip, const char *model)
1822 struct hda_bus *bus;
1825 err = snd_hda_bus_new(chip->card, &bus);
1830 bus->private_data = chip;
1831 bus->pci = chip->pci;
1832 bus->modelname = model;
1835 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1836 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1837 bus->needs_damn_long_delay = 1;
1840 /* AMD chipsets often cause the communication stalls upon certain
1841 * sequence like the pin-detection. It seems that forcing the synced
1842 * access works around the stall. Grrr...
1844 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1845 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1846 bus->sync_write = 1;
1847 bus->allow_bus_reset = 1;
1852 EXPORT_SYMBOL_GPL(azx_bus_create);
/*
 * azx_probe_codecs - two-pass codec discovery.
 *
 * Pass 1: probe every slot allowed by both codec_mask (hardware
 * presence) and codec_probe_mask (user/quirk filter); a probe failure
 * disables the slot and resets the whole controller, since talking to
 * a bogus slot can wedge the chip.  Pass 2: create a hda_codec
 * instance for each surviving slot and apply jack-poll interval and
 * beep mode.  Errors out if no codec could be initialized.
 */
1855 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1857 struct hda_bus *bus = chip->bus;
1862 max_slots = AZX_DEFAULT_CODECS;
1864 /* First try to probe all given codec slots */
1865 for (c = 0; c < max_slots; c++) {
1866 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1867 if (probe_codec(chip, c) < 0) {
1868 /* Some BIOSen give you wrong codec addresses
1871 dev_warn(chip->card->dev,
1872 "Codec #%d probe error; disabling it...\n", c);
1873 chip->codec_mask &= ~(1 << c);
1874 /* More badly, accessing to a non-existing
1875 * codec often screws up the controller chip,
1876 * and disturbs the further communications.
1877 * Thus if an error occurs during probing,
1878 * better to reset the controller chip to
1879 * get back to the sanity state.
1881 azx_stop_chip(chip);
1882 azx_init_chip(chip, true);
1887 /* Then create codec instances */
1888 for (c = 0; c < max_slots; c++) {
1889 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1890 struct hda_codec *codec;
1891 err = snd_hda_codec_new(bus, bus->card, c, &codec);
1894 codec->jackpoll_interval = get_jackpoll_interval(chip);
1895 codec->beep_mode = chip->beep_mode;
/* fail only if not a single codec came up */
1900 dev_err(chip->card->dev, "no codecs initialized\n");
1905 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1907 /* configure each codec instance */
/*
 * azx_codec_configure - run snd_hda_codec_configure() on every codec
 * attached to this bus.
 */
1908 int azx_codec_configure(struct azx *chip)
1910 struct hda_codec *codec;
1911 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1912 snd_hda_codec_configure(codec);
1916 EXPORT_SYMBOL_GPL(azx_codec_configure);
/*
 * is_input_stream - true if the stream index falls inside the capture
 * (input) stream range [capture_index_offset,
 * capture_index_offset + capture_streams).
 */
1919 static bool is_input_stream(struct azx *chip, unsigned char index)
1921 return (index >= chip->capture_index_offset &&
1922 index < chip->capture_index_offset + chip->capture_streams);
1925 /* initialize SD streams */
/*
 * azx_init_stream - initialize the per-stream descriptors: position
 * buffer slot (8 bytes each), the MMIO base of each stream descriptor
 * (0x20 apart starting at 0x80), the interrupt status bit, and the
 * stream tag.  Tags must be unique within a direction group
 * (valid 1..15); with AZX_DCAPS_SEPARATE_STREAM_TAG input and output
 * streams get independent tag counters, otherwise tags are simply
 * index + 1.
 */
1926 int azx_init_stream(struct azx *chip)
1929 int in_stream_tag = 0;
1930 int out_stream_tag = 0;
1932 /* initialize each stream (aka device)
1933 * assign the starting bdl address to each stream (device)
1936 for (i = 0; i < chip->num_streams; i++) {
1937 struct azx_dev *azx_dev = &chip->azx_dev[i];
1938 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1939 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1940 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1941 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1942 azx_dev->sd_int_sta_mask = 1 << i;
/* note: azx_dev->index assignment not visible in this excerpt */
1945 /* stream tag must be unique throughout
1946 * the stream direction group,
1947 * valid values 1...15
1948 * use separate stream tag if the flag
1949 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
1951 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1952 azx_dev->stream_tag =
1953 is_input_stream(chip, i) ?
1957 azx_dev->stream_tag = i + 1;
1962 EXPORT_SYMBOL_GPL(azx_init_stream);
1965 * reboot notifier for hang-up problem at power-down
/*
 * azx_halt - reboot-notifier callback: notify the codec layer and
 * stop the controller so the hardware is quiescent at power-down
 * (works around hang-ups on some machines during shutdown).
 */
1967 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1969 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1970 snd_hda_bus_reboot_notify(chip->bus);
1971 azx_stop_chip(chip);
/*
 * azx_notifier_register - hook azx_halt() into the kernel reboot
 * notifier chain for this chip.
 */
1975 void azx_notifier_register(struct azx *chip)
1977 chip->reboot_notifier.notifier_call = azx_halt;
1978 register_reboot_notifier(&chip->reboot_notifier);
1980 EXPORT_SYMBOL_GPL(azx_notifier_register);
/*
 * azx_notifier_unregister - remove the reboot notifier; the
 * notifier_call check makes this safe when it was never registered.
 */
1982 void azx_notifier_unregister(struct azx *chip)
1984 if (chip->reboot_notifier.notifier_call)
1985 unregister_reboot_notifier(&chip->reboot_notifier);
1987 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
1989 MODULE_LICENSE("GPL");
1990 MODULE_DESCRIPTION("Common HDA driver functions");