3 * Implementation of primary alsa driver code base for Intel HD Audio.
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/reboot.h>
31 #include <sound/core.h>
32 #include <sound/initval.h>
33 #include "hda_controller.h"
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
38 /* DSP lock helpers */
/*
 * When the DSP loader is configured these map onto a real mutex plus a
 * per-stream "locked" flag in struct azx_dev; otherwise they compile away.
 * NOTE(review): the "#else" that separates the two macro groups appears to
 * have been dropped from this listing -- confirm against upstream source.
 */
39 #ifdef CONFIG_SND_HDA_DSP_LOADER
40 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43 #define dsp_is_locked(dev) ((dev)->locked)
45 #define dsp_lock_init(dev) do {} while (0)
46 #define dsp_lock(dev) do {} while (0)
47 #define dsp_unlock(dev) do {} while (0)
48 #define dsp_is_locked(dev) 0
52 * AZX stream operations.
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
/*
 * Start DMA on one stream: mark the "first interrupts may be spurious"
 * state, unmask this stream's bit in the global INTCTL register, then set
 * the RUN bit and per-stream interrupt enables in SD_CTL.
 * NOTE(review): braces and possibly a trace call are elided in this listing.
 */
59 * Before stream start, initialize parameter
61 azx_dev->insufficient = 1;
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
/*
 * Stop the stream DMA engine: clear RUN and the per-stream interrupt
 * enables in SD_CTL, then write back the status bits to ack anything
 * still pending.  Does NOT touch the global INTCTL mask.
 */
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
/*
 * Full stream stop, exported for other driver modules: stop DMA via
 * azx_stream_clear(), then also mask this stream's bit in the global
 * interrupt control register so no further IRQs arrive for it.
 */
84 azx_stream_clear(chip, azx_dev);
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
/*
 * Cycle the stream descriptor through a hardware reset: stop DMA, assert
 * SD_CTL_STREAM_RESET, poll until the hardware reports it entered reset,
 * deassert the bit, poll until it reports it left reset, then zero the
 * cached DMA position in the position buffer.
 * NOTE(review): declarations of "val"/"timeout", the udelay() poll bodies
 * and timeout re-initialization are elided from this listing.
 */
97 azx_stream_clear(chip, azx_dev);
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
122 * set up the SD for streaming
/*
 * Program one stream descriptor from the cached azx_dev fields: stream
 * tag, cyclic-buffer length (CBL), stream format, last-valid-index (LVI),
 * BDL base address (lower/upper 32 bits), DMA position buffer enable and
 * per-stream interrupt enables.  Caller must have filled in bufsize,
 * format_val, frags and bdl beforehand (see azx_setup_periods()).
 * NOTE(review): the "val" declaration and return statement are elided.
 */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
/* non-snoop mode needs the traffic-priority bit set */
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
154 /* enable the position buffer */
/* skipped when both playback and capture use LPIB for positions */
155 if (chip->get_position[0] != azx_get_pos_lpib ||
156 chip->get_position[1] != azx_get_pos_lpib) {
157 if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
169 /* assign a stream for the PCM */
/*
 * Pick a free hardware stream (azx_dev) for this substream.  Scans the
 * playback or capture index range and prefers a stream whose
 * assigned_key matches, so a substream tends to get the same hardware
 * stream it used last time.  Returns NULL if none is free.
 * NOTE(review): declarations of dev/i/nums, the opened/assignment logic
 * and the return are partially elided in this listing.
 */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
192 azx_dev->assigned_key = key;
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
205 res->assigned_key = key;
211 /* release the assigned stream */
/* Marks the azx_dev free again (body elided in this listing -- upstream
 * clears azx_dev->opened). */
212 static inline void azx_release_device(struct azx_dev *azx_dev)
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
/*
 * cyclecounter read callback: recover the owning azx_dev from the
 * embedded cyclecounter and return the controller's free-running
 * 24 MHz wall clock register value.
 */
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
224 return azx_readl(chip, WALLCLK);
227 static void azx_timecounter_init(struct snd_pcm_substream *substream,
228 bool force, cycle_t last)
/*
 * (Re)initialize the per-stream timecounter over the 24 MHz WALLCLK
 * cyclecounter.  Only mult (x125) is applied here; the final /3 of the
 * 125/3 ns conversion is done by the reader (see azx_get_time_info()).
 * When @force is set the caller provides @last so grouped streams share
 * one start cycle.  NOTE(review): nsec declaration, cc->shift setup and
 * the "if (force)" line appear to be elided in this listing.
 */
230 struct azx_dev *azx_dev = get_azx_dev(substream);
231 struct timecounter *tc = &azx_dev->azx_tc;
232 struct cyclecounter *cc = &azx_dev->azx_cc;
235 cc->read = azx_cc_read;
236 cc->mask = CLOCKSOURCE_MASK(32);
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for a decent precision, however
245 * overflows occur after about 4 hours or less, not a option.
248 cc->mult = 125; /* saturation after 195 years */
251 nsec = 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc, cc, nsec);
255 * force timecounter to use predefined value,
256 * used for synchronized starts
258 tc->cycle_last = last;
261 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
/*
 * Fold the codec-reported delay (frames, converted to ns at the stream
 * rate) into a link timestamp: added for capture, subtracted (clamped at
 * zero) for playback.  Returns @nsec unchanged when the codec has no
 * get_delay op.  NOTE(review): the "u64 nsec" parameter line and the
 * early "return nsec" are elided in this listing.
 */
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
266 u64 codec_frames, codec_nsecs;
268 if (!hinfo->ops.get_delay)
271 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
272 codec_nsecs = div_u64(codec_frames * 1000000000LL,
273 substream->runtime->rate);
275 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
276 return nsec + codec_nsecs;
278 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
284 static int setup_bdle(struct azx *chip,
285 struct snd_dma_buffer *dmab,
286 struct azx_dev *azx_dev, u32 **bdlp,
287 int ofs, int size, int with_ioc)
/*
 * Append buffer-descriptor-list entries covering @size bytes starting at
 * buffer offset @ofs.  Each entry is 4 dwords: address lo/hi, length,
 * and IOC flag (interrupt set only on the entry that completes the
 * fragment, and only when @with_ioc).  Honors the 4K-boundary quirk for
 * CTHDA chips.  Advances *bdlp and azx_dev->frags; returns the new
 * offset, or an error when AZX_MAX_BDL_ENTRIES is exceeded.
 * NOTE(review): the loop structure, chunk clamping against "remain",
 * and the ofs/size bookkeeping lines are elided in this listing.
 */
295 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
298 addr = snd_sgbuf_get_addr(dmab, ofs);
299 /* program the address field of the BDL entry */
300 bdl[0] = cpu_to_le32((u32)addr);
301 bdl[1] = cpu_to_le32(upper_32_bits(addr));
302 /* program the size field of the BDL entry */
303 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
304 /* one BDLE cannot cross 4K boundary on CTHDA chips */
305 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
306 u32 remain = 0x1000 - (ofs & 0xfff);
310 bdl[2] = cpu_to_le32(chunk);
311 /* program the IOC to enable interrupt
312 * only when the whole fragment is processed
315 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
327 static int azx_setup_periods(struct azx *chip,
328 struct snd_pcm_substream *substream,
329 struct azx_dev *azx_dev)
/*
 * Build the whole BDL for the cyclic buffer, one chain of entries per
 * period.  When bdl_pos_adj is configured, the first period is split so
 * its wakeup IRQ fires slightly late (pos_adj bytes in, rounded up to a
 * frame multiple) to compensate for inaccurate DMA position reporting;
 * the last period is shortened by the same amount.
 * NOTE(review): pos_adj initialization, error-path labels and the final
 * return are elided in this listing.
 */
332 int i, ofs, periods, period_bytes;
335 /* reset BDL address */
336 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
337 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339 period_bytes = azx_dev->period_bytes;
340 periods = azx_dev->bufsize / period_bytes;
342 /* program the initial BDL entries */
343 bdl = (u32 *)azx_dev->bdl.area;
347 if (chip->bdl_pos_adj)
348 pos_adj = chip->bdl_pos_adj[chip->dev_index];
349 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
350 struct snd_pcm_runtime *runtime = substream->runtime;
351 int pos_align = pos_adj;
/* scale the 48kHz-based adjustment to the actual rate, rounding up */
352 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
356 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 pos_adj = frames_to_bytes(runtime, pos_adj);
359 if (pos_adj >= period_bytes) {
360 dev_warn(chip->card->dev,"Too big adjustment %d\n",
364 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 &bdl, ofs, pos_adj, true);
373 for (i = 0; i < periods; i++) {
/* last period is shortened by pos_adj to keep the total length exact */
374 if (i == periods - 1 && pos_adj)
375 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 period_bytes - pos_adj, 0);
379 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
382 !azx_dev->no_period_wakeup);
389 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
390 azx_dev->bufsize, period_bytes);
398 static int azx_pcm_close(struct snd_pcm_substream *substream)
/*
 * PCM close callback: under open_mutex, detach the substream from its
 * hardware stream (substream pointer and running flag cleared under the
 * register spinlock), release the stream, call the codec's close op and
 * drop the codec power reference.  NOTE(review): the "flags" declaration
 * and "return 0" are elided in this listing.
 */
400 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
401 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
402 struct azx *chip = apcm->chip;
403 struct azx_dev *azx_dev = get_azx_dev(substream);
406 mutex_lock(&chip->open_mutex);
407 spin_lock_irqsave(&chip->reg_lock, flags);
408 azx_dev->substream = NULL;
409 azx_dev->running = 0;
410 spin_unlock_irqrestore(&chip->reg_lock, flags);
411 azx_release_device(azx_dev);
412 hinfo->ops.close(hinfo, apcm->codec, substream);
413 snd_hda_power_down(apcm->codec);
414 mutex_unlock(&chip->open_mutex);
418 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
419 struct snd_pcm_hw_params *hw_params)
/*
 * PCM hw_params callback: allocate the DMA buffer pages via the chip ops.
 * Refuses while the stream is locked by the DSP loader.  dsp_lock()
 * guards against racing with DSP firmware loading on the same stream.
 * NOTE(review): the error return inside the dsp_is_locked branch and the
 * final "return ret" are elided in this listing.
 */
421 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
422 struct azx *chip = apcm->chip;
425 dsp_lock(get_azx_dev(substream));
426 if (dsp_is_locked(get_azx_dev(substream))) {
431 ret = chip->ops->substream_alloc_pages(chip, substream,
432 params_buffer_bytes(hw_params));
434 dsp_unlock(get_azx_dev(substream));
438 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
/*
 * PCM hw_free callback: unless the stream is held by the DSP loader,
 * zero the BDL registers and SD_CTL and clear the cached stream setup so
 * the next prepare reprograms everything; then clean up the codec side
 * and free the DMA pages.  NOTE(review): the "err" declaration, dsp_lock
 * bracketing and final return are elided in this listing.
 */
440 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
441 struct azx_dev *azx_dev = get_azx_dev(substream);
442 struct azx *chip = apcm->chip;
443 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
446 /* reset BDL address */
448 if (!dsp_is_locked(azx_dev)) {
449 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
450 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
451 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
452 azx_dev->bufsize = 0;
453 azx_dev->period_bytes = 0;
454 azx_dev->format_val = 0;
457 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459 err = chip->ops->substream_free_pages(chip, substream);
460 azx_dev->prepared = 0;
465 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
/*
 * PCM prepare callback: reset the hardware stream, compute the HDA
 * format value from the runtime parameters, rebuild the BDL when any of
 * bufsize/period_bytes/format/no_period_wakeup changed, program the
 * stream descriptor, cache the FIFO size (playback only) and finally
 * prepare the codec with a possibly remapped stream tag.
 * NOTE(review): declarations (err etc.), the format-calc argument list,
 * dsp_lock bracketing and several returns are elided in this listing.
 */
467 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
468 struct azx *chip = apcm->chip;
469 struct azx_dev *azx_dev = get_azx_dev(substream);
470 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
471 struct snd_pcm_runtime *runtime = substream->runtime;
472 unsigned int bufsize, period_bytes, format_val, stream_tag;
474 struct hda_spdif_out *spdif =
475 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
476 unsigned short ctls = spdif ? spdif->ctls : 0;
479 if (dsp_is_locked(azx_dev)) {
484 azx_stream_reset(chip, azx_dev);
485 format_val = snd_hda_calc_stream_format(apcm->codec,
492 dev_err(chip->card->dev,
493 "invalid format_val, rate=%d, ch=%d, format=%d\n",
494 runtime->rate, runtime->channels, runtime->format);
499 bufsize = snd_pcm_lib_buffer_bytes(substream);
500 period_bytes = snd_pcm_lib_period_bytes(substream);
502 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
503 bufsize, format_val);
/* rebuild the BDL only when the effective stream setup changed */
505 if (bufsize != azx_dev->bufsize ||
506 period_bytes != azx_dev->period_bytes ||
507 format_val != azx_dev->format_val ||
508 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
509 azx_dev->bufsize = bufsize;
510 azx_dev->period_bytes = period_bytes;
511 azx_dev->format_val = format_val;
512 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
513 err = azx_setup_periods(chip, substream, azx_dev);
518 /* when LPIB delay correction gives a small negative value,
519 * we ignore it; currently set the threshold statically to
522 if (runtime->period_size > 64)
523 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
525 azx_dev->delay_negative_threshold = 0;
527 /* wallclk has 24Mhz clock source */
528 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
529 runtime->rate) * 1000);
530 azx_setup_controller(chip, azx_dev);
531 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
533 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
535 azx_dev->fifo_size = 0;
537 stream_tag = azx_dev->stream_tag;
538 /* CA-IBG chips need the playback stream starting from 1 */
539 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
540 stream_tag > chip->capture_streams)
541 stream_tag -= chip->capture_streams;
542 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
543 azx_dev->format_val, substream);
547 azx_dev->prepared = 1;
552 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
/*
 * PCM trigger callback with synchronized-start support.  Phases:
 *  1. collect the group members' stream bits (sbits);
 *  2. under reg_lock, set the SSYNC bits (legacy OLD_SSYNC register on
 *     quirky chips) and start/stop each grouped stream, recording the
 *     wall clock at start (backdated one period for RESUME/PAUSE_RELEASE
 *     restarts);
 *  3. busy-wait until every FIFO reports ready (start) or every RUN bit
 *     clears (stop), bounded by a 5000-iteration timeout;
 *  4. under reg_lock again, clear the SSYNC bits and re-seed the
 *     timecounters so the whole group shares one start cycle.
 * NOTE(review): switch-case bodies, "start"/"rstart" assignments,
 * timeout/cycle_last declarations, several braces and returns are elided
 * in this listing -- confirm against upstream before relying on details.
 */
554 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
555 struct azx *chip = apcm->chip;
556 struct azx_dev *azx_dev;
557 struct snd_pcm_substream *s;
558 int rstart = 0, start, nsync = 0, sbits = 0;
561 azx_dev = get_azx_dev(substream);
562 trace_azx_pcm_trigger(chip, azx_dev, cmd);
564 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
568 case SNDRV_PCM_TRIGGER_START:
570 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
571 case SNDRV_PCM_TRIGGER_RESUME:
574 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
575 case SNDRV_PCM_TRIGGER_SUSPEND:
576 case SNDRV_PCM_TRIGGER_STOP:
583 snd_pcm_group_for_each_entry(s, substream) {
584 if (s->pcm->card != substream->pcm->card)
586 azx_dev = get_azx_dev(s);
587 sbits |= 1 << azx_dev->index;
589 snd_pcm_trigger_done(s, substream);
592 spin_lock(&chip->reg_lock);
594 /* first, set SYNC bits of corresponding streams */
595 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
596 azx_writel(chip, OLD_SSYNC,
597 azx_readl(chip, OLD_SSYNC) | sbits);
599 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
601 snd_pcm_group_for_each_entry(s, substream) {
602 if (s->pcm->card != substream->pcm->card)
604 azx_dev = get_azx_dev(s);
606 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
608 azx_dev->start_wallclk -=
609 azx_dev->period_wallclk;
610 azx_stream_start(chip, azx_dev);
612 azx_stream_stop(chip, azx_dev);
614 azx_dev->running = start;
616 spin_unlock(&chip->reg_lock);
618 /* wait until all FIFOs get ready */
619 for (timeout = 5000; timeout; timeout--) {
621 snd_pcm_group_for_each_entry(s, substream) {
622 if (s->pcm->card != substream->pcm->card)
624 azx_dev = get_azx_dev(s);
625 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
634 /* wait until all RUN bits are cleared */
635 for (timeout = 5000; timeout; timeout--) {
637 snd_pcm_group_for_each_entry(s, substream) {
638 if (s->pcm->card != substream->pcm->card)
640 azx_dev = get_azx_dev(s);
641 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
650 spin_lock(&chip->reg_lock);
651 /* reset SYNC bits */
652 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
653 azx_writel(chip, OLD_SSYNC,
654 azx_readl(chip, OLD_SSYNC) & ~sbits);
656 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
658 azx_timecounter_init(substream, 0, 0);
659 snd_pcm_gettime(substream->runtime, &substream->runtime->trigger_tstamp);
660 substream->runtime->trigger_tstamp_latched = true;
665 /* same start cycle for master and group */
666 azx_dev = get_azx_dev(substream);
667 cycle_last = azx_dev->azx_tc.cycle_last;
669 snd_pcm_group_for_each_entry(s, substream) {
670 if (s->pcm->card != substream->pcm->card)
672 azx_timecounter_init(s, 1, cycle_last);
676 spin_unlock(&chip->reg_lock);
680 unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
/* Current DMA position read directly from the LPIB (link position in
 * buffer) register. */
682 return azx_sd_readl(chip, azx_dev, SD_LPIB);
684 EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
686 unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
/* Current DMA position read from the DMA position buffer entry the
 * controller updates in host memory (little-endian). */
688 return le32_to_cpu(*azx_dev->posbuf);
690 EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
692 unsigned int azx_get_position(struct azx *chip,
693 struct azx_dev *azx_dev)
/*
 * Return the stream's current byte position, using the chip-specific
 * get_position callback for the direction if set, else the position
 * buffer.  Also refreshes runtime->delay from the chip- and
 * codec-provided delay callbacks.  NOTE(review): pos/delay declarations,
 * the pos-wraparound handling and the return are elided in this listing.
 */
695 struct snd_pcm_substream *substream = azx_dev->substream;
697 int stream = substream->stream;
700 if (chip->get_position[stream])
701 pos = chip->get_position[stream](chip, azx_dev);
702 else /* use the position buffer as default */
703 pos = azx_get_pos_posbuf(chip, azx_dev);
705 if (pos >= azx_dev->bufsize)
708 if (substream->runtime) {
709 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
710 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
712 if (chip->get_delay[stream])
713 delay += chip->get_delay[stream](chip, azx_dev, pos);
714 if (hinfo->ops.get_delay)
715 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
717 substream->runtime->delay = delay;
720 trace_azx_get_position(chip, azx_dev, pos, delay);
723 EXPORT_SYMBOL_GPL(azx_get_position);
725 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
/* PCM pointer callback: byte position from azx_get_position() converted
 * to frames for the ALSA core. */
727 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
728 struct azx *chip = apcm->chip;
729 struct azx_dev *azx_dev = get_azx_dev(substream);
730 return bytes_to_frames(substream->runtime,
731 azx_get_position(chip, azx_dev));
734 static int azx_get_time_info(struct snd_pcm_substream *substream,
735 struct timespec *system_ts, struct timespec *audio_ts,
736 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
737 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
/*
 * Supply a link (wall-clock) audio timestamp when the stream advertises
 * SNDRV_PCM_INFO_HAS_LINK_ATIME and the user asked for TSTAMP_TYPE_LINK:
 * read the 24 MHz timecounter, apply the deferred /3 of the 125/3 ns
 * conversion (see azx_timecounter_init()), optionally fold in codec
 * delay, and report 42 ns accuracy.  Otherwise fall back to the DEFAULT
 * timestamp type.  NOTE(review): the nsec declaration and return are
 * elided in this listing.
 */
739 struct azx_dev *azx_dev = get_azx_dev(substream);
742 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
743 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
745 snd_pcm_gettime(substream->runtime, system_ts);
747 nsec = timecounter_read(&azx_dev->azx_tc);
748 nsec = div_u64(nsec, 3); /* can be optimized */
749 if (audio_tstamp_config->report_delay)
750 nsec = azx_adjust_codec_delay(substream, nsec);
752 *audio_ts = ns_to_timespec(nsec);
754 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
755 audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
756 audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
759 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
764 static struct snd_pcm_hardware azx_pcm_hw = {
/*
 * Baseline hardware capability template copied into each runtime at
 * open time; azx_pcm_open() then narrows formats/rates/channels to what
 * the codec stream actually supports.  NOTE(review): several fields
 * (rate_min/max, channels, buffer min, periods_min, fifo_size) are
 * elided in this listing.
 */
765 .info = (SNDRV_PCM_INFO_MMAP |
766 SNDRV_PCM_INFO_INTERLEAVED |
767 SNDRV_PCM_INFO_BLOCK_TRANSFER |
768 SNDRV_PCM_INFO_MMAP_VALID |
769 /* No full-resume yet implemented */
770 /* SNDRV_PCM_INFO_RESUME |*/
771 SNDRV_PCM_INFO_PAUSE |
772 SNDRV_PCM_INFO_SYNC_START |
773 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
774 SNDRV_PCM_INFO_HAS_LINK_ATIME |
775 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
776 .formats = SNDRV_PCM_FMTBIT_S16_LE,
777 .rates = SNDRV_PCM_RATE_48000,
782 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
783 .period_bytes_min = 128,
784 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
786 .periods_max = AZX_MAX_FRAG,
790 static int azx_pcm_open(struct snd_pcm_substream *substream)
/*
 * PCM open callback.  Under open_mutex: grab a free hardware stream,
 * seed runtime->hw from the azx_pcm_hw template narrowed by the codec's
 * capabilities, apply buffer/period constraints (128-byte steps when
 * align_buffer_size, else 4-byte HDA minimum), power up the codec and
 * call its open op, sanity-check the resulting hw fields, drop the
 * LINK_ATIME/WALL_CLOCK flags for capture, then attach the substream to
 * the stream under reg_lock.  Error paths unwind device/power/mutex.
 * NOTE(review): err/flags declarations, the buffer-time constraint
 * arguments, several returns and the else branch bracketing are elided
 * in this listing.
 */
792 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
793 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
794 struct azx *chip = apcm->chip;
795 struct azx_dev *azx_dev;
796 struct snd_pcm_runtime *runtime = substream->runtime;
801 mutex_lock(&chip->open_mutex);
802 azx_dev = azx_assign_device(chip, substream);
803 if (azx_dev == NULL) {
804 mutex_unlock(&chip->open_mutex);
807 runtime->hw = azx_pcm_hw;
808 runtime->hw.channels_min = hinfo->channels_min;
809 runtime->hw.channels_max = hinfo->channels_max;
810 runtime->hw.formats = hinfo->formats;
811 runtime->hw.rates = hinfo->rates;
812 snd_pcm_limit_hw_rates(runtime);
813 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
815 /* avoid wrap-around with wall-clock */
816 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
820 if (chip->align_buffer_size)
821 /* constrain buffer sizes to be multiple of 128
822 bytes. This is more efficient in terms of memory
823 access but isn't required by the HDA spec and
824 prevents users from specifying exact period/buffer
825 sizes. For example for 44.1kHz, a period size set
826 to 20ms will be rounded to 19.59ms. */
829 /* Don't enforce steps on buffer sizes, still need to
830 be multiple of 4 bytes (HDA spec). Tested on Intel
831 HDA controllers, may not work on all devices where
832 option needs to be disabled */
835 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
837 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
839 snd_hda_power_up_d3wait(apcm->codec);
840 err = hinfo->ops.open(hinfo, apcm->codec, substream);
842 azx_release_device(azx_dev);
843 snd_hda_power_down(apcm->codec);
844 mutex_unlock(&chip->open_mutex);
847 snd_pcm_limit_hw_rates(runtime);
849 if (snd_BUG_ON(!runtime->hw.channels_min) ||
850 snd_BUG_ON(!runtime->hw.channels_max) ||
851 snd_BUG_ON(!runtime->hw.formats) ||
852 snd_BUG_ON(!runtime->hw.rates)) {
853 azx_release_device(azx_dev);
854 hinfo->ops.close(hinfo, apcm->codec, substream);
855 snd_hda_power_down(apcm->codec);
856 mutex_unlock(&chip->open_mutex);
860 /* disable LINK_ATIME timestamps for capture streams
861 until we figure out how to handle digital inputs */
862 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
863 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
864 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
867 spin_lock_irqsave(&chip->reg_lock, flags);
868 azx_dev->substream = substream;
869 azx_dev->running = 0;
870 spin_unlock_irqrestore(&chip->reg_lock, flags);
872 runtime->private_data = azx_dev;
873 snd_pcm_set_sync(substream);
874 mutex_unlock(&chip->open_mutex);
878 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
879 struct vm_area_struct *area)
/*
 * PCM mmap callback: let the chip ops tweak the vma first (e.g. cache
 * attributes on some platforms), then fall through to the default ALSA
 * buffer mmap.
 */
881 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
882 struct azx *chip = apcm->chip;
883 if (chip->ops->pcm_mmap_prepare)
884 chip->ops->pcm_mmap_prepare(substream, area);
885 return snd_pcm_lib_default_mmap(substream, area);
888 static struct snd_pcm_ops azx_pcm_ops = {
/* ALSA PCM operation table wiring the callbacks defined above; installed
 * on both directions by azx_attach_pcm_stream(). */
889 .open = azx_pcm_open,
890 .close = azx_pcm_close,
891 .ioctl = snd_pcm_lib_ioctl,
892 .hw_params = azx_pcm_hw_params,
893 .hw_free = azx_pcm_hw_free,
894 .prepare = azx_pcm_prepare,
895 .trigger = azx_pcm_trigger,
896 .pointer = azx_pcm_pointer,
897 .get_time_info = azx_get_time_info,
898 .mmap = azx_pcm_mmap,
899 .page = snd_pcm_sgbuf_ops_page,
902 static void azx_pcm_free(struct snd_pcm *pcm)
/* private_free hook: unlink the azx_pcm from the chip's pcm_list when the
 * PCM device is destroyed.  NOTE(review): the kfree and list-guard lines
 * appear to be elided in this listing. */
904 struct azx_pcm *apcm = pcm->private_data;
906 list_del(&apcm->list);
/* upper bound for per-PCM buffer preallocation (32 MB) */
911 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
913 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
914 struct hda_pcm *cpcm)
/*
 * hda_bus op: create the ALSA PCM device for a codec PCM.  Rejects a
 * duplicate device number, allocates and links an azx_pcm wrapper,
 * installs azx_pcm_ops on whichever directions have substreams,
 * preallocates SG buffers (capped at MAX_PREALLOC_SIZE) and parents the
 * PCM devices to the codec.  NOTE(review): declarations (pcm/s/size/err),
 * error returns and apcm field initialization are elided in this listing.
 */
916 struct azx *chip = bus->private_data;
918 struct azx_pcm *apcm;
919 int pcm_dev = cpcm->device;
923 list_for_each_entry(apcm, &chip->pcm_list, list) {
924 if (apcm->pcm->device == pcm_dev) {
925 dev_err(chip->card->dev, "PCM %d already exists\n",
930 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
931 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
932 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
936 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
937 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
943 pcm->private_data = apcm;
944 pcm->private_free = azx_pcm_free;
945 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
946 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
947 list_add_tail(&apcm->list, &chip->pcm_list);
949 for (s = 0; s < 2; s++) {
950 apcm->hinfo[s] = &cpcm->stream[s];
951 if (cpcm->stream[s].substreams)
952 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
954 /* buffer pre-allocation */
955 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
956 if (size > MAX_PREALLOC_SIZE)
957 size = MAX_PREALLOC_SIZE;
958 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
960 size, MAX_PREALLOC_SIZE);
962 for (s = 0; s < 2; s++)
963 pcm->streams[s].dev.parent = &codec->dev;
968 * CORB / RIRB interface
/* Allocate one DMA page shared by the CORB (command outbound ring) and
 * RIRB (response inbound ring); see azx_init_cmd_io() for the layout.
 * NOTE(review): the "err" declaration and return are elided. */
970 static int azx_alloc_cmd_io(struct azx *chip)
974 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
975 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
976 PAGE_SIZE, &chip->rb);
978 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
982 static void azx_init_cmd_io(struct azx *chip)
/*
 * Program the CORB/RIRB hardware.  The shared DMA page is split: CORB at
 * offset 0, RIRB at offset 2048.  Sets both ring sizes to 256 entries
 * (ULI chips need this explicit), resets the CORB read pointer with a
 * two-stage 1000-iteration poll (skipped on CORBRP_SELF_CLEAR chips),
 * resets the RIRB write pointer, programs the response-interrupt count
 * (0xc0 on the Creative CTX workaround chips, otherwise every entry)
 * and enables both DMA engines plus the RIRB IRQ.  All under reg_lock.
 * NOTE(review): the "timeout" declaration and udelay() poll bodies are
 * elided in this listing.
 */
986 spin_lock_irq(&chip->reg_lock);
988 chip->corb.addr = chip->rb.addr;
989 chip->corb.buf = (u32 *)chip->rb.area;
990 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
991 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
993 /* set the corb size to 256 entries (ULI requires explicitly) */
994 azx_writeb(chip, CORBSIZE, 0x02);
995 /* set the corb write pointer to 0 */
996 azx_writew(chip, CORBWP, 0);
998 /* reset the corb hw read pointer */
999 azx_writew(chip, CORBRP, AZX_CORBRP_RST);
1000 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
1001 for (timeout = 1000; timeout > 0; timeout--) {
1002 if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
1007 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1008 azx_readw(chip, CORBRP));
1010 azx_writew(chip, CORBRP, 0);
1011 for (timeout = 1000; timeout > 0; timeout--) {
1012 if (azx_readw(chip, CORBRP) == 0)
1017 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1018 azx_readw(chip, CORBRP));
1021 /* enable corb dma */
1022 azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
1025 chip->rirb.addr = chip->rb.addr + 2048;
1026 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1027 chip->rirb.wp = chip->rirb.rp = 0;
1028 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1029 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1030 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1032 /* set the rirb size to 256 entries (ULI requires explicitly) */
1033 azx_writeb(chip, RIRBSIZE, 0x02);
1034 /* reset the rirb hw write pointer */
1035 azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
1036 /* set N=1, get RIRB response interrupt for new entry */
1037 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1038 azx_writew(chip, RINTCNT, 0xc0);
1040 azx_writew(chip, RINTCNT, 1);
1041 /* enable rirb dma and response irq */
1042 azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
1043 spin_unlock_irq(&chip->reg_lock);
1046 static void azx_free_cmd_io(struct azx *chip)
/* Stop both ring-buffer DMA engines (RIRB then CORB) under reg_lock;
 * the DMA page itself is freed elsewhere. */
1048 spin_lock_irq(&chip->reg_lock);
1049 /* disable ringbuffer DMAs */
1050 azx_writeb(chip, RIRBCTL, 0);
1051 azx_writeb(chip, CORBCTL, 0);
1052 spin_unlock_irq(&chip->reg_lock);
1055 static unsigned int azx_command_addr(u32 cmd)
/* Extract the codec address from the top nibble of an HDA verb and
 * range-check it against AZX_MAX_CODECS.  NOTE(review): the error
 * handling body and return are elided in this listing. */
1057 unsigned int addr = cmd >> 28;
1059 if (addr >= AZX_MAX_CODECS) {
1067 /* send a command */
/*
 * Queue one verb on the CORB: read the hardware write pointer (bailing
 * out if the read looks invalid, e.g. controller in D3), advance it
 * modulo 256 entries, check against the hardware read pointer for a full
 * ring, then store the verb, bump the per-codec outstanding-command
 * count and publish the new write pointer.  All under reg_lock.
 * NOTE(review): wp-validity check, full-ring comparison, wp increment
 * and returns are elided in this listing.
 */
1068 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1070 struct azx *chip = bus->private_data;
1071 unsigned int addr = azx_command_addr(val);
1072 unsigned int wp, rp;
1074 spin_lock_irq(&chip->reg_lock);
1076 /* add command to corb */
1077 wp = azx_readw(chip, CORBWP);
1079 /* something wrong, controller likely turned to D3 */
1080 spin_unlock_irq(&chip->reg_lock);
1084 wp %= AZX_MAX_CORB_ENTRIES;
1086 rp = azx_readw(chip, CORBRP);
1088 /* oops, it's full */
1089 spin_unlock_irq(&chip->reg_lock);
1093 chip->rirb.cmds[addr]++;
1094 chip->corb.buf[wp] = cpu_to_le32(val);
1095 azx_writew(chip, CORBWP, wp);
1097 spin_unlock_irq(&chip->reg_lock);
/* res_ex flag: entry is an unsolicited event, not a command response */
1102 #define AZX_RIRB_EX_UNSOL_EV (1<<4)
1104 /* retrieve RIRB entry - called from interrupt handler */
/*
 * Drain new RIRB entries up to the hardware write pointer.  Each 8-byte
 * entry carries the response and an extended word whose low nibble is
 * the codec address.  Entries are dispatched as: spurious (bad address),
 * unsolicited event (queued to the bus), expected response (stored and
 * outstanding count decremented), or spurious response (rate-limited
 * error).  Caller holds reg_lock.  NOTE(review): declarations
 * (res/res_ex/addr), wp-validity check, rp increment and loop braces
 * are elided in this listing.
 */
1105 static void azx_update_rirb(struct azx *chip)
1107 unsigned int rp, wp;
1111 wp = azx_readw(chip, RIRBWP);
1113 /* something wrong, controller likely turned to D3 */
1117 if (wp == chip->rirb.wp)
1121 while (chip->rirb.rp != wp) {
1123 chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
1125 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1126 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1127 res = le32_to_cpu(chip->rirb.buf[rp]);
1128 addr = res_ex & 0xf;
1129 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1130 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1134 } else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
1135 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1136 else if (chip->rirb.cmds[addr]) {
1137 chip->rirb.res[addr] = res;
1139 chip->rirb.cmds[addr]--;
1140 } else if (printk_ratelimit()) {
1141 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1143 chip->last_cmd[addr]);
1148 /* receive a response */
/*
 * Wait (up to 1s) for the codec's outstanding command count to drop and
 * return the stored response.  On timeout this implements the full
 * degradation ladder: (1) retry with one-shot polling, (2) switch
 * permanently to polling mode, (3) disable MSI and retry, (4) during
 * probe, give up on the codec slot, (5) request a bus reset if allowed,
 * (6) finally fall back to single_cmd mode, tearing down CORB/RIRB and
 * disabling unsolicited responses.  Polling paths call azx_update_rirb()
 * directly under reg_lock instead of relying on the IRQ handler.
 * NOTE(review): the "addr" parameter, do_poll handling, several "goto
 * again" retries and returns are elided in this listing.
 */
1149 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1152 struct azx *chip = bus->private_data;
1153 unsigned long timeout;
1154 unsigned long loopcounter;
1158 timeout = jiffies + msecs_to_jiffies(1000);
1160 for (loopcounter = 0;; loopcounter++) {
1161 if (chip->polling_mode || do_poll) {
1162 spin_lock_irq(&chip->reg_lock);
1163 azx_update_rirb(chip);
1164 spin_unlock_irq(&chip->reg_lock);
1166 if (!chip->rirb.cmds[addr]) {
1168 bus->rirb_error = 0;
1171 chip->poll_count = 0;
1172 return chip->rirb.res[addr]; /* the last value */
1174 if (time_after(jiffies, timeout))
1176 if (bus->needs_damn_long_delay || loopcounter > 3000)
1177 msleep(2); /* temporary workaround */
1184 if (!bus->no_response_fallback)
1187 if (!chip->polling_mode && chip->poll_count < 2) {
1188 dev_dbg(chip->card->dev,
1189 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1190 chip->last_cmd[addr]);
1197 if (!chip->polling_mode) {
1198 dev_warn(chip->card->dev,
1199 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1200 chip->last_cmd[addr]);
1201 chip->polling_mode = 1;
1206 dev_warn(chip->card->dev,
1207 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1208 chip->last_cmd[addr]);
1209 if (chip->ops->disable_msi_reset_irq(chip) &&
1210 chip->ops->disable_msi_reset_irq(chip) < 0) {
1211 bus->rirb_error = 1;
1217 if (chip->probing) {
1218 /* If this critical timeout happens during the codec probing
1219 * phase, this is likely an access to a non-existing codec
1220 * slot. Better to return an error and reset the system.
1225 /* a fatal communication error; need either to reset or to fallback
1226 * to the single_cmd mode
1228 bus->rirb_error = 1;
1229 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1230 bus->response_reset = 1;
1231 return -1; /* give a chance to retry */
1234 dev_err(chip->card->dev,
1235 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1236 chip->last_cmd[addr]);
1237 chip->single_cmd = 1;
1238 bus->response_reset = 0;
1239 /* release CORB/RIRB */
1240 azx_free_cmd_io(chip);
1241 /* disable unsolicited responses */
1242 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
1247 * Use the single immediate command instead of CORB/RIRB for simplicity
1249 * Note: according to Intel, this is not preferred use. The command was
1250 * intended for the BIOS only, and may get confused with unsolicited
1251 * responses. So, we shouldn't use it for normal operation from the
1253 * I left the codes, however, for debugging/testing purposes.
1256 /* receive a response */
/*
 * Single-command mode: poll the IRS valid bit for the immediate-command
 * response and stash it in rirb.res[addr] (reused as storage).  On
 * timeout the slot is set to -1 and a rate-limited debug message is
 * printed.  NOTE(review): the poll loop, udelay and returns are elided
 * in this listing.
 */
1257 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1262 /* check IRV busy bit */
1263 if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
1264 /* reuse rirb.res as the response return value */
1265 chip->rirb.res[addr] = azx_readl(chip, IR);
1270 if (printk_ratelimit())
1271 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1272 azx_readw(chip, IRS));
1273 chip->rirb.res[addr] = -1;
1277 /* send a command */
1278 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1280 struct azx *chip = bus->private_data;
1281 unsigned int addr = azx_command_addr(val);
1284 bus->rirb_error = 0;
1286 /* check ICB busy bit */
1287 if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
1288 /* Clear IRV valid bit */
1289 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1291 azx_writel(chip, IC, val);
1292 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1294 return azx_single_wait_for_response(chip, addr);
1298 if (printk_ratelimit())
1299 dev_dbg(chip->card->dev,
1300 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1301 azx_readw(chip, IRS), val);
1305 /* receive a response */
1306 static unsigned int azx_single_get_response(struct hda_bus *bus,
1309 struct azx *chip = bus->private_data;
1310 return chip->rirb.res[addr];
1314 * The below are the main callbacks from hda_codec.
1316 * They are just the skeleton to call sub-callbacks according to the
1317 * current setting of chip->single_cmd.
1320 /* send a command */
1321 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1323 struct azx *chip = bus->private_data;
1327 chip->last_cmd[azx_command_addr(val)] = val;
1328 if (chip->single_cmd)
1329 return azx_single_send_cmd(bus, val);
1331 return azx_corb_send_cmd(bus, val);
1334 /* get a response */
1335 static unsigned int azx_get_response(struct hda_bus *bus,
1338 struct azx *chip = bus->private_data;
1341 if (chip->single_cmd)
1342 return azx_single_get_response(bus, addr);
1344 return azx_rirb_get_response(bus, addr);
1347 #ifdef CONFIG_SND_HDA_DSP_LOADER
1349 * DSP loading code (e.g. for CA0132)
1352 /* use the first stream for loading DSP */
1353 static struct azx_dev *
1354 azx_get_dsp_loader_dev(struct azx *chip)
1356 return &chip->azx_dev[chip->playback_index_offset];
/*
 * Prepare the DSP-loader stream for firmware download (e.g. CA0132):
 * lock the stream, save its state, allocate the firmware DMA buffer,
 * program the BDL, and return the stream tag.
 * NOTE(review): this listing has gaps — the dsp_lock/dsp_unlock pairing,
 * error-label gotos and several declarations are elided; the visible
 * statements are not contiguous in the original file.
 */
1359 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1360 unsigned int byte_size,
1361 struct snd_dma_buffer *bufp)
1364 struct azx *chip = bus->private_data;
1365 struct azx_dev *azx_dev;
/* grab the dedicated DSP-loader stream (first playback stream) */
1368 azx_dev = azx_get_dsp_loader_dev(chip);
/* refuse if the stream is running or already locked by another DSP load */
1371 spin_lock_irq(&chip->reg_lock);
1372 if (azx_dev->running || azx_dev->locked) {
1373 spin_unlock_irq(&chip->reg_lock);
/* snapshot the stream state so the error/cleanup path can restore it */
1377 azx_dev->prepared = 0;
1378 chip->saved_azx_dev = *azx_dev;
1379 azx_dev->locked = 1;
1380 spin_unlock_irq(&chip->reg_lock);
/* allocate a scatter-gather buffer to hold the firmware image */
1382 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
/* single period spanning the whole firmware */
1387 azx_dev->bufsize = byte_size;
1388 azx_dev->period_bytes = byte_size;
1389 azx_dev->format_val = format;
1391 azx_stream_reset(chip, azx_dev);
1393 /* reset BDL address */
1394 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1395 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
/* build one BDL entry describing the firmware buffer */
1398 bdl = (u32 *)azx_dev->bdl.area;
1399 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1403 azx_setup_controller(chip, azx_dev);
1404 dsp_unlock(azx_dev);
/* success: hand the stream tag back to the DSP loader */
1405 return azx_dev->stream_tag;
/* error path: free the buffer and restore the saved stream state */
1408 chip->ops->dma_free_pages(chip, bufp);
1410 spin_lock_irq(&chip->reg_lock);
1411 if (azx_dev->opened)
1412 *azx_dev = chip->saved_azx_dev;
1413 azx_dev->locked = 0;
1414 spin_unlock_irq(&chip->reg_lock);
1416 dsp_unlock(azx_dev);
1420 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1422 struct azx *chip = bus->private_data;
1423 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1426 azx_stream_start(chip, azx_dev);
1428 azx_stream_stop(chip, azx_dev);
1429 azx_dev->running = start;
/*
 * Undo azx_load_dsp_prepare(): clear the stream registers, free the
 * firmware buffer and restore the saved stream state.
 * NOTE(review): the early-return when nothing was prepared and the
 * dsp_lock() call are elided in this listing.
 */
1432 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1433 struct snd_dma_buffer *dmab)
1435 struct azx *chip = bus->private_data;
1436 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
/* nothing to clean up if no buffer was allocated or the stream isn't locked */
1438 if (!dmab->area || !azx_dev->locked)
1442 /* reset BDL address */
1443 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1444 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1445 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1446 azx_dev->bufsize = 0;
1447 azx_dev->period_bytes = 0;
1448 azx_dev->format_val = 0;
/* release the firmware buffer allocated in azx_load_dsp_prepare() */
1450 chip->ops->dma_free_pages(chip, dmab);
/* restore the stream state saved before the DSP load */
1453 spin_lock_irq(&chip->reg_lock);
1454 if (azx_dev->opened)
1455 *azx_dev = chip->saved_azx_dev;
1456 azx_dev->locked = 0;
1457 spin_unlock_irq(&chip->reg_lock);
1458 dsp_unlock(azx_dev);
1460 #endif /* CONFIG_SND_HDA_DSP_LOADER */
/*
 * Allocate the DMA pages the controller needs: one BDL per stream, the
 * DMA position buffer, and the CORB/RIRB ring buffers.
 * Returns 0 on success or a negative error code (error checks elided here).
 * NOTE(review): the `int i, err;` declarations, buffer-size arguments and
 * `if (err < 0) return err;` checks are missing from this listing.
 */
1462 int azx_alloc_stream_pages(struct azx *chip)
1465 struct snd_card *card = chip->card;
1467 for (i = 0; i < chip->num_streams; i++) {
1468 dsp_lock_init(&chip->azx_dev[i]);
1469 /* allocate memory for the BDL for each stream */
1470 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1472 &chip->azx_dev[i].bdl);
1474 dev_err(card->dev, "cannot allocate BDL\n");
1478 /* allocate memory for the position buffer */
1479 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1480 chip->num_streams * 8, &chip->posbuf);
1482 dev_err(card->dev, "cannot allocate posbuf\n");
1486 /* allocate CORB/RIRB */
1487 err = azx_alloc_cmd_io(chip);
1492 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
1494 void azx_free_stream_pages(struct azx *chip)
1497 if (chip->azx_dev) {
1498 for (i = 0; i < chip->num_streams; i++)
1499 if (chip->azx_dev[i].bdl.area)
1500 chip->ops->dma_free_pages(
1501 chip, &chip->azx_dev[i].bdl);
1504 chip->ops->dma_free_pages(chip, &chip->rb);
1505 if (chip->posbuf.area)
1506 chip->ops->dma_free_pages(chip, &chip->posbuf);
1508 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1511 * Lowlevel interface
1514 /* enter link reset */
1515 void azx_enter_link_reset(struct azx *chip)
1517 unsigned long timeout;
1519 /* reset controller */
1520 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
1522 timeout = jiffies + msecs_to_jiffies(100);
1523 while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
1524 time_before(jiffies, timeout))
1525 usleep_range(500, 1000);
1527 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1529 /* exit link reset */
1530 static void azx_exit_link_reset(struct azx *chip)
1532 unsigned long timeout;
1534 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
1536 timeout = jiffies + msecs_to_jiffies(100);
1537 while (!azx_readb(chip, GCTL) &&
1538 time_before(jiffies, timeout))
1539 usleep_range(500, 1000);
1542 /* reset codec link */
/*
 * Reset the codec link: assert and deassert controller reset (CRST) with the
 * spec-mandated delays, then latch the codec presence mask from STATESTS.
 * NOTE(review): this listing is missing several original lines — e.g. the
 * early skip when !full_reset, the error return when the controller never
 * comes ready, the AZX_GCTL_UNSOL operand and the final return — so the
 * visible statements are not contiguous.
 */
1543 static int azx_reset(struct azx *chip, bool full_reset)
1548 /* clear STATESTS */
1549 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1551 /* reset controller */
1552 azx_enter_link_reset(chip);
1554 /* delay for >= 100us for codec PLL to settle per spec
1555 * Rev 0.9 section 5.5.1
1557 usleep_range(500, 1000);
1559 /* Bring controller out of reset */
1560 azx_exit_link_reset(chip);
1562 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1563 usleep_range(1000, 1200);
1566 /* check to see if controller is ready */
1567 if (!azx_readb(chip, GCTL)) {
1568 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1572 /* Accept unsolicited responses */
1573 if (!chip->single_cmd)
1574 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
/* detect attached codecs only once; STATESTS bits flag present codec slots */
1578 if (!chip->codec_mask) {
1579 chip->codec_mask = azx_readw(chip, STATESTS);
1580 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1587 /* enable interrupts */
1588 static void azx_int_enable(struct azx *chip)
1590 /* enable controller CIE and GIE */
1591 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1592 AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
1595 /* disable interrupts */
1596 static void azx_int_disable(struct azx *chip)
1600 /* disable interrupts in stream descriptor */
1601 for (i = 0; i < chip->num_streams; i++) {
1602 struct azx_dev *azx_dev = &chip->azx_dev[i];
1603 azx_sd_writeb(chip, azx_dev, SD_CTL,
1604 azx_sd_readb(chip, azx_dev, SD_CTL) &
1608 /* disable SIE for all streams */
1609 azx_writeb(chip, INTCTL, 0);
1611 /* disable controller CIE and GIE */
1612 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1613 ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
1616 /* clear interrupts */
1617 static void azx_int_clear(struct azx *chip)
1621 /* clear stream status */
1622 for (i = 0; i < chip->num_streams; i++) {
1623 struct azx_dev *azx_dev = &chip->azx_dev[i];
1624 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1627 /* clear STATESTS */
1628 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1630 /* clear rirb status */
1631 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1633 /* clear int status */
1634 azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
1638 * reset and start the controller registers
1640 void azx_init_chip(struct azx *chip, bool full_reset)
1642 if (chip->initialized)
1645 /* reset controller */
1646 azx_reset(chip, full_reset);
1648 /* initialize interrupts */
1649 azx_int_clear(chip);
1650 azx_int_enable(chip);
1652 /* initialize the codec command I/O */
1653 if (!chip->single_cmd)
1654 azx_init_cmd_io(chip);
1656 /* program the position buffer */
1657 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1658 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1660 chip->initialized = 1;
1662 EXPORT_SYMBOL_GPL(azx_init_chip);
1664 void azx_stop_chip(struct azx *chip)
1666 if (!chip->initialized)
1669 /* disable interrupts */
1670 azx_int_disable(chip);
1671 azx_int_clear(chip);
1673 /* disable CORB/RIRB */
1674 azx_free_cmd_io(chip);
1676 /* disable position buffer */
1677 azx_writel(chip, DPLBASE, 0);
1678 azx_writel(chip, DPUBASE, 0);
1680 chip->initialized = 0;
1682 EXPORT_SYMBOL_GPL(azx_stop_chip);
/*
 * Main interrupt handler: ack per-stream completions (waking the PCM layer
 * via snd_pcm_period_elapsed) and drain RIRB responses.
 * NOTE(review): several lines are elided in this listing — the IRQ_NONE /
 * IRQ_HANDLED returns, the `status`/`sd_status`/`i` declarations, the
 * `continue` for spurious stream IRQs and the RIRB pre-delay udelay —
 * do not assume the visible statements are contiguous.
 */
1687 irqreturn_t azx_interrupt(int irq, void *dev_id)
1689 struct azx *chip = dev_id;
1690 struct azx_dev *azx_dev;
/* ignore IRQs while runtime-suspended; registers may be powered off */
1696 if (azx_has_pm_runtime(chip))
1697 if (!pm_runtime_active(chip->card->dev))
1701 spin_lock(&chip->reg_lock);
1703 if (chip->disabled) {
1704 spin_unlock(&chip->reg_lock);
/* 0xffffffff means the device dropped off the bus (e.g. surprise removal) */
1708 status = azx_readl(chip, INTSTS);
1709 if (status == 0 || status == 0xffffffff) {
1710 spin_unlock(&chip->reg_lock);
/* handle per-stream completion interrupts */
1714 for (i = 0; i < chip->num_streams; i++) {
1715 azx_dev = &chip->azx_dev[i];
1716 if (status & azx_dev->sd_int_sta_mask) {
1717 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
/* ack the stream status bits immediately */
1718 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1719 if (!azx_dev->substream || !azx_dev->running ||
1720 !(sd_status & SD_INT_COMPLETE))
1722 /* check whether this IRQ is really acceptable */
1723 if (!chip->ops->position_check ||
1724 chip->ops->position_check(chip, azx_dev)) {
/* drop the lock around the PCM callback to avoid recursion/deadlock */
1725 spin_unlock(&chip->reg_lock);
1726 snd_pcm_period_elapsed(azx_dev->substream);
1727 spin_lock(&chip->reg_lock);
1732 /* clear rirb int */
1733 status = azx_readb(chip, RIRBSTS);
1734 if (status & RIRB_INT_MASK) {
1735 if (status & RIRB_INT_RESPONSE) {
1736 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1738 azx_update_rirb(chip);
1740 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1743 spin_unlock(&chip->reg_lock);
1747 EXPORT_SYMBOL_GPL(azx_interrupt);
1754 * Probe the given codec address
/*
 * Probe the given codec address by sending a GET_PARAMETERS(VENDOR_ID) verb
 * to the root node and checking for a valid response.
 * NOTE(review): the `res` declaration, the probing-flag bookkeeping and the
 * failure return (`if (res == -1) return -EIO;`) are elided in this listing.
 */
1756 static int probe_codec(struct azx *chip, int addr)
1758 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1759 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
/* serialize with other verb traffic on the bus */
1762 mutex_lock(&chip->bus->cmd_mutex);
1764 azx_send_cmd(chip->bus, cmd);
1765 res = azx_get_response(chip->bus, addr);
1767 mutex_unlock(&chip->bus->cmd_mutex);
1770 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
/*
 * Full bus reset: restart the controller, then suspend/resume all codecs
 * so their state is re-synced with the hardware.
 * NOTE(review): the bus->in_reset set/clear bookkeeping and the
 * `struct azx_pcm *p;` declaration are elided in this listing.
 */
1774 static void azx_bus_reset(struct hda_bus *bus)
1776 struct azx *chip = bus->private_data;
/* bounce the controller */
1779 azx_stop_chip(chip);
1780 azx_init_chip(chip, true);
1782 if (chip->initialized) {
/* quiesce every PCM stream before cycling the codecs */
1784 list_for_each_entry(p, &chip->pcm_list, list)
1785 snd_pcm_suspend_all(p->pcm);
1786 snd_hda_suspend(chip->bus);
1787 snd_hda_resume(chip->bus);
1794 /* power-up/down the controller */
1795 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1797 struct azx *chip = bus->private_data;
1799 if (!azx_has_pm_runtime(chip))
1803 pm_runtime_get_sync(chip->card->dev);
1805 pm_runtime_put_sync(chip->card->dev);
1809 static int get_jackpoll_interval(struct azx *chip)
1814 if (!chip->jackpoll_ms)
1817 i = chip->jackpoll_ms[chip->dev_index];
1820 if (i < 50 || i > 60000)
1823 j = msecs_to_jiffies(i);
1825 dev_warn(chip->card->dev,
1826 "jackpoll_ms value out of range: %d\n", i);
/*
 * Callback table handed to the HDA core: verb transport, PCM attach, bus
 * reset, power notification and (optionally) the DSP-loader hooks.
 * NOTE(review): the #ifdef CONFIG_PM guard around .pm_notify, the closing
 * #endif and the terminating "};" are elided in this listing.
 */
1830 static struct hda_bus_ops bus_ops = {
1831 .command = azx_send_cmd,
1832 .get_response = azx_get_response,
1833 .attach_pcm = azx_attach_pcm_stream,
1834 .bus_reset = azx_bus_reset,
1836 .pm_notify = azx_power_notify,
1838 #ifdef CONFIG_SND_HDA_DSP_LOADER
1839 .load_dsp_prepare = azx_load_dsp_prepare,
1840 .load_dsp_trigger = azx_load_dsp_trigger,
1841 .load_dsp_cleanup = azx_load_dsp_cleanup,
1845 /* HD-audio bus initialization */
/*
 * Create and configure the HDA bus object for this controller, applying
 * quirks (RIRB delay, synced writes) from chip->driver_caps.
 * NOTE(review): the error check after snd_hda_bus_new(), the `chip->bus =
 * bus;` assignment, the bus_ops hookup and the final `return 0;` are
 * elided in this listing.
 */
1846 int azx_bus_create(struct azx *chip, const char *model, int *power_save_to)
1848 struct hda_bus *bus;
1851 err = snd_hda_bus_new(chip->card, &bus);
1856 bus->private_data = chip;
1857 bus->pci = chip->pci;
1858 bus->modelname = model;
1861 bus->power_save = power_save_to;
/* quirk: some controllers need a long delay in RIRB handling */
1864 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1865 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1866 bus->needs_damn_long_delay = 1;
1869 /* AMD chipsets often cause the communication stalls upon certain
1870 * sequence like the pin-detection. It seems that forcing the synced
1871 * access works around the stall. Grrr...
1873 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1874 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1875 bus->sync_write = 1;
1876 bus->allow_bus_reset = 1;
1881 EXPORT_SYMBOL_GPL(azx_bus_create);
/*
 * Probe all codec slots flagged in codec_mask & codec_probe_mask, then
 * create a codec instance for each slot that answered.
 * Returns 0 when at least one codec initialized, negative error otherwise.
 * NOTE(review): declarations (`int c, codecs, err;`), the codecs counter
 * updates, error checks after snd_hda_codec_new() and the final returns
 * are elided in this listing.
 */
1884 int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
1886 struct hda_bus *bus = chip->bus;
/* fall back to the default slot count when the caller passes 0 */
1891 max_slots = AZX_DEFAULT_CODECS;
1893 /* First try to probe all given codec slots */
1894 for (c = 0; c < max_slots; c++) {
1895 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1896 if (probe_codec(chip, c) < 0) {
1897 /* Some BIOSen give you wrong codec addresses
1900 dev_warn(chip->card->dev,
1901 "Codec #%d probe error; disabling it...\n", c);
1902 chip->codec_mask &= ~(1 << c);
1903 /* More badly, accessing to a non-existing
1904 * codec often screws up the controller chip,
1905 * and disturbs the further communications.
1906 * Thus if an error occurs during probing,
1907 * better to reset the controller chip to
1908 * get back to the sanity state.
1910 azx_stop_chip(chip);
1911 azx_init_chip(chip, true);
1916 /* Then create codec instances */
1917 for (c = 0; c < max_slots; c++) {
1918 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1919 struct hda_codec *codec;
1920 err = snd_hda_codec_new(bus, c, &codec);
1923 codec->jackpoll_interval = get_jackpoll_interval(chip);
1924 codec->beep_mode = chip->beep_mode;
/* fail only if no codec at all came up */
1929 dev_err(chip->card->dev, "no codecs initialized\n");
1934 EXPORT_SYMBOL_GPL(azx_probe_codecs);
1936 /* configure each codec instance */
1937 int azx_codec_configure(struct azx *chip)
1939 struct hda_codec *codec;
1940 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1941 snd_hda_codec_configure(codec);
1945 EXPORT_SYMBOL_GPL(azx_codec_configure);
1948 static bool is_input_stream(struct azx *chip, unsigned char index)
1950 return (index >= chip->capture_index_offset &&
1951 index < chip->capture_index_offset + chip->capture_streams);
1954 /* initialize SD streams */
1955 int azx_init_stream(struct azx *chip)
1958 int in_stream_tag = 0;
1959 int out_stream_tag = 0;
1961 /* initialize each stream (aka device)
1962 * assign the starting bdl address to each stream (device)
1965 for (i = 0; i < chip->num_streams; i++) {
1966 struct azx_dev *azx_dev = &chip->azx_dev[i];
1967 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
1968 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
1969 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1970 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1971 azx_dev->sd_int_sta_mask = 1 << i;
1974 /* stream tag must be unique throughout
1975 * the stream direction group,
1976 * valid values 1...15
1977 * use separate stream tag if the flag
1978 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
1980 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1981 azx_dev->stream_tag =
1982 is_input_stream(chip, i) ?
1986 azx_dev->stream_tag = i + 1;
1991 EXPORT_SYMBOL_GPL(azx_init_stream);
1994 * reboot notifier for hang-up problem at power-down
1996 static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
1998 struct azx *chip = container_of(nb, struct azx, reboot_notifier);
1999 snd_hda_bus_reboot_notify(chip->bus);
2000 azx_stop_chip(chip);
2004 void azx_notifier_register(struct azx *chip)
2006 chip->reboot_notifier.notifier_call = azx_halt;
2007 register_reboot_notifier(&chip->reboot_notifier);
2009 EXPORT_SYMBOL_GPL(azx_notifier_register);
2011 void azx_notifier_unregister(struct azx *chip)
2013 if (chip->reboot_notifier.notifier_call)
2014 unregister_reboot_notifier(&chip->reboot_notifier);
2016 EXPORT_SYMBOL_GPL(azx_notifier_unregister);
2018 MODULE_LICENSE("GPL");
2019 MODULE_DESCRIPTION("Common HDA driver functions");