o binutils 2.12 # ld -v
o util-linux 2.10o # fdformat --version
o module-init-tools 0.9.10 # depmod -V
-o e2fsprogs 1.29 # tune2fs
+o e2fsprogs 1.41.4 # e2fsck -V
o jfsutils 1.1.3 # fsck.jfs -V
o reiserfsprogs 3.6.3 # reiserfsck -V 2>&1|grep reiserfsprogs
o xfsprogs 2.6.0 # xfs_db -V
+o squashfs-tools 4.0 # mksquashfs -version
+o btrfs-progs 0.18 # btrfsck
o pcmciautils 004 # pccardctl -V
o quota-tools 3.09 # quota -V
o PPP 2.4.0 # pppd --version
(* (max steps 1)
c-basic-offset)))
+(add-hook 'c-mode-common-hook
+ (lambda ()
+ ;; Add kernel style
+ (c-add-style
+ "linux-tabs-only"
+ '("linux" (c-offsets-alist
+ (arglist-cont-nonempty
+ c-lineup-gcc-asm-reg
+ c-lineup-arglist-tabs-only))))))
+
(add-hook 'c-mode-hook
(lambda ()
(let ((filename (buffer-file-name)))
;; Enable kernel mode for the appropriate files
(when (and filename
- (string-match "~/src/linux-trees" filename))
+ (string-match (expand-file-name "~/src/linux-trees")
+ filename))
(setq indent-tabs-mode t)
- (c-set-style "linux")
- (c-set-offset 'arglist-cont-nonempty
- '(c-lineup-gcc-asm-reg
- c-lineup-arglist-tabs-only))))))
+ (c-set-style "linux-tabs-only")))))
This will make emacs follow the kernel coding style better for C
files below ~/src/linux-trees.
This document describes the DMA API. For a more gentle introduction
phrased in terms of the pci_ equivalents (and actual examples) see
-DMA-mapping.txt
+Documentation/PCI/PCI-DMA-mapping.txt.
This API is split into two pieces. Part I describes the API and the
corresponding pci_ API. Part II describes the extensions to the API
</abstract>
<revhistory>
+ <revision>
+ <revnumber>0.7</revnumber>
+ <date>2008-12-23</date>
+ <authorinitials>hjk</authorinitials>
+ <revremark>Added generic platform drivers and offset attribute.</revremark>
+ </revision>
<revision>
<revnumber>0.6</revnumber>
<date>2008-12-05</date>
pointed to by addr.
</para>
</listitem>
+<listitem>
+ <para>
+ <filename>offset</filename>: The offset, in bytes, that has to be
+ added to the pointer returned by <function>mmap()</function> to get
+ to the actual device memory. This is important if the device's memory
+ is not page aligned. Remember that pointers returned by
+    <function>mmap()</function> are always page aligned, so it is good
+    style to always add this offset (see the sketch after this list).
+ </para>
+</listitem>
</itemizedlist>
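+<para>
+    A minimal userspace sketch of applying the offset (illustrative only;
+    it assumes map 0 of a hypothetical device <filename>/dev/uio0</filename>,
+    with <varname>size</varname> and <varname>offset</varname> read from the
+    sysfs attributes described above; needs fcntl.h, stdint.h and
+    sys/mman.h):
+</para>
+<programlisting format="linespecific">
+int fd = open("/dev/uio0", O_RDWR);
+void *base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                  fd, 0);                    /* page aligned */
+uint8_t *regs = (uint8_t *)base + offset;    /* actual device memory */
+</programlisting>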
<para>
</para>
</sect1>
+<sect1 id="using_uio_pdrv">
+<title>Using uio_pdrv for platform devices</title>
+ <para>
+ In many cases, UIO drivers for platform devices can be handled in a
+ generic way. In the same place where you define your
+ <varname>struct platform_device</varname>, you simply also implement
+ your interrupt handler and fill your
+ <varname>struct uio_info</varname>. A pointer to this
+ <varname>struct uio_info</varname> is then used as
+ <varname>platform_data</varname> for your platform device.
+ </para>
+ <para>
+ You also need to set up an array of <varname>struct resource</varname>
+ containing addresses and sizes of your memory mappings. This
+ information is passed to the driver using the
+ <varname>.resource</varname> and <varname>.num_resources</varname>
+ elements of <varname>struct platform_device</varname>.
+ </para>
+ <para>
+ You now have to set the <varname>.name</varname> element of
+ <varname>struct platform_device</varname> to
+ <varname>"uio_pdrv"</varname> to use the generic UIO platform device
+ driver. This driver will fill the <varname>mem[]</varname> array
+ according to the resources given, and register the device.
+ </para>
+ <para>
+ The advantage of this approach is that you only have to edit a file
+ you need to edit anyway. You do not have to create an extra driver.
+ </para>
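+  <para>
+    A minimal sketch of such a setup (illustrative only; the device name,
+    irq number, handler, addresses and sizes are invented):
+  </para>
+<programlisting format="linespecific">
+static struct uio_info mydev_uio_info = {
+        .name    = "mydev",
+        .version = "0.1",
+        .irq     = MYDEV_IRQ,            /* made-up irq number */
+        .handler = mydev_irq_handler,    /* your interrupt handler */
+};
+
+static struct resource mydev_resources[] = {
+        {
+                .start = 0x10000000,     /* made-up register base */
+                .end   = 0x10000fff,
+                .name  = "mydev_regs",
+                .flags = IORESOURCE_MEM,
+        },
+};
+
+static struct platform_device mydev = {
+        .name           = "uio_pdrv",
+        .id             = -1,
+        .resource       = mydev_resources,
+        .num_resources  = ARRAY_SIZE(mydev_resources),
+        .dev = {
+                .platform_data = &amp;mydev_uio_info,
+        },
+};
+</programlisting>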
+</sect1>
+
+<sect1 id="using_uio_pdrv_genirq">
+<title>Using uio_pdrv_genirq for platform devices</title>
+ <para>
+ Especially in embedded devices, you frequently find chips where the
+ irq pin is tied to its own dedicated interrupt line. In such cases,
+    where you can be really sure the interrupt is not shared, you can take
+    the concept of <varname>uio_pdrv</varname> one step further and use a
+ generic interrupt handler. That's what
+ <varname>uio_pdrv_genirq</varname> does.
+ </para>
+ <para>
+ The setup for this driver is the same as described above for
+ <varname>uio_pdrv</varname>, except that you do not implement an
+ interrupt handler. The <varname>.handler</varname> element of
+ <varname>struct uio_info</varname> must remain
+ <varname>NULL</varname>. The <varname>.irq_flags</varname> element
+ must not contain <varname>IRQF_SHARED</varname>.
+ </para>
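+  <para>
+    For illustration (the name and irq number are made up), the
+    <varname>struct uio_info</varname> then looks like this:
+  </para>
+<programlisting format="linespecific">
+static struct uio_info mydev_uio_info = {
+        .name      = "mydev",
+        .version   = "0.1",
+        .irq       = MYDEV_IRQ,  /* made-up irq number */
+        .irq_flags = 0,          /* must not include IRQF_SHARED */
+        .handler   = NULL,       /* the generic handler is used */
+};
+</programlisting>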
+ <para>
+    To use this driver, set the <varname>.name</varname> element of
+    <varname>struct platform_device</varname> to
+    <varname>"uio_pdrv_genirq"</varname>.
+ </para>
+ <para>
+ The generic interrupt handler of <varname>uio_pdrv_genirq</varname>
+ will simply disable the interrupt line using
+ <function>disable_irq_nosync()</function>. After doing its work,
+ userspace can reenable the interrupt by writing 0x00000001 to the UIO
+ device file. The driver already implements an
+    <function>irq_control()</function> to make this possible; you must not
+    implement your own.
+ </para>
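+  <para>
+    A sketch of the userspace side (illustrative only; assumes the device
+    shows up as <filename>/dev/uio0</filename>; needs fcntl.h, stdint.h
+    and unistd.h):
+  </para>
+<programlisting format="linespecific">
+uint32_t info[1];
+uint32_t irq_on[1] = { 1 };        /* 0x00000001 re-enables the irq */
+int fd = open("/dev/uio0", O_RDWR);
+
+read(fd, info, sizeof(info));      /* blocks until an interrupt arrives */
+/* ... handle the device in userspace ... */
+write(fd, irq_on, sizeof(irq_on)); /* re-enable the interrupt line */
+</programlisting>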
+ <para>
+ Using <varname>uio_pdrv_genirq</varname> not only saves a few lines of
+ interrupt handler code. You also do not need to know anything about
+ the chip's internal registers to create the kernel part of the driver.
+ All you need to know is the irq number of the pin the chip is
+ connected to.
+ </para>
+</sect1>
+
</chapter>
<chapter id="userspace_driver" xreflabel="Writing a driver in user space">
[ NOTE: The virt_to_bus() and bus_to_virt() functions have been
- superseded by the functionality provided by the PCI DMA
- interface (see Documentation/DMA-mapping.txt). They continue
+ superseded by the functionality provided by the PCI DMA interface
+ (see Documentation/PCI/PCI-DMA-mapping.txt). They continue
to be documented below for historical purposes, but new code
must not use them. --davidm 00/12/12 ]
do not have a corresponding kernel virtual address space mapping) and
low-memory pages.
-Note: Please refer to DMA-mapping.txt for a discussion on PCI high mem DMA
-aspects and mapping of scatter gather lists, and support for 64 bit PCI.
+Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion
+on PCI high mem DMA aspects and mapping of scatter gather lists, and support
+for 64 bit PCI.
Special handling is required only for cases where i/o needs to happen on
pages at physical memory addresses beyond what the device can support. In these
results in some sort of conflict internally,
this hook allows it to do that.
-elevator_dispatch_fn fills the dispatch queue with ready requests.
+elevator_dispatch_fn* fills the dispatch queue with ready requests.
I/O schedulers are free to postpone requests by
not filling the dispatch queue unless @force
is non-zero. Once dispatched, I/O schedulers
are not allowed to manipulate the requests -
they belong to generic dispatch queue.
-elevator_add_req_fn called to add a new request into the scheduler
+elevator_add_req_fn* called to add a new request into the scheduler
elevator_queue_empty_fn returns true if the merge queue is empty.
Drivers shouldn't use this, but rather check
elevator_deactivate_req_fn Called when device driver decides to delay
a request by requeueing it.
-elevator_init_fn
+elevator_init_fn*
elevator_exit_fn Allocate and free any elevator specific storage
for a queue.
Memory Resource Controller(Memcg) Implementation Memo.
-Last Updated: 2008/12/15
-Base Kernel Version: based on 2.6.28-rc8-mm.
+Last Updated: 2009/1/19
+Base Kernel Version: based on 2.6.29-rc2.
Because the VM is getting complex (one of the reasons is memcg...), memcg's
behavior is complex too. This document describes memcg's internal behavior.
# mount -t cgroup none /cgroup -o cpuset,memory,cpu,devices
and do task moves, mkdir, rmdir, etc. under this.
+
+ 9.7 swapoff.
+ Besides swap management being one of the more complicated parts of memcg,
+ the swap-in call path at swapoff is not the same as the usual swap-in
+ path, so it is worth testing explicitly.
+
+ For example, a test like the following is good.
+ (Shell-A)
+ # mount -t cgroup none /cgroup -o memory
+ # mkdir /cgroup/test
+ # echo 40M > /cgroup/test/memory.limit_in_bytes
+ # echo 0 > /cgroup/test/tasks
+ Run a malloc(100M) program under this. You'll see 60M of swap in use.
+ (Shell-B)
+ # move all tasks in /cgroup/test to /cgroup
+ # /sbin/swapoff -a
+ # rmdir /cgroup/test
+ # kill the malloc task.
+
+ Of course, the tmpfs vs. swapoff case should be tested, too.
values are in the range -16 to +15, plus the special value -17, which disables
oom-killing altogether for this process.
+The process to be killed in an out-of-memory situation is selected from among
+all others based on its badness score. This value initially equals the memory
+size of the process and is then adjusted according to its CPU time
+(utime + stime) and its run time (uptime - start time): the longer it runs,
+the smaller the score. The badness score is divided by the square root of the
+CPU time and then by the "double square root" (i.e. the fourth root) of the
+run time.
+
+Swapped-out tasks are killed first. Half of each child's memory size is added
+to the parent's score if they do not share the same memory. Forking servers
+are thus prime candidates to be killed. Having only one 'hungry' child will
+make the parent less preferable than the child.
+
+/proc/<pid>/oom_score shows the process' current badness score.
+
+The following heuristics are then applied:
+ * if the task was reniced, its score doubles
+ * superuser or direct hardware access tasks (CAP_SYS_ADMIN, CAP_SYS_RESOURCE
+   or CAP_SYS_RAWIO) have their score divided by 4
+ * if the OOM condition happened in one cpuset and the checked task does not
+   belong to it, its score is divided by 8
+ * the resulting score is multiplied by two to the power of oom_adj, i.e.
+   points <<= oom_adj when it is positive and
+   points >>= -(oom_adj) otherwise
+
+The task with the highest badness score is then selected, and its children
+are killed first. The process itself is killed in an OOM situation only when
+it has no children, or when its children have disabled oom-killing as
+described above.
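+
+A minimal sketch of the oom_adj adjustment (illustrative only, not the
+kernel's literal code):
+
+	/* badness points scaled by oom_adj as described above */
+	unsigned long adjust_points(unsigned long points, int oom_adj)
+	{
+		if (oom_adj > 0)
+			return points << oom_adj;  /* oom_adj = 2 quadruples */
+		return points >> -oom_adj;         /* oom_adj = -2 quarters */
+	}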
+
2.13 /proc/<pid>/oom_score - Display current oom-killer score
-------------------------------------------------------------
==================================
This is a Japanese translation of
-linux-2.6.24/Documentation/stable_kernel_rules.txt
+linux-2.6.29/Documentation/stable_kernel_rules.txt
Translation team: JF Project < http://www.linux.or.jp/JF/ >
-Translation date: 2007/12/30
+Translation date: 2009/1/14
Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
Proofreaders: 武井伸光 <takei at webmasters dot gr dot jp>
              Seiji Kaneko <skaneko at a2 dot mbn dot or dot jp>
 - It must fix a build error (but not for things marked CONFIG_BROKEN),
   an oops, a hang, data corruption, a real security issue, or some
   "oh, that's not good" issue. In short, something critical.
+ - New device IDs and quirks are also accepted.
 - No "theoretical race condition" issues, unless an explanation of how
   the race can be exploited is also provided.
 - It cannot contain any "trivial" fixes (spelling corrections, whitespace
   cleanups, etc.).
- - It must be accepted by the relevant subsystem maintainer.
 - It must follow the rules in Documentation/SubmittingPatches.
+ - The patch or an equivalent fix must already exist in Linus' tree.
+   Quote the commit ID from Linus' tree when submitting the patch
+   to -stable.
Procedure for submitting patches to the -stable tree:
 - The sender will receive an ACK when the patch has been accepted into
   the queue, or a NAK if the patch is rejected. This response may take
   a few days, depending on the developers' schedules.
- - If accepted, the patch will be added to the -stable queue, for review
-   by other developers.
+ - If accepted, the patch will be added to the -stable queue, for review
+   by other developers and by the relevant subsystem maintainer.
+ - When a patch carries the stable@kernel.org address, it is emailed to
+   the stable team automatically when it enters Linus' tree.
 - Security patches should not be sent to this alias (stable@kernel.org),
   but instead to the security@kernel.org address.
# This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include
+CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
LDLIBS:=-lz
all: lguest
IP-Aliasing:
============
-IP-aliases are additional IP-addresses/masks hooked up to a base
-interface by adding a colon and a string when running ifconfig.
-This string is usually numeric, but this is not a must.
-
-IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking)
-is configured in the kernel.
+IP-aliases are an obsolete way to manage multiple IP-addresses/masks
+per interface. Newer tools such as iproute2 support multiple
+address/prefixes per interface, but aliases are still supported
+for backwards compatibility.
+An alias is formed by adding a colon and a string when running ifconfig.
+This string is usually numeric, but it does not have to be.
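+
+For example (address made up for illustration), the iproute2 equivalent of
+an alias is just an additional address with a label:
+
+  # ip addr add 192.168.1.2/24 dev eth0 label eth0:0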
o Alias creation.
Alias creation is done by 'magic' interface naming: eg. to create a
If the base device is shut down the added aliases will be deleted
too.
-
-
-Contact
--------
-Please finger or e-mail me:
- Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
-
-Updated by Erik Schoenfelder <schoenfr@gaertner.DE>
-
-; local variables:
-; mode: indented-text
-; mode: auto-fill
-; end:
API OVERVIEW
The big picture is that USB drivers can continue to ignore most DMA issues,
-though they still must provide DMA-ready buffers (see DMA-mapping.txt).
-That's how they've worked through the 2.4 (and earlier) kernels.
+though they still must provide DMA-ready buffers (see
+Documentation/PCI/PCI-DMA-mapping.txt). That's how they've worked through
+the 2.4 (and earlier) kernels.
OR: they can now be DMA-aware.
force a consistent memory access ordering by using memory barriers. It's
not using a streaming DMA mapping, so it's good for small transfers on
systems where the I/O would otherwise thrash an IOMMU mapping. (See
- Documentation/DMA-mapping.txt for definitions of "coherent" and "streaming"
- DMA mappings.)
+ Documentation/PCI/PCI-DMA-mapping.txt for definitions of "coherent" and
+ "streaming" DMA mappings.)
Asking for 1/Nth of a page (as well as asking for N pages) is reasonably
space-efficient.
Existing buffers aren't usable for DMA without first being mapped into the
DMA address space of the device. However, most buffers passed to your
driver can safely be used with such DMA mapping. (See the first section
-of DMA-mapping.txt, titled "What memory is DMA-able?")
+of Documentation/PCI/PCI-DMA-mapping.txt, titled "What memory is DMA-able?")
- When you're using scatterlists, you can map everything at once. On some
systems, this kicks in an IOMMU and turns the scatterlists into single
MAC80211
P: Johannes Berg
M: johannes@sipsolutions.net
-P: Michael Wu
-M: flamingice@sourmilk.net
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
select HAVE_AOUT
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_SYSCALL_WRAPPERS
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
function loaded the GP, so this could fail in modules. */
-static inline void ATTRIB_NORET __BUG(const char *file, int line)
-{
- __asm__ __volatile__(
- "call_pal %0 # bugchk\n\t"
- ".long %1\n\t.8byte %2"
- : : "i" (PAL_bugchk), "i"(line), "i"(file));
- for ( ; ; )
- ;
-}
-
-#define BUG() __BUG(__FILE__, __LINE__)
+#define BUG() { \
+ __asm__ __volatile__( \
+ "call_pal %0 # bugchk\n\t" \
+ ".long %1\n\t.8byte %2" \
+ : : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \
+ for ( ; ; ); }
#define HAVE_ARCH_BUG
#endif
#else /* no PCI - no IOMMU. */
+#include <asm/io.h> /* for virt_to_phys() */
+
struct scatterlist;
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
osf_sigprocmask:
.prologue 0
mov $sp, $18
- jmp $31, do_osf_sigprocmask
+ jmp $31, sys_osf_sigprocmask
.end osf_sigprocmask
.align 4
* identical to OSF as we don't return 0 on success, but doing otherwise
* would require changes to libc. Hopefully this is good enough.
*/
-asmlinkage unsigned long
-osf_brk(unsigned long brk)
+SYSCALL_DEFINE1(osf_brk, unsigned long, brk)
{
unsigned long retval = sys_brk(brk);
if (brk && brk != retval)
/*
* This is pure guess-work..
*/
-asmlinkage int
-osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
- unsigned long bss_start, unsigned long bss_len)
+SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
+ unsigned long, text_len, unsigned long, bss_start,
+ unsigned long, bss_len)
{
struct mm_struct *mm;
return -EFAULT;
}
-asmlinkage int
-osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
- unsigned int count, long __user *basep)
+SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
+ struct osf_dirent __user *, dirent, unsigned int, count,
+ long __user *, basep)
{
int error;
struct file *file;
#undef NAME_OFFSET
-asmlinkage unsigned long
-osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long off)
+SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, off)
{
struct file *file = NULL;
unsigned long ret = -EBADF;
return error;
}
-asmlinkage int
-osf_statfs(char __user *pathname, struct osf_statfs __user *buffer, unsigned long bufsiz)
+SYSCALL_DEFINE3(osf_statfs, char __user *, pathname,
+ struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
struct path path;
int retval;
return retval;
}
-asmlinkage int
-osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz)
+SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
+ struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
struct file *file;
int retval;
return do_mount("", dirname, "proc", flags, NULL);
}
-asmlinkage int
-osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data)
+SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path,
+ int, flag, void __user *, data)
{
int retval = -EINVAL;
char *name;
return retval;
}
-asmlinkage int
-osf_utsname(char __user *name)
+SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
return error;
}
-asmlinkage unsigned long
-sys_getpagesize(void)
+SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE;
}
-asmlinkage unsigned long
-sys_getdtablesize(void)
+SYSCALL_DEFINE0(getdtablesize)
{
return sysctl_nr_open;
}
/*
* For compatibility with OSF/1 only. Use utsname(2) instead.
*/
-asmlinkage int
-osf_getdomainname(char __user *name, int namelen)
+SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
unsigned len;
int i;
PL_DEL = 5, PL_FDEL = 6
};
-asmlinkage long
-osf_proplist_syscall(enum pl_code code, union pl_args __user *args)
+SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
+ union pl_args __user *, args)
{
long error;
int __user *min_buf_size_ptr;
return error;
}
-asmlinkage int
-osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss)
+SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
+ struct sigstack __user *, uoss)
{
unsigned long usp = rdusp();
unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
return error;
}
-asmlinkage long
-osf_sysinfo(int command, char __user *buf, long count)
+SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
{
char *sysinfo_table[] = {
utsname()->sysname,
return err;
}
-asmlinkage unsigned long
-osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
- int __user *start, void __user *arg)
+SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
+ unsigned long, nbytes, int __user *, start, void __user *, arg)
{
unsigned long w;
struct percpu_struct *cpu;
return -EOPNOTSUPP;
}
-asmlinkage unsigned long
-osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
- int __user *start, void __user *arg)
+SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
+ unsigned long, nbytes, int __user *, start, void __user *, arg)
{
switch (op) {
case SSI_IEEE_FP_CONTROL: {
value->tv_sec = jiffies / HZ;
}
-asmlinkage int
-osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
+ struct timezone __user *, tz)
{
if (tv) {
struct timeval ktv;
return 0;
}
-asmlinkage int
-osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
+ struct timezone __user *, tz)
{
struct timespec kts;
struct timezone ktz;
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
-asmlinkage int
-osf_getitimer(int which, struct itimerval32 __user *it)
+SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
{
struct itimerval kit;
int error;
return error;
}
-asmlinkage int
-osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out)
+SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
+ struct itimerval32 __user *, out)
{
struct itimerval kin, kout;
int error;
}
-asmlinkage int
-osf_utimes(char __user *filename, struct timeval32 __user *tvs)
+SYSCALL_DEFINE2(osf_utimes, char __user *, filename,
+ struct timeval32 __user *, tvs)
{
struct timespec tv[2];
#define MAX_SELECT_SECONDS \
((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-asmlinkage int
-osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
- struct timeval32 __user *tvp)
+SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
+ fd_set __user *, exp, struct timeval32 __user *, tvp)
{
struct timespec end_time, *to = NULL;
if (tvp) {
long ru_nivcsw; /* involuntary " */
};
-asmlinkage int
-osf_getrusage(int who, struct rusage32 __user *ru)
+SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{
struct rusage32 r;
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
-asmlinkage long
-osf_wait4(pid_t pid, int __user *ustatus, int options,
- struct rusage32 __user *ur)
+SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
+ struct rusage32 __user *, ur)
{
struct rusage r;
long ret, err;
* seems to be a timeval pointer, and I suspect the second
* one is the time remaining.. Ho humm.. No documentation.
*/
-asmlinkage int
-osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain)
+SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
+ struct timeval32 __user *, remain)
{
struct timeval tmp;
unsigned long ticks;
int :32; int :32; int :32; int :32;
};
-asmlinkage int
-sys_old_adjtimex(struct timex32 __user *txc_p)
+SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
{
struct timex txc;
int ret;
return 0;
}
-asmlinkage ssize_t
-osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+SYSCALL_DEFINE3(osf_readv, unsigned long, fd,
+ const struct iovec __user *, vector, unsigned long, count)
{
if (unlikely(personality(current->personality) == PER_OSF4))
if (osf_fix_iov_len(vector, count))
return sys_readv(fd, vector, count);
}
-asmlinkage ssize_t
-osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
+ const struct iovec __user *, vector, unsigned long, count)
{
if (unlikely(personality(current->personality) == PER_OSF4))
if (osf_fix_iov_len(vector, count))
/* Stubs for the routines in pci_iommu.c: */
void *
-pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
return NULL;
}
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
+#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/sigcontext.h>
* Note that we don't need to acquire the kernel lock for SMP
* operation, as all of this is local to this thread.
*/
-asmlinkage unsigned long
-do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs)
+SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
+ struct pt_regs *, regs)
{
unsigned long oldmask = -EINVAL;
return oldmask;
}
-asmlinkage int
-osf_sigaction(int sig, const struct osf_sigaction __user *act,
- struct osf_sigaction __user *oact)
+SYSCALL_DEFINE3(osf_sigaction, int, sig,
+ const struct osf_sigaction __user *, act,
+ struct osf_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
return ret;
}
-asmlinkage long
-sys_rt_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize, void __user *restorer)
+SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
+ struct sigaction __user *, oact,
+ size_t, sigsetsize, void __user *, restorer)
{
struct k_sigaction new_ka, old_ka;
int ret;
smp_callin(void)
{
int cpuid = hard_smp_processor_id();
+ cpumask_t mask = cpu_online_map;
- if (cpu_test_and_set(cpuid, cpu_online_map)) {
+ if (cpu_test_and_set(cpuid, mask)) {
printk("??, cpu 0x%x already present??\n", cpuid);
BUG();
}
.quad sys_write
.quad alpha_ni_syscall /* 5 */
.quad sys_close
- .quad osf_wait4
+ .quad sys_osf_wait4
.quad alpha_ni_syscall
.quad sys_link
.quad sys_unlink /* 10 */
.quad sys_mknod
.quad sys_chmod /* 15 */
.quad sys_chown
- .quad osf_brk
+ .quad sys_osf_brk
.quad alpha_ni_syscall
.quad sys_lseek
.quad sys_getxpid /* 20 */
- .quad osf_mount
+ .quad sys_osf_mount
.quad sys_umount
.quad sys_setuid
.quad sys_getxuid
.quad alpha_ni_syscall /* 40 */
.quad sys_dup
.quad sys_alpha_pipe
- .quad osf_set_program_attributes
+ .quad sys_osf_set_program_attributes
.quad alpha_ni_syscall
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_newlstat
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 70 */
- .quad osf_mmap
+ .quad sys_osf_mmap
.quad alpha_ni_syscall
.quad sys_munmap
.quad sys_mprotect
.quad sys_setgroups /* 80 */
.quad alpha_ni_syscall
.quad sys_setpgid
- .quad osf_setitimer
+ .quad sys_osf_setitimer
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 85 */
- .quad osf_getitimer
+ .quad sys_osf_getitimer
.quad sys_gethostname
.quad sys_sethostname
.quad sys_getdtablesize
.quad sys_dup2 /* 90 */
.quad sys_newfstat
.quad sys_fcntl
- .quad osf_select
+ .quad sys_osf_select
.quad sys_poll
.quad sys_fsync /* 95 */
.quad sys_setpriority
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 110 */
.quad sys_sigsuspend
- .quad osf_sigstack
+ .quad sys_osf_sigstack
.quad sys_recvmsg
.quad sys_sendmsg
.quad alpha_ni_syscall /* 115 */
- .quad osf_gettimeofday
- .quad osf_getrusage
+ .quad sys_osf_gettimeofday
+ .quad sys_osf_getrusage
.quad sys_getsockopt
.quad alpha_ni_syscall
#ifdef CONFIG_OSF4_COMPAT
- .quad osf_readv /* 120 */
- .quad osf_writev
+ .quad sys_osf_readv /* 120 */
+ .quad sys_osf_writev
#else
.quad sys_readv /* 120 */
.quad sys_writev
#endif
- .quad osf_settimeofday
+ .quad sys_osf_settimeofday
.quad sys_fchown
.quad sys_fchmod
.quad sys_recvfrom /* 125 */
.quad sys_socketpair /* 135 */
.quad sys_mkdir
.quad sys_rmdir
- .quad osf_utimes
+ .quad sys_osf_utimes
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 140 */
.quad sys_getpeername
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 155 */
- .quad osf_sigaction
+ .quad sys_osf_sigaction
.quad alpha_ni_syscall
.quad alpha_ni_syscall
- .quad osf_getdirentries
- .quad osf_statfs /* 160 */
- .quad osf_fstatfs
+ .quad sys_osf_getdirentries
+ .quad sys_osf_statfs /* 160 */
+ .quad sys_osf_fstatfs
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
- .quad osf_getdomainname /* 165 */
+ .quad sys_osf_getdomainname /* 165 */
.quad sys_setdomainname
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_semctl
.quad sys_semget /* 205 */
.quad sys_semop
- .quad osf_utsname
+ .quad sys_osf_utsname
.quad sys_lchown
.quad sys_shmat
.quad sys_shmctl /* 210 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 240 */
- .quad osf_sysinfo
+ .quad sys_osf_sysinfo
.quad alpha_ni_syscall
.quad alpha_ni_syscall
- .quad osf_proplist_syscall
+ .quad sys_osf_proplist_syscall
.quad alpha_ni_syscall /* 245 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 250 */
- .quad osf_usleep_thread
+ .quad sys_osf_usleep_thread
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_sysfs
.quad alpha_ni_syscall /* 255 */
- .quad osf_getsysinfo
- .quad osf_setsysinfo
+ .quad sys_osf_getsysinfo
+ .quad sys_osf_setsysinfo
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 260 */
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
dma_addr_t
sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
int dir, struct dma_attrs *attrs)
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
int dir, struct dma_attrs *attrs)
* @dir: R/W or both.
* @attrs: optional dma attributes
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, int dir, struct dma_attrs *attrs)
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
-/* See Documentation/DMA-mapping.txt */
+/* See Documentation/PCI/PCI-DMA-mapping.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/DMA-mapping.txt for interface definitions.
+** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
#define _ASM_X86_DMA_MAPPING_H
/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
+ * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
*/
#include <linux/scatterlist.h>
* This allows to use PCI devices that only support 32bit addresses on systems
* with more than 4GB.
*
- * See Documentation/DMA-mapping.txt for the interface specification.
+ * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
* that we can fit comfortably.
*
* First we need assembly templates of each of the patchable Guest operations,
- * and these are in lguest_asm.S. */
+ * and these are in i386_head.S. */
/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
acpi_ht = 0;
#endif
- /* We set the perferred console to "hvc". This is the "hypervisor
+ /* We set the preferred console to "hvc". This is the "hypervisor
* virtual console" driver written by the PowerPC people, which we also
* adapted for lguest's use. */
add_preferred_console("hvc", 0, NULL);
* Description:
* Issue a flush for the block device in question. Caller can supply
* room for storing the error offset in case of a flush error, if they
- * wish to. Caller must run wait_for_completion() on its own.
+ * wish to.
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
static void drive_stat_acct(struct request *rq, int new_io)
{
+ struct gendisk *disk = rq->rq_disk;
struct hd_struct *part;
int rw = rq_data_dir(rq);
int cpu;
- if (!blk_fs_request(rq) || !rq->rq_disk)
+ if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
return;
cpu = part_stat_lock();
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
- q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
- 1 << QUEUE_FLAG_STACKABLE);
+ q->queue_flags = QUEUE_FLAG_DEFAULT;
q->queue_lock = lock;
blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
if (bio_sync(bio))
req->cmd_flags |= REQ_RW_SYNC;
+ if (bio_unplug(bio))
+ req->cmd_flags |= REQ_UNPLUG;
if (bio_rw_meta(bio))
req->cmd_flags |= REQ_RW_META;
int el_ret, nr_sectors;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
+ const int unplug = bio_unplug(bio);
int rw_flags;
nr_sectors = bio_sectors(bio);
blk_plug_device(q);
add_request(q, req);
out:
- if (sync || blk_queue_nonrot(q))
+ if (unplug || blk_queue_nonrot(q))
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
return 0;
err = -EOPNOTSUPP;
goto end_io;
}
+ if (bio_barrier(bio) && bio_has_data(bio) &&
+ (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
ret = q->make_request_fn(q, bio);
} while (ret);
}
EXPORT_SYMBOL(blkdev_dequeue_request);
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+ struct gendisk *disk = req->rq_disk;
+
+ if (!disk || !blk_queue_io_stat(disk->queue))
+ return;
+
+ if (blk_fs_request(req)) {
+ const int rw = rq_data_dir(req);
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ part_stat_unlock();
+ }
+}
+
+static void blk_account_io_done(struct request *req)
+{
+ struct gendisk *disk = req->rq_disk;
+
+ if (!disk || !blk_queue_io_stat(disk->queue))
+ return;
+
+ /*
+ * Account IO completion. bar_rq isn't accounted as a normal
+ * IO on queueing nor completion. Accounting the containing
+ * request is enough.
+ */
+ if (blk_fs_request(req) && req != &req->q->bar_rq) {
+ unsigned long duration = jiffies - req->start_time;
+ const int rw = rq_data_dir(req);
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(disk, req->sector);
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part);
+
+ part_stat_unlock();
+ }
+}
+
/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
(unsigned long long)req->sector);
}
- if (blk_fs_request(req) && req->rq_disk) {
- const int rw = rq_data_dir(req);
- struct hd_struct *part;
- int cpu;
-
- cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
- part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
- part_stat_unlock();
- }
+ blk_account_io_completion(req, nr_bytes);
total_bytes = bio_nbytes = 0;
while ((bio = req->bio) != NULL) {
*/
static void end_that_request_last(struct request *req, int error)
{
- struct gendisk *disk = req->rq_disk;
-
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
blk_delete_timer(req);
- /*
- * Account IO completion. bar_rq isn't accounted as a normal
- * IO on queueing nor completion. Accounting the containing
- * request is enough.
- */
- if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
- unsigned long duration = jiffies - req->start_time;
- const int rw = rq_data_dir(req);
- struct hd_struct *part;
- int cpu;
-
- cpu = part_stat_lock();
- part = disk_map_sector_rcu(disk, req->sector);
-
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, ticks[rw], duration);
- part_round_stats(cpu, part);
- part_dec_in_flight(part);
-
- part_stat_unlock();
- }
+ blk_account_io_done(req);
if (req->end_io)
req->end_io(req, error);
/**
* blk_integrity_register - Register a gendisk as being integrity-capable
* @disk: struct gendisk pointer to make integrity-aware
- * @template: integrity profile
+ * @template: optional integrity profile to register
*
* Description: When a device needs to advertise itself as being able
* to send/receive integrity metadata it must use this function to
* register the capability with the block layer. The template is a
* blk_integrity struct with values appropriate for the underlying
- * hardware. See Documentation/block/data-integrity.txt.
+ * hardware. If template is NULL the new profile is allocated but
+ * not filled out. See Documentation/block/data-integrity.txt.
*/
int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
struct blk_integrity *bi;
BUG_ON(disk == NULL);
- BUG_ON(template == NULL);
if (disk->integrity == NULL) {
bi = kmem_cache_alloc(integrity_cachep,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_ZERO);
if (!bi)
return -1;
bi = disk->integrity;
/* Use the provided profile as template */
- bi->name = template->name;
- bi->generate_fn = template->generate_fn;
- bi->verify_fn = template->verify_fn;
- bi->tuple_size = template->tuple_size;
- bi->set_tag_fn = template->set_tag_fn;
- bi->get_tag_fn = template->get_tag_fn;
- bi->tag_size = template->tag_size;
+ if (template != NULL) {
+ bi->name = template->name;
+ bi->generate_fn = template->generate_fn;
+ bi->verify_fn = template->verify_fn;
+ bi->tuple_size = template->tuple_size;
+ bi->set_tag_fn = template->set_tag_fn;
+ bi->get_tag_fn = template->get_tag_fn;
+ bi->tag_size = template->tag_size;
+ } else
+ bi->name = "unsupported";
return 0;
}
return queue_var_show(max_hw_sectors_kb, (page));
}
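+
+/*
+ * This attribute is exposed as "rotational" in sysfs, hence the inverted
+ * sense relative to QUEUE_FLAG_NONROT in the show/store below.
+ */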
+static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(!blk_queue_nonrot(q), page);
+}
+
+static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (nm)
+ queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_set(QUEUE_FLAG_NONROT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_nomerges(q), page);
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
-
spin_unlock_irq(q->queue_lock);
+
return ret;
}
return ret;
}
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long stats;
+ ssize_t ret = queue_var_store(&stats, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (stats)
+ queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.show = queue_hw_sector_size_show,
};
+static struct queue_sysfs_entry queue_nonrot_entry = {
+ .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nonrot_show,
+ .store = queue_nonrot_store,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
.show = queue_nomerges_show,
.store = queue_rq_affinity_store,
};
+static struct queue_sysfs_entry queue_iostats_entry = {
+ .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_iostats_show,
+ .store = queue_iostats_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
+ &queue_nonrot_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
+ &queue_iostats_entry.attr,
NULL,
};
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
-static unsigned int root_users;
-
-static inline void blk_remove_root(void)
-{
- if (blk_tree_root) {
- debugfs_remove(blk_tree_root);
- blk_tree_root = NULL;
- }
-}
-
-static void blk_remove_tree(struct dentry *dir)
-{
- mutex_lock(&blk_tree_mutex);
- debugfs_remove(dir);
- if (--root_users == 0)
- blk_remove_root();
- mutex_unlock(&blk_tree_mutex);
-}
-
-static struct dentry *blk_create_tree(const char *blk_name)
-{
- struct dentry *dir = NULL;
- int created = 0;
-
- mutex_lock(&blk_tree_mutex);
-
- if (!blk_tree_root) {
- blk_tree_root = debugfs_create_dir("block", NULL);
- if (!blk_tree_root)
- goto err;
- created = 1;
- }
-
- dir = debugfs_create_dir(blk_name, blk_tree_root);
- if (dir)
- root_users++;
- else {
- /* Delete root only if we created it */
- if (created)
- blk_remove_root();
- }
-
-err:
- mutex_unlock(&blk_tree_mutex);
- return dir;
-}
static void blk_trace_cleanup(struct blk_trace *bt)
{
- relay_close(bt->rchan);
debugfs_remove(bt->msg_file);
debugfs_remove(bt->dropped_file);
- blk_remove_tree(bt->dir);
+ relay_close(bt->rchan);
free_percpu(bt->sequence);
free_percpu(bt->msg_data);
kfree(bt);
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
+ struct dentry *parent = dentry->d_parent;
debugfs_remove(dentry);
+
+ /*
+	 * This will fail for all but the last file, but that is ok. What we
+	 * care about is the top level buts->name directory going away, when
+ * the last trace file is gone. Then we don't have to rmdir() that
+ * manually on trace stop, so it nicely solves the issue with
+ * force killing of running traces.
+ */
+
+ debugfs_remove(parent);
return 0;
}
goto err;
ret = -ENOENT;
- dir = blk_create_tree(buts->name);
+
+ if (!blk_tree_root) {
+ blk_tree_root = debugfs_create_dir("block", NULL);
+ if (!blk_tree_root)
+ return -ENOMEM;
+ }
+
+ dir = debugfs_create_dir(buts->name, blk_tree_root);
+
if (!dir)
goto err;
atomic_dec(&blk_probes_ref);
mutex_unlock(&blk_probe_mutex);
err:
- if (dir)
- blk_remove_tree(dir);
if (bt) {
if (bt->msg_file)
debugfs_remove(bt->msg_file);
*/
struct cfq_rb_root service_tree;
unsigned int busy_queues;
+ /*
+ * Used to track any pending rt requests so we can pre-empt current
+ * non-RT cfqq in service when this value is non-zero.
+ */
+ unsigned int busy_rt_queues;
int rq_in_driver;
int sync_flight;
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues++;
cfq_resort_rr_list(cfqd, cfqq);
}
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues--;
}
/*
if (cfq_slice_used(cfqq))
goto expire;
+ /*
+	 * If we have an RT cfqq waiting, then we pre-empt the current non-RT
+	 * cfqq.
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+ /*
+		 * We simulate this as if cfqq had timed out so that it gets to
+		 * bank the remainder of its time slice.
+ */
+ cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
+ goto new_queue;
+ }
+
/*
* The active queue has requests and isn't expired, allow it to
* dispatch.
if (RB_EMPTY_ROOT(&cfqq->sort_list))
break;
+ /*
+		 * If there is a non-empty RT cfqq waiting for the current
+		 * cfqq's timeslice to complete, pre-empt this cfqq.
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
+ break;
+
} while (dispatched < max_dispatch);
/*
if (rq_is_meta(rq) && !cfqq->meta_pending)
return 1;
+ /*
+ * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+ */
+ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+ return 1;
+
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return 0;
/*
* not the active queue - expire current slice if it is
* idle and has expired it's mean thinktime or this new queue
- * has some old slice time left and is of higher priority
+ * has some old slice time left and is of higher priority or
+ * this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
cfq_mark_cfqq_must_dispatch(cfqq);
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/dmi.h>
#define DRV_NAME "sata_sil"
#define DRV_VERSION "2.4"
/**
* root_device_unregister - unregister and free a root device
- * @root: device going away.
+ * @dev: device going away
*
* This function unregisters and cleans up a device that was created by
* root_device_register().
root_id = root_node->node_id;
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
- if (card->bm_generation + 1 == generation ||
+ if (is_next_generation(generation, card->bm_generation) ||
(card->bm_generation != generation && grace)) {
/*
* This first step is to figure out who is IRM and
fw_core_initiate_bus_reset(card, 1);
mutex_lock(&card_mutex);
- list_del(&card->link);
+ list_del_init(&card->link);
mutex_unlock(&card_mutex);
/* Set up the dummy driver. */
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/idr.h>
+#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
return device;
}
+/*
+ * These defines control the retry behavior for reading the config
+ * rom. It shouldn't be necessary to tweak these; if the device
+ * doesn't respond to a config rom read within 10 seconds, it's not
+ * going to respond at all. As for the initial delay, a lot of
+ * devices will be able to respond within half a second after bus
+ * reset. On the other hand, it's not really worth being more
+ * aggressive than that, since it scales pretty well; if 10 devices
+ * are plugged in, they're all getting read within one second.
+ */
+
+#define MAX_RETRIES 10
+#define RETRY_DELAY (3 * HZ)
+#define INITIAL_DELAY (HZ / 2)
+#define SHUTDOWN_DELAY (2 * HZ)
+
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
+ if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+ && !list_empty(&device->card->link)) {
+ schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+ return;
+ }
+
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_GONE,
+ FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
+ return;
+
fw_device_cdev_remove(device);
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&fw_device_rwsem);
+
fw_device_put(device);
}
.release = fw_device_release,
};
+static void fw_device_update(struct work_struct *work);
+
/*
- * These defines control the retry behavior for reading the config
- * rom. It shouldn't be necessary to tweak these; if the device
- * doesn't respond to a config rom read within 10 seconds, it's not
- * going to respond at all. As for the initial delay, a lot of
- * devices will be able to respond within half a second after bus
- * reset. On the other hand, it's not really worth being more
- * aggressive than that, since it scales pretty well; if 10 devices
- * are plugged in, they're all getting read within one second.
+ * If a device was pending deletion because its node went away, but its
+ * bus info block and root directory header match those of a newly
+ * discovered device, revive the existing fw_device.
+ * The newly allocated fw_device becomes obsolete instead.
*/
+static int lookup_existing_device(struct device *dev, void *data)
+{
+ struct fw_device *old = fw_device(dev);
+ struct fw_device *new = data;
+ struct fw_card *card = new->card;
+ int match = 0;
+
+ down_read(&fw_device_rwsem); /* serialize config_rom access */
+ spin_lock_irq(&card->lock); /* serialize node access */
+
+ if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
+ atomic_cmpxchg(&old->state,
+ FW_DEVICE_GONE,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+ struct fw_node *current_node = new->node;
+ struct fw_node *obsolete_node = old->node;
+
+ new->node = obsolete_node;
+ new->node->data = new;
+ old->node = current_node;
+ old->node->data = old;
+
+ old->max_speed = new->max_speed;
+ old->node_id = current_node->node_id;
+ smp_wmb(); /* update node_id before generation */
+ old->generation = card->generation;
+ old->config_rom_retries = 0;
+ fw_notify("rediscovered device %s\n", dev_name(dev));
-#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
+ PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ schedule_delayed_work(&old->work, 0);
+
+ if (current_node == card->root_node)
+ fw_schedule_bm_work(card, 0);
+
+ match = 1;
+ }
+
+ spin_unlock_irq(&card->lock);
+ up_read(&fw_device_rwsem);
+
+ return match;
+}
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
+ struct device *revived_dev;
int minor, err;
/*
return;
}
+ revived_dev = device_find_child(device->card->device,
+ device, lookup_existing_device);
+ if (revived_dev) {
+ put_device(revived_dev);
+ fw_device_release(&device->device);
+
+ return;
+ }
+
device_initialize(&device->device);
fw_device_get(device);
* fw_node_event().
*/
if (atomic_cmpxchg(&device->state,
- FW_DEVICE_INITIALIZING,
- FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
- fw_device_shutdown(work);
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+ PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
} else {
if (device->config_rom_retries)
fw_notify("created device %s: GUID %08x%08x, S%d00, "
case REREAD_BIB_UNCHANGED:
if (atomic_cmpxchg(&device->state,
- FW_DEVICE_INITIALIZING,
- FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_device_update(work);
create_units(device);
if (atomic_cmpxchg(&device->state,
- FW_DEVICE_INITIALIZING,
- FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_notify("refreshed device %s\n", dev_name(&device->device));
give_up:
fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
gone:
- atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
- fw_device_shutdown(work);
+ atomic_set(&device->state, FW_DEVICE_GONE);
+ PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
*/
device = node->data;
if (atomic_xchg(&device->state,
- FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
+ FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
- schedule_delayed_work(&device->work, 0);
+ schedule_delayed_work(&device->work,
+ list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
break;
}
enum fw_device_state {
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING,
+ FW_DEVICE_GONE,
FW_DEVICE_SHUTDOWN,
};
#define CONTEXT_DEAD 0x0800
#define CONTEXT_ACTIVE 0x0400
-#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
+#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
for (i = 0; i < 10; i++) {
reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
if ((reg & CONTEXT_ACTIVE) == 0)
- break;
+ return;
- fw_notify("context_stop: still active (0x%08x)\n", reg);
mdelay(1);
}
+ fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
int address_high;
unsigned int workarounds;
unsigned int mgt_orb_timeout;
+ unsigned int max_payload;
int dont_block; /* counter for each logical unit */
int blocked; /* ditto */
dma_addr_t page_table_bus;
};
+#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
+#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
+
/*
* List of devices with known bugs.
*
* The firmware_revision field, masked with 0xffff00, is the best
* indicator for the type of bridge chip of a device. It yields a few
* false positives but this did not break correctly behaving devices
- * so far. We use ~0 as a wildcard, since the 24 bit values we get
- * from the config rom can never match that.
+ * so far.
*/
static const struct {
u32 firmware_revision;
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
- .model = ~0,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_INQUIRY_36,
},
/* PL-3507 bridge with Prolific firmware */ {
.firmware_revision = 0x012800,
- .model = ~0,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Symbios bridge */ {
.firmware_revision = 0xa0b800,
- .model = ~0,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
/* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
.firmware_revision = 0x002600,
- .model = ~0,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
-
/*
- * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
- * these iPods do not feature the read_capacity bug according
- * to one report. Read_capacity behaviour as well as model_id
- * could change due to Apple-supplied firmware updates though.
+ * iPod 2nd generation: needs 128k max transfer size workaround
+ * iPod 3rd generation: needs fix capacity workaround
*/
-
- /* iPod 4th generation. */ {
+ {
+ .firmware_revision = 0x0a2700,
+ .model = 0x000000,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
+ SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod 4th generation */ {
.firmware_revision = 0x0a2700,
.model = 0x000021,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
continue;
if (sbp2_workarounds_table[i].model != model &&
- sbp2_workarounds_table[i].model != ~0)
+ sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
continue;
w |= sbp2_workarounds_table[i].workarounds;
fw_device_get(device);
fw_unit_get(unit);
- /* Initialize to values that won't match anything in our table. */
- firmware_revision = 0xff000000;
- model = 0xff000000;
-
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
+ firmware_revision = SBP2_ROM_VALUE_MISSING;
+ model = SBP2_ROM_VALUE_MISSING;
+
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
goto fail_tgt_put;
sbp2_init_workarounds(tgt, model, firmware_revision);
+ /*
+ * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
+ * and so on up to 4096 bytes. The SBP-2 max_payload field
+ * specifies the max payload size as 2 ^ (max_payload + 2), so
+ * if we set this to max_speed + 7, we get the right value.
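+	 * For example, at S400 (max_speed == 2) max_payload becomes 9,
+	 * giving 2 ^ (9 + 2) = 2048 bytes per packet.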
+ */
+ tgt->max_payload = min(device->max_speed + 7, 10U);
+ tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
.id_table = sbp2_id_table,
};
+static void sbp2_unmap_scatterlist(struct device *card_device,
+ struct sbp2_command_orb *orb)
+{
+ if (scsi_sg_count(orb->cmd))
+ dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
+ scsi_sg_count(orb->cmd),
+ orb->cmd->sc_data_direction);
+
+ if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
+ dma_unmap_single(card_device, orb->page_table_bus,
+ sizeof(orb->page_table), DMA_TO_DEVICE);
+}
+
static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
-
- if (scsi_sg_count(orb->cmd) > 0)
- dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
- scsi_sg_count(orb->cmd),
- orb->cmd->sc_data_direction);
-
- if (orb->page_table_bus != 0)
- dma_unmap_single(device->card->device, orb->page_table_bus,
- sizeof(orb->page_table), DMA_TO_DEVICE);
+ sbp2_unmap_scatterlist(device->card->device, orb);
orb->cmd->result = result;
orb->done(orb->cmd);
struct sbp2_logical_unit *lu = cmd->device->hostdata;
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_command_orb *orb;
- unsigned int max_payload;
int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
/*
orb->done = done;
orb->cmd = cmd;
- orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
- /*
- * At speed 100 we can do 512 bytes per packet, at speed 200,
- * 1024 bytes per packet etc. The SBP-2 max_payload field
- * specifies the max payload size as 2 ^ (max_payload + 2), so
- * if we set this to max_speed + 7, we get the right value.
- */
- max_payload = min(device->max_speed + 7,
- device->card->max_receive - 1);
+ orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
orb->request.misc = cpu_to_be32(
- COMMAND_ORB_MAX_PAYLOAD(max_payload) |
+ COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
COMMAND_ORB_SPEED(device->max_speed) |
COMMAND_ORB_NOTIFY);
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
- if (dma_mapping_error(device->card->device, orb->base.request_bus))
+ if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
+ sbp2_unmap_scatterlist(device->card->device, orb);
goto out;
+ }
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
lu->command_block_agent_address + SBP2_ORB_POINTER);
struct fw_node *local_node;
unsigned long flags;
+ /*
+ * If the selfID buffer is not the immediate successor of the
+ * previously processed one, we cannot reliably compare the
+ * old and new topologies.
+ */
+ if (!is_next_generation(generation, card->generation) &&
+ card->local_node != NULL) {
+ fw_notify("skipped bus generations, destroying all nodes\n");
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+
spin_lock_irqsave(&card->lock, flags);
card->node_id = node_id;
extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+/*
+ * Check whether new_generation is the immediate successor of old_generation.
+ * Take counter roll-over at 255 (as per OHCI) into account.
+ */
+static inline bool is_next_generation(int new_generation, int old_generation)
+{
+ return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
+}
+
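The roll-over arithmetic is easy to get wrong, so here is a minimal, self-contained check of the helper above (the test values are illustrative):

#include <assert.h>
#include <stdbool.h>

static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}

int main(void)
{
	assert(is_next_generation(5, 4));	/* ordinary successor */
	assert(is_next_generation(0, 255));	/* counter wraps at 255 */
	assert(!is_next_generation(2, 0));	/* a generation was skipped */
	return 0;
}

A skipped generation is exactly the case the self-ID handler above reacts to by destroying all nodes.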
/*
* The iso packet format allows for an immediate header/payload part
* stored in 'header' immediately after the packet info plus an
} else {
status = -EBUSY;
module_put(chip->owner);
+ goto done;
}
if (chip->request) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_lock(&dev->mode_config.mutex);
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL_GPL(hid_connect);
+/* a list of devices for which there is a specialized driver on the HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
.uevent = hid_uevent,
};
+/* a list of devices that shouldn't be handled by the HID core at all */
static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD 0x0038
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2 0x0036
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3 0x0034
+#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4 0x0044
+#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5 0x0045
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
#define MS_NOGET 0x10
/*
- * Microsoft Wireless Desktop Receiver (Model 1028) has several
+ * Microsoft Wireless Desktop Receiver (Model 1028) has
* 'Usage Min/Max' where it ought to have 'Physical Min/Max'
*/
static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
- if ((quirks & MS_RDESC) && rsize == 571 && rdesc[284] == 0x19 &&
- rdesc[286] == 0x2a && rdesc[304] == 0x19 &&
- rdesc[306] == 0x29 && rdesc[352] == 0x1a &&
- rdesc[355] == 0x2a && rdesc[557] == 0x19 &&
+ if ((quirks & MS_RDESC) && rsize == 571 && rdesc[557] == 0x19 &&
rdesc[559] == 0x29) {
dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
"Model 1028 report descriptor\n");
- rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
- rdesc[352] = 0x36;
- rdesc[286] = rdesc[355] = 0x46;
- rdesc[306] = rdesc[559] = 0x45;
+ rdesc[557] = 0x35;
+ rdesc[559] = 0x45;
}
}
case HIDIOCGSTRING:
mutex_lock(&hiddev->existancelock);
- if (!hiddev->exist)
+ if (hiddev->exist)
r = hiddev_ioctl_string(hiddev, cmd, user_arg);
else
r = -ENODEV;
/*
* Temperature sensors keys (sp78 - 2 bytes).
*/
-static const char* temperature_sensors_sets[][36] = {
+static const char *temperature_sensors_sets[][41] = {
/* Set 0: Macbook Pro */
{ "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H",
"Th1H", "Tm0P", "Ts0P", "Ts1P", NULL },
{ "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0",
"TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P",
"Ts0S", NULL },
+/* Set 16: Mac Pro 3,1 (2 x Quad-Core) */
+ { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
+ "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "TH0P", "TH1P",
+ "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", "TM1P",
+ "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
+ "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
+ NULL },
};
/* List of keys used to read/write fan speeds */
applesmc_show_temperature, NULL, 33);
static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO,
applesmc_show_temperature, NULL, 34);
+static SENSOR_DEVICE_ATTR(temp36_input, S_IRUGO,
+ applesmc_show_temperature, NULL, 35);
+static SENSOR_DEVICE_ATTR(temp37_input, S_IRUGO,
+ applesmc_show_temperature, NULL, 36);
+static SENSOR_DEVICE_ATTR(temp38_input, S_IRUGO,
+ applesmc_show_temperature, NULL, 37);
+static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
+ applesmc_show_temperature, NULL, 38);
+static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
+ applesmc_show_temperature, NULL, 39);
static struct attribute *temperature_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp33_input.dev_attr.attr,
&sensor_dev_attr_temp34_input.dev_attr.attr,
&sensor_dev_attr_temp35_input.dev_attr.attr,
+ &sensor_dev_attr_temp36_input.dev_attr.attr,
+ &sensor_dev_attr_temp37_input.dev_attr.attr,
+ &sensor_dev_attr_temp38_input.dev_attr.attr,
+ &sensor_dev_attr_temp39_input.dev_attr.attr,
+ &sensor_dev_attr_temp40_input.dev_attr.attr,
NULL
};
{ .accelerometer = 0, .light = 0, .temperature_set = 14 },
/* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */
{ .accelerometer = 1, .light = 1, .temperature_set = 15 },
+/* MacPro3,1: temperature set 16 */
+ { .accelerometer = 0, .light = 0, .temperature_set = 16 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
&applesmc_dmi_data[4]},
+ { applesmc_dmi_match, "Apple MacPro3", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacPro3") },
+ &applesmc_dmi_data[16]},
{ applesmc_dmi_match, "Apple MacPro", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
#define IEEE1394_SPEED_800 0x03
#define IEEE1394_SPEED_1600 0x04
#define IEEE1394_SPEED_3200 0x05
-
-/* The current highest tested speed supported by the subsystem */
-#define IEEE1394_SPEED_MAX IEEE1394_SPEED_800
+#define IEEE1394_SPEED_MAX IEEE1394_SPEED_3200
/* Maps speed values above to a string representation */
extern const char *hpsb_speedto_str[];
u8 cldcnt[nodecount];
u8 *map = host->speed_map;
u8 *speedcap = host->speed;
+ u8 local_link_speed = host->csr.lnk_spd;
struct selfid *sid;
struct ext_selfid *esid;
int i, j, n;
if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
speedcap[n] = sid->speed;
- if (speedcap[n] > host->csr.lnk_spd)
- speedcap[n] = host->csr.lnk_spd;
+ if (speedcap[n] > local_link_speed)
+ speedcap[n] = local_link_speed;
n--;
}
}
}
}
-#if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX
- /* assume maximum speed for 1394b PHYs, nodemgr will correct it */
- for (n = 0; n < nodecount; n++)
- if (speedcap[n] == SELFID_SPEED_UNKNOWN)
- speedcap[n] = IEEE1394_SPEED_MAX;
-#endif
+ /* assume a maximum speed for 1394b PHYs, nodemgr will correct it */
+ if (local_link_speed > SELFID_SPEED_UNKNOWN)
+ for (i = 0; i < nodecount; i++)
+ if (speedcap[i] == SELFID_SPEED_UNKNOWN)
+ speedcap[i] = local_link_speed;
}
#define OHCI1394_DRIVER_NAME "ohci1394"
-#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
+#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
#define OHCI1394_MAX_SELF_ID_ERRORS 16
*/
static int sbp2_max_speed = IEEE1394_SPEED_MAX;
module_param_named(max_speed, sbp2_max_speed, int, 0644);
-MODULE_PARM_DESC(max_speed, "Force max speed "
- "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)");
+MODULE_PARM_DESC(max_speed, "Limit data transfer speed (5 <= 3200, "
+ "4 <= 1600, 3 <= 800, 2 <= 400, 1 <= 200, 0 = 100 Mb/s)");
/*
* Set serialize_io to 0 or N to use dynamically appended lists of command ORBs.
static int sbp2_max_speed_and_size(struct sbp2_lu *);
-static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
+static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xa, 0xa, 0xa };
static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
.sdev_attrs = sbp2_sysfs_sdev_attrs,
};
-/* for match-all entries in sbp2_workarounds_table */
-#define SBP2_ROM_VALUE_WILDCARD 0x1000000
+#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
+#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
/*
* List of devices with known bugs.
*/
static const struct {
u32 firmware_revision;
- u32 model_id;
+ u32 model;
unsigned workarounds;
} sbp2_workarounds_table[] = {
/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
.firmware_revision = 0x002800,
- .model_id = 0x001010,
+ .model = 0x001010,
.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
SBP2_WORKAROUND_MODE_SENSE_8 |
SBP2_WORKAROUND_POWER_CONDITION,
},
/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
.firmware_revision = 0x002800,
- .model_id = 0x000000,
+ .model = 0x000000,
.workarounds = SBP2_WORKAROUND_DELAY_INQUIRY |
SBP2_WORKAROUND_POWER_CONDITION,
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
- .model_id = SBP2_ROM_VALUE_WILDCARD,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_INQUIRY_36,
},
/* PL-3507 bridge with Prolific firmware */ {
.firmware_revision = 0x012800,
- .model_id = SBP2_ROM_VALUE_WILDCARD,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Symbios bridge */ {
.firmware_revision = 0xa0b800,
- .model_id = SBP2_ROM_VALUE_WILDCARD,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
/* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
.firmware_revision = 0x002600,
- .model_id = SBP2_ROM_VALUE_WILDCARD,
+ .model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
+ /*
+ * iPod 2nd generation: needs 128k max transfer size workaround
+ * iPod 3rd generation: needs fix capacity workaround
+ */
+ {
+ .firmware_revision = 0x0a2700,
+ .model = 0x000000,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
+ SBP2_WORKAROUND_FIX_CAPACITY,
+ },
/* iPod 4th generation */ {
.firmware_revision = 0x0a2700,
- .model_id = 0x000021,
+ .model = 0x000021,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod mini */ {
.firmware_revision = 0x0a2700,
- .model_id = 0x000022,
+ .model = 0x000022,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod mini */ {
.firmware_revision = 0x0a2700,
- .model_id = 0x000023,
+ .model = 0x000023,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod Photo */ {
.firmware_revision = 0x0a2700,
- .model_id = 0x00007e,
+ .model = 0x00007e,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
}
};
struct csr1212_keyval *kv;
struct csr1212_dentry *dentry;
u64 management_agent_addr;
- u32 unit_characteristics, firmware_revision;
+ u32 unit_characteristics, firmware_revision, model;
unsigned workarounds;
int i;
management_agent_addr = 0;
unit_characteristics = 0;
- firmware_revision = 0;
+ firmware_revision = SBP2_ROM_VALUE_MISSING;
+ model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
+ ud->model_id : SBP2_ROM_VALUE_MISSING;
csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
switch (kv->key.id) {
sbp2_workarounds_table[i].firmware_revision !=
(firmware_revision & 0xffff00))
continue;
- if (sbp2_workarounds_table[i].model_id !=
+ if (sbp2_workarounds_table[i].model !=
SBP2_ROM_VALUE_WILDCARD &&
- sbp2_workarounds_table[i].model_id != ud->model_id)
+ sbp2_workarounds_table[i].model != model)
continue;
workarounds |= sbp2_workarounds_table[i].workarounds;
break;
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
workarounds, firmware_revision,
ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
- ud->model_id);
+ model);
/* We would need one SCSI host template for each target to adjust
* max_sectors on the fly, therefore warn only. */
break;
/* If the Guest asked to be stopped, we sleep. The Guest's
- * clock timer or LHCALL_BREAK from the Waker will wake us. */
+ * clock timer or LHREQ_BREAK from the Waker will wake us. */
if (cpu->halted) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
* kmalloc()ed string, either of which is ok to hand to kfree(). */
if (!IS_ERR(lg->dead))
kfree(lg->dead);
- /* We clear the entire structure, which also marks it as free for the
- * next user. */
- memset(lg, 0, sizeof(*lg));
+ /* Free the memory allocated to the lguest_struct */
+ kfree(lg);
/* Release lock and exit. */
mutex_unlock(&lguest_lock);
class_destroy(ilo_class);
}
-MODULE_VERSION("0.05");
+MODULE_VERSION("0.06");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
return;
-
- DBUG_ON(ch->local_msgqueue == NULL);
- DBUG_ON(ch->remote_msgqueue == NULL);
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
int n_IRQs_expected;
int n_IRQs_detected;
- DBUG_ON(xpc_activate_IRQ_rcvd == 0);
-
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
n_IRQs_expected = xpc_activate_IRQ_rcvd;
xpc_activate_IRQ_rcvd = 0;
msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
(get % ch->local_nentries) *
ch->entry_size);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
msg->flags = 0;
} while (++get < ch_sn2->remote_GP.get);
}
struct xpc_msg_sn2 *msg;
s64 put;
- put = ch_sn2->w_remote_GP.put;
+ /* flags are zeroed when the buffer is allocated */
+ if (ch_sn2->remote_GP.put < ch->remote_nentries)
+ return;
+
+ put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries);
do {
msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
(put % ch->remote_nentries) *
ch->entry_size);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+ DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
+ DBUG_ON(msg->number != put - ch->remote_nentries);
msg->flags = 0;
} while (++put < ch_sn2->remote_GP.put);
}
*/
xpc_clear_remote_msgqueue_flags_sn2(ch);
+ smp_wmb(); /* ensure flags have been cleared before bte_copy */
ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
break;
get = ch_sn2->w_local_GP.get;
- rmb(); /* guarantee that .get loads before .put */
+ smp_rmb(); /* guarantee that .get loads before .put */
if (get == ch_sn2->w_remote_GP.put)
break;
msg = xpc_pull_remote_msg_sn2(ch, get);
- DBUG_ON(msg != NULL && msg->number != get);
- DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE));
- DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY));
+ if (msg != NULL) {
+ DBUG_ON(msg->number != get);
+ DBUG_ON(msg->flags & XPC_M_SN2_DONE);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
- payload = &msg->payload;
+ payload = &msg->payload;
+ }
break;
}
while (1) {
put = ch_sn2->w_local_GP.put;
- rmb(); /* guarantee that .put loads before .get */
+ smp_rmb(); /* guarantee that .put loads before .get */
if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
/* There are available message entries. We need to try
* The preceding store of msg->flags must occur before the following
* load of local_GP->put.
*/
- mb();
+ smp_mb();
/* see if the message is next in line to be sent, if so send it */
dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
(void *)msg, msg_number, ch->partid, ch->number);
- DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) !=
+ DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
msg_number % ch->remote_nentries);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
DBUG_ON(msg->flags & XPC_M_SN2_DONE);
msg->flags |= XPC_M_SN2_DONE;
* The preceding store of msg->flags must occur before the following
* load of local_GP->get.
*/
- mb();
+ smp_mb();
/*
* See if this message is next in line to be acknowledged as having
atomic_inc(&ch->n_to_notify);
msg_slot->key = key;
- wmb(); /* a non-NULL func must hit memory after the key */
+ smp_wmb(); /* a non-NULL func must hit memory after the key */
msg_slot->func = func;
if (ch->flags & XPC_C_DISCONNECTING) {
This option switches the background thread off by default. The thread
may also be enabled/disabled via UBI sysfs.
-config MTD_UBI_DEBUG_USERSPACE_IO
- bool "Direct user-space write/erase support"
- default n
- depends on MTD_UBI_DEBUG
- help
- By default, users cannot directly write and erase individual
- eraseblocks of dynamic volumes, and have to use update operation
- instead. This option enables this capability - it is very useful for
- debugging and testing.
-
config MTD_UBI_DEBUG_EMULATE_BITFLIPS
bool "Emulate flash bit-flips"
depends on MTD_UBI_DEBUG
return ret;
}
-/* Fake "release" method for UBI devices */
-static void dev_release(struct device *dev) { }
+static void dev_release(struct device *dev)
+{
+ struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
+
+ kfree(ubi);
+}
/**
* ubi_sysfs_init - initialize sysfs for an UBI device.
*/
static int uif_init(struct ubi_device *ubi)
{
- int i, err, do_free = 0;
+ int i, err;
dev_t dev;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
out_volumes:
kill_volumes(ubi);
- do_free = 0;
out_sysfs:
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
out_unreg:
- if (do_free)
- free_user_volumes(ubi);
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
return err;
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
+ /*
+ * Get a reference to the device in order to prevent 'dev_release()'
+ * from freeing the @ubi object.
+ */
+ get_device(&ubi->dev);
+
uif_close(ubi);
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->dbg_peb_buf);
#endif
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
- kfree(ubi);
+ put_device(&ubi->dev);
return 0;
}
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
-#include <linux/smp_lock.h>
+#include <linux/compat.h>
+#include <linux/math64.h>
#include <mtd/ubi-user.h>
-#include <asm/div64.h>
#include "ubi.h"
/**
int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
- uint64_t tmp;
dbg_gen("read %zd bytes from offset %lld of volume %d",
count, *offp, vol->vol_id);
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
-
- tmp = *offp;
- off = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
do {
cond_resched();
return err ? err : count_save - count;
}
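
The do_div() to div_u64_rem() conversions in this series all follow the same pattern: the 64-bit file position is split into a logical eraseblock number (the quotient) and an offset inside that eraseblock (the remainder). A rough model of the semantics, using plain 64-bit division in place of the kernel helper:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's div_u64_rem(). */
static uint64_t div_u64_rem_model(uint64_t dividend, uint32_t divisor,
				  uint32_t *remainder)
{
	*remainder = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	uint32_t leb_size = 126976;	/* an example usable LEB size */
	uint32_t off;
	uint64_t lnum = div_u64_rem_model(3 * (uint64_t)leb_size + 100,
					  leb_size, &off);

	assert(lnum == 3 && off == 100);	/* LEB 3, offset 100 */
	return 0;
}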
-#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
-
/*
 * This function allows writing directly to dynamic UBI volumes, without
- * issuing the volume update operation. Available only as a debugging feature.
- * Very useful for testing UBI.
+ * issuing the volume update operation.
*/
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
- uint64_t tmp;
+
+ if (!vol->direct_writes)
+ return -EPERM;
dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- tmp = *offp;
- off = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
-
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
dbg_err("unaligned position");
return -EINVAL;
return err ? err : count_save - count;
}
-#else
-#define vol_cdev_direct_write(file, buf, count, offp) (-EPERM)
-#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
-
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
return count;
}
-static int vol_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
break;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
/* Logical eraseblock erasure command */
case UBI_IOCEBER:
{
err = ubi_wl_flush(ubi);
break;
}
-#endif
+
+ /* Logical eraseblock map command */
+ case UBI_IOCEBMAP:
+ {
+ struct ubi_map_req req;
+
+ err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_map(desc, req.lnum, req.dtype);
+ break;
+ }
+
+ /* Logical eraseblock un-map command */
+ case UBI_IOCEBUNMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_unmap(desc, lnum);
+ break;
+ }
+
+	/* Logical eraseblock is-mapped query command */
+ case UBI_IOCEBISMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_is_mapped(desc, lnum);
+ break;
+ }
+
+	/* Set volume property command */
+ case UBI_IOCSETPROP:
+ {
+ struct ubi_set_prop_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_set_prop_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ switch (req.property) {
+ case UBI_PROP_DIRECT_WRITE:
+ mutex_lock(&ubi->volumes_mutex);
+ desc->vol->direct_writes = !!req.value;
+ mutex_unlock(&ubi->volumes_mutex);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ break;
+ }
default:
err = -ENOTTY;
break;
}
-
return err;
}
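
A user-space sketch of how the new property ioctl might be used to enable direct writes on a volume. It assumes <mtd/ubi-user.h> exports UBI_IOCSETPROP, UBI_PROP_DIRECT_WRITE and struct ubi_set_prop_req as introduced above; /dev/ubi0_0 is just an example node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	struct ubi_set_prop_req req;
	int fd = open("/dev/ubi0_0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.property = UBI_PROP_DIRECT_WRITE;
	req.value = 1;	/* vol_cdev_direct_write() returns -EPERM otherwise */
	if (ioctl(fd, UBI_IOCSETPROP, &req) < 0)
		perror("UBI_IOCSETPROP");
	close(fd);
	return 0;
}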
return err;
}
-static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
struct ubi_device *ubi;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- ubi = ubi_get_by_major(imajor(inode));
+ ubi = ubi_get_by_major(imajor(file->f_mapping->host));
if (!ubi)
return -ENODEV;
case UBI_IOCRSVOL:
{
int pebs;
- uint64_t tmp;
struct ubi_rsvol_req req;
dbg_gen("re-size volume");
break;
}
- tmp = req.bytes;
- pebs = !!do_div(tmp, desc->vol->usable_leb_size);
- pebs += tmp;
+ pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
+ desc->vol->usable_leb_size);
mutex_lock(&ubi->volumes_mutex);
err = ubi_resize_volume(desc, pebs);
return err;
}
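
The resize path above rounds the requested byte count up to whole eraseblocks with the usual add-divisor-minus-one idiom. A quick sanity check of that arithmetic (div_u64() is modelled with plain 64-bit division here):

#include <assert.h>
#include <stdint.h>

static uint64_t div_u64_model(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint32_t leb_size = 126976;	/* an example usable LEB size */

	/* one byte more than a full LEB still needs two PEBs */
	assert(div_u64_model((uint64_t)leb_size + 1 + leb_size - 1,
			     leb_size) == 2);
	/* an exact multiple of the LEB size needs no extra PEB */
	assert(div_u64_model(2 * (uint64_t)leb_size + leb_size - 1,
			     leb_size) == 2);
	return 0;
}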
-static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
return err;
}
-/* UBI control character device operations */
-struct file_operations ubi_ctrl_cdev_operations = {
- .ioctl = ctrl_cdev_ioctl,
- .owner = THIS_MODULE,
+#ifdef CONFIG_COMPAT
+static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return vol_cdev_ioctl(file, cmd, translated_arg);
+}
+
+static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ubi_cdev_ioctl(file, cmd, translated_arg);
+}
+
+static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ctrl_cdev_ioctl(file, cmd, translated_arg);
+}
+#else
+#define vol_cdev_compat_ioctl NULL
+#define ubi_cdev_compat_ioctl NULL
+#define ctrl_cdev_compat_ioctl NULL
+#endif
+
+/* UBI volume character device operations */
+const struct file_operations ubi_vol_cdev_operations = {
+ .owner = THIS_MODULE,
+ .open = vol_cdev_open,
+ .release = vol_cdev_release,
+ .llseek = vol_cdev_llseek,
+ .read = vol_cdev_read,
+ .write = vol_cdev_write,
+ .unlocked_ioctl = vol_cdev_ioctl,
+ .compat_ioctl = vol_cdev_compat_ioctl,
};
/* UBI character device operations */
-struct file_operations ubi_cdev_operations = {
- .owner = THIS_MODULE,
- .ioctl = ubi_cdev_ioctl,
- .llseek = no_llseek,
+const struct file_operations ubi_cdev_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = ubi_cdev_ioctl,
+ .compat_ioctl = ubi_cdev_compat_ioctl,
};
-/* UBI volume character device operations */
-struct file_operations ubi_vol_cdev_operations = {
- .owner = THIS_MODULE,
- .open = vol_cdev_open,
- .release = vol_cdev_release,
- .llseek = vol_cdev_llseek,
- .read = vol_cdev_read,
- .write = vol_cdev_write,
- .ioctl = vol_cdev_ioctl,
+/* UBI control character device operations */
+const struct file_operations ubi_ctrl_cdev_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ctrl_cdev_ioctl,
+ .compat_ioctl = ctrl_cdev_compat_ioctl,
};
* eraseblock size is equivalent to the logical eraseblock size of the volume.
*/
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ubi.h"
/**
int err = 0, lnum, offs, total_read;
struct ubi_volume *vol;
struct ubi_device *ubi;
- uint64_t tmp = from;
dbg_gen("read %zd bytes from offset %lld", len, from);
vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
ubi = vol->ubi;
- offs = do_div(tmp, mtd->erasesize);
- lnum = tmp;
-
+ lnum = div_u64_rem(from, mtd->erasesize, &offs);
total_read = len;
while (total_read) {
size_t to_read = mtd->erasesize - offs;
int err = 0, lnum, offs, total_written;
struct ubi_volume *vol;
struct ubi_device *ubi;
- uint64_t tmp = to;
dbg_gen("write %zd bytes to offset %lld", len, to);
if (ubi->ro_mode)
return -EROFS;
- offs = do_div(tmp, mtd->erasesize);
- lnum = tmp;
+ lnum = div_u64_rem(to, mtd->erasesize, &offs);
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
#include <linux/err.h>
#include <linux/crc32.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ubi.h"
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
dbg_msg("scanning is finished");
/* Calculate mean erase counter */
- if (si->ec_count) {
- do_div(si->ec_sum, si->ec_count);
- si->mean_ec = si->ec_sum;
- }
+ if (si->ec_count)
+ si->mean_ec = div_u64(si->ec_sum, si->ec_count);
if (si->is_empty)
ubi_msg("empty MTD device detected");
* @upd_marker: %1 if the update marker is set for this volume
* @updating: %1 if the volume is being updated
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ * @direct_writes: %1 if direct writes are enabled for this volume
*
* @gluebi_desc: gluebi UBI volume descriptor
* @gluebi_refcount: reference count of the gluebi MTD device
unsigned int upd_marker:1;
unsigned int updating:1;
unsigned int changing_leb:1;
+ unsigned int direct_writes:1;
#ifdef CONFIG_MTD_UBI_GLUEBI
/*
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
* @volumes_mutex: protects on-flash volume table and serializes volume
- * changes, like creation, deletion, update, re-size and re-name
+ *		   changes, like creation, deletion, update, re-size,
+ *		   re-name and property setting
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
};
extern struct kmem_cache *ubi_wl_entry_slab;
-extern struct file_operations ubi_ctrl_cdev_operations;
-extern struct file_operations ubi_cdev_operations;
-extern struct file_operations ubi_vol_cdev_operations;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
#include <linux/err.h>
#include <linux/uaccess.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ubi.h"
/**
long long bytes)
{
int err;
- uint64_t tmp;
struct ubi_vtbl_record vtbl_rec;
dbg_gen("clear update marker for volume %d", vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME) {
vol->corrupted = 0;
- vol->used_bytes = tmp = bytes;
- vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
- vol->used_ebs = tmp;
+ vol->used_bytes = bytes;
+ vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+ &vol->last_eb_bytes);
if (vol->last_eb_bytes)
vol->used_ebs += 1;
else
long long bytes)
{
int i, err;
- uint64_t tmp;
dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
ubi_assert(!vol->updating && !vol->changing_leb);
if (!vol->upd_buf)
return -ENOMEM;
- tmp = bytes;
- vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size);
- vol->upd_ebs += tmp;
+ vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
vol->upd_bytes = bytes;
vol->upd_received = 0;
return 0;
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
- uint64_t tmp;
int lnum, offs, err = 0, len, to_write = count;
dbg_gen("write %d of %lld bytes, %lld already passed",
if (ubi->ro_mode)
return -EROFS;
- tmp = vol->upd_received;
- offs = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
-
+ lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
if (vol->upd_received + count > vol->upd_bytes)
to_write = count = vol->upd_bytes - vol->upd_received;
*/
#include <linux/err.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ubi.h"
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
int i, err, vol_id = req->vol_id, do_free = 1;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
- uint64_t bytes;
dev_t dev;
if (ubi->ro_mode)
/* Calculate how many eraseblocks are requested */
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
- bytes = req->bytes;
- if (do_div(bytes, vol->usable_leb_size))
- vol->reserved_pebs = 1;
- vol->reserved_pebs += bytes;
+ vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
} else {
- bytes = vol->used_bytes;
- vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
- vol->used_ebs = bytes;
- if (vol->last_eb_bytes)
+ vol->used_ebs = div_u64_rem(vol->used_bytes,
+ vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes != 0)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.20-k3-NAPI"
+#define DRV_VERSION "7.3.21-k3-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
struct e1000_hw *hw = &adapter->hw;
u32 rctl, icr = er32(ICR);
- if (unlikely(!icr))
+ if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
return IRQ_NONE; /* Not our interrupt */
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
if (NULL == new_bus)
return -ENOMEM;
+ device_init_wakeup(&ofdev->dev, 1);
+
new_bus->name = "Gianfar MII Bus",
new_bus->read = &gfar_mdio_read,
new_bus->write = &gfar_mdio_write,
#define MAX_CMD_DESCRIPTORS_HOST 1024
#define MAX_RCV_DESCRIPTORS_1G 2048
#define MAX_RCV_DESCRIPTORS_10G 4096
-#define MAX_JUMBO_RCV_DESCRIPTORS 512
+#define MAX_JUMBO_RCV_DESCRIPTORS 1024
#define MAX_LRO_RCV_DESCRIPTORS 8
#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
}
for (i = 0; i < n; i++) {
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
- netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
+ netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
return -EIO;
+ }
buf[i].addr = addr;
buf[i].data = val;
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- struct pci_dev *pdev = lp->pdev;
int limit = 2048;
u16 *adrp;
u16 cmd;
break;
case SKFP_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN)) {
- memset(&lp->MacStat, 0, sizeof(lp->MacStat));
- } else {
status = -EPERM;
+ } else {
+ memset(&lp->MacStat, 0, sizeof(lp->MacStat));
}
break;
default:
do {
udelay(1);
val = smsc911x_reg_read(pdata, RX_DP_CTRL);
- } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
+ } while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));
if (unlikely(timeout == 0))
SMSC_WARNING(HW, "Timed out waiting for "
/* test the IRQ connection to the ISR */
smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+ pd->software_irq_signal = false;
spin_lock_irqsave(&pd->int_lock, flags);
/* configure interrupt deassertion timer and enable interrupts */
smsc9420_pci_flush_write(pd);
timeout = 1000;
- pd->software_irq_signal = false;
- smp_wmb();
while (timeout--) {
if (pd->software_irq_signal)
break;
Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
for more information on this driver.
+
+ DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
+	DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
	Hardware Reference Manual" is currently available at:
+ http://developer.intel.com/design/network/manuals/278074.htm
+
Please submit bugs to http://bugzilla.kernel.org/ .
*/
int csr12 = ioread32(ioaddr + CSR12);
int next_tick = 60*HZ;
int new_csr6 = 0;
+ int csr14 = ioread32(ioaddr + CSR14);
+ /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+ if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+ csr12 |= 6;
if (tulip_debug > 2)
printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
dev->name, csr12, medianame[dev->if_port]);
new_csr6 = 0x83860000;
dev->if_port = 3;
iowrite32(0, ioaddr + CSR13);
- iowrite32(0x0003FF7F, ioaddr + CSR14);
+ iowrite32(0x0003FFFF, ioaddr + CSR14);
iowrite16(8, ioaddr + CSR15);
iowrite32(1, ioaddr + CSR13);
}
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr12 = ioread32(ioaddr + CSR12);
+ int csr14 = ioread32(ioaddr + CSR14);
+ /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+ if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+ csr12 |= 6;
if (tulip_debug > 1)
printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
- "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
+ "%8.8x.\n", dev->name, csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
int negotiated = tp->sym_advertise & (csr12 >> 16);
tp->lpar = csr12 >> 16;
tp->nwayset = 1;
- if (negotiated & 0x0100) dev->if_port = 5;
+ /* If partner cannot negotiate, it is 10Mbps Half Duplex */
+ if (!(csr12 & 0x8000)) dev->if_port = 0;
+ else if (negotiated & 0x0100) dev->if_port = 5;
else if (negotiated & 0x0080) dev->if_port = 3;
else if (negotiated & 0x0040) dev->if_port = 4;
else if (negotiated & 0x0020) dev->if_port = 0;
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
} else if (dev->if_port == 5)
- iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+ iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
static int init_phy(struct net_device *dev)
{
struct ucc_geth_private *priv = netdev_priv(dev);
+ struct device_node *np = priv->node;
+ struct device_node *phy, *mdio;
+ const phandle *ph;
+ char bus_name[MII_BUS_ID_SIZE];
+ const unsigned int *id;
struct phy_device *phydev;
char phy_id[BUS_ID_SIZE];
priv->oldspeed = 0;
priv->oldduplex = -1;
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
- priv->ug_info->phy_address);
+ ph = of_get_property(np, "phy-handle", NULL);
+ phy = of_find_node_by_phandle(*ph);
+ mdio = of_get_parent(phy);
+
+ id = of_get_property(phy, "reg", NULL);
+
+ of_node_put(phy);
+ of_node_put(mdio);
+
+ uec_mdio_bus_name(bus_name, mdio);
+ snprintf(phy_id, sizeof(phy_id), "%s:%02x",
+ bus_name, *id);
phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
ugeth->ug_info = ug_info;
ugeth->dev = dev;
+ ugeth->node = np;
return 0;
}
int oldspeed;
int oldduplex;
int oldlink;
+
+ struct device_node *node;
};
void uec_set_ethtool_ops(struct net_device *netdev);
if (err)
goto reg_map_fail;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ uec_mdio_bus_name(new_bus->id, np);
new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
{
of_unregister_platform_driver(&uec_mdio_driver);
}
+
+void uec_mdio_bus_name(char *name, struct device_node *np)
+{
+ const u32 *reg;
+
+ reg = of_get_property(np, "reg", NULL);
+
+ snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
+}
+
int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
int __init uec_mdio_init(void);
void uec_mdio_exit(void);
+void uec_mdio_bus_name(char *name, struct device_node *np);
#endif /* __UEC_MII_H */
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_init_one(sg, hdr, sizeof(*hdr));
+ sg_set_buf(sg, hdr, sizeof(*hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_init_one(sg, mhdr, sizeof(*mhdr));
+ sg_set_buf(sg, mhdr, sizeof(*mhdr));
else
- sg_init_one(sg, hdr, sizeof(*hdr));
+ sg_set_buf(sg, hdr, sizeof(*hdr));
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
&fops_i2400m_reset);
}
-/*
- * Debug levels control; see debug.h
- */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(control),
- D_SUBMODULE_DEFINE(driver),
- D_SUBMODULE_DEFINE(debugfs),
- D_SUBMODULE_DEFINE(fw),
- D_SUBMODULE_DEFINE(netdev),
- D_SUBMODULE_DEFINE(rfkill),
- D_SUBMODULE_DEFINE(rx),
- D_SUBMODULE_DEFINE(tx),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \
EXPORT_SYMBOL_GPL(i2400m_release);
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(control),
+ D_SUBMODULE_DEFINE(driver),
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(fw),
+ D_SUBMODULE_DEFINE(netdev),
+ D_SUBMODULE_DEFINE(rfkill),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
static
int __init i2400m_driver_init(void)
{
 * it's done by resetting the chip. To accomplish this we must
 * first clean up any pending DMA, then restart stuff a la
 * ath5k_init.
+ *
+ * Called with sc->lock.
*/
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
struct ath5k_softc *sc = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ int ret;
+
+ mutex_lock(&sc->lock);
sc->bintval = conf->beacon_int;
sc->power_level = conf->power_level;
- return ath5k_chan_set(sc, conf->channel);
+ ret = ath5k_chan_set(sc, conf->channel);
+
+ mutex_unlock(&sc->lock);
+ return ret;
}
static int
priv->ucode_data_backup.len = data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+ !priv->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
/* Initialization instructions and data */
if (init_size && init_data_size) {
priv->ucode_init.len = init_size;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
cck_power = min(cck_power, (u8)11);
- ofdm_power = min(ofdm_power, (u8)35);
+ if (ofdm_power > (u8)15)
+ ofdm_power = 25;
+ else
+ ofdm_power += 10;
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
- ofdm_power = min(ofdm_power, (u8)15);
+ if (ofdm_power > (u8)15)
+ ofdm_power = 25;
+ else
+ ofdm_power += 10;
ofdm_power += priv->txpwr_base >> 4;
ofdm_power = min(ofdm_power, (u8)35);
* @dev: instance of PCI owned by the driver that's asking
* @mask: number of address bits this PCI device can handle
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static int sba_dma_supported( struct device *dev, u64 mask)
{
return(0);
}
- /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first,
- * then fall back to 32-bit if that fails.
+ /* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit
+ * first, then fall back to 32-bit if that fails.
* We are just "encouraging" 32-bit DMA masks here since we can
* never allow IOMMU bypass unless we add special support for ZX1.
*/
* @size: number of bytes to map in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/DMA-mapping.txt
+ * See Documentation/PCI/PCI-DMA-mapping.txt
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
case KE_SW:
set_bit(EV_SW, hp_wmi_input_dev->evbit);
set_bit(key->keycode, hp_wmi_input_dev->swbit);
+
+ /* Set initial dock state */
+ input_report_switch(hp_wmi_input_dev, key->keycode,
+ hp_wmi_dock_state());
+ input_sync(hp_wmi_input_dev);
break;
}
}
bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
bluetooth_rfkill->user_claim_unsupported = 1;
err = rfkill_register(bluetooth_rfkill);
+ if (err)
goto register_bluetooth_error;
}
channel->ch_bd->bd_ops->disable_receiver(channel);
}
+static void jsm_tty_enable_ms(struct uart_port *port)
+{
+ /* Nothing needed */
+}
+
static void jsm_tty_break(struct uart_port *port, int break_state)
{
unsigned long lock_flags;
.start_tx = jsm_tty_start_tx,
.send_xchar = jsm_tty_send_xchar,
.stop_rx = jsm_tty_stop_rx,
+ .enable_ms = jsm_tty_enable_ms,
.break_ctl = jsm_tty_break,
.startup = jsm_tty_open,
.shutdown = jsm_tty_close,
#ifndef AGNX_H_
#define AGNX_H_
+#include <linux/io.h>
+
#include "xmit.h"
#define PFX KBUILD_MODNAME ": "
goto fail;
/* allocate and map coherently-cached memory for a DMA-able buffer */
- /* @see 2.6.26.2/Documentation/DMA-mapping.txt line 318 */
+ /* @see Documentation/PCI/PCI-DMA-mapping.txt, near line 318 */
buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus);
if (!buffer_virt) {
printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n");
#if 1 // @todo For now, disable 64-bit, because I do not understand the implications (DAC!)
/* query for DMA transfer */
- /* @see Documentation/DMA-mapping.txt */
+ /* @see Documentation/PCI/PCI-DMA-mapping.txt */
if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
/* use 64-bit DMA */
int fd, error;
struct fdtable *fdt;
unsigned long rlim_cur;
+ unsigned long irqs;
if (files == NULL)
return -ESRCH;
* N.B. For clone tasks sharing a files structure, this test
* will limit the total number of files that can be opened.
*/
- rcu_read_lock();
- if (tsk->signal)
+ rlim_cur = 0;
+ if (lock_task_sighand(tsk, &irqs)) {
rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
- else
- rlim_cur = 0;
- rcu_read_unlock();
+ unlock_task_sighand(tsk, &irqs);
+ }
if (fd >= rlim_cur)
goto out;
{
struct binder_proc *proc = vma->vm_private_data;
if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
- printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+ printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
dump_stack();
}
static void binder_vma_close(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
- printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+ printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
proc->vma = NULL;
}
vma->vm_end = vma->vm_start + SZ_4M;
if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
- printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot);
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
ret = -EPERM;
--- /dev/null
+The lowmemorykiller driver lets user-space specify a set of memory thresholds
+where processes with a range of oom_adj values will get killed. Specify the
+minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
+number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
+files take a comma-separated list of numbers in ascending order.
+
+For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+"1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes
+with an oom_adj value of 8 or higher when the free memory drops below 4096 pages
+and kill processes with an oom_adj value of 0 or higher when the free memory
+drops below 1024 pages.
+
+The driver considers memory used for caches to be free, but if a large
+percentage of the cached memory is locked, this can be very inaccurate
+and processes may not get killed until the normal oom killer is triggered.
+
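The thresholds can be set from a script or, equivalently, from C. A hypothetical helper applying the example configuration from the text (the sysfs paths are the module parameters described above; everything else is illustrative):

#include <stdio.h>

static int write_param(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* kill oom_adj >= 0 below 1024 free pages, oom_adj >= 8 below 4096 */
	if (write_param("/sys/module/lowmemorykiller/parameters/adj",
			"0,8") ||
	    write_param("/sys/module/lowmemorykiller/parameters/minfree",
			"1024,4096"))
		perror("lowmemorykiller parameters");
	return 0;
}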
#include <linux/platform_device.h>
#include <linux/hrtimer.h>
#include <linux/err.h>
-#include <asm/arch/gpio.h>
+#include <linux/gpio.h>
#include "timed_gpio.h"
if (hrtimer_active(&gpio_data->timer)) {
ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
- remaining = r.tv.sec * 1000 + r.tv.nsec / 1000000;
+ struct timeval t = ktime_to_timeval(r);
+		remaining = t.tv_sec * 1000 + t.tv_usec / 1000;
} else
remaining = 0;
config COMEDI
tristate "Data Acquision support (comedi)"
default N
+ depends on m
---help---
	  Enable support for a wide range of data acquisition devices
for Linux.
menuconfig MEILHAUS
tristate "Meilhaus support"
+ depends on m
---help---
If you have a Meilhaus card, say Y (or M) here.
config ME0600
tristate "Meilhaus ME-600 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-600 family of boards
that do data collection and multipurpose I/O.
config ME0900
tristate "Meilhaus ME-900 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-900 family of boards
that do data collection and multipurpose I/O.
config ME1000
tristate "Meilhaus ME-1000 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-1000 family of boards
that do data collection and multipurpose I/O.
config ME1400
tristate "Meilhaus ME-1400 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-1400 family of boards
that do data collection and multipurpose I/O.
config ME1600
tristate "Meilhaus ME-1600 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-1600 family of boards
that do data collection and multipurpose I/O.
config ME4600
tristate "Meilhaus ME-4600 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-4600 family of boards
that do data collection and multipurpose I/O.
config ME6000
tristate "Meilhaus ME-6000 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-6000 family of boards
that do data collection and multipurpose I/O.
config ME8100
tristate "Meilhaus ME-8100 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-8100 family of boards
that do data collection and multipurpose I/O.
config ME8200
tristate "Meilhaus ME-8200 support"
default n
- depends on PCI
+ depends on PCI && m
help
This driver supports the Meilhaus ME-8200 family of boards
that do data collection and multipurpose I/O.
config MEDUMMY
tristate "Meilhaus dummy driver"
default n
- depends on PCI
+ depends on PCI && m
help
This provides a dummy driver for the Meilhaus driver package
}
break;
case POCH_IOC_GET_COUNTERS:
- if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
+ if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
return -EFAULT;
spin_lock_irq(&channel->counters_lock);
/*
* threads are invoked per one device (per one connection).
*/
- kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0);
- kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0);
+ int retval;
+
+ retval = kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "Creating tcp_rx thread for ud %p failed.\n",
+ ud);
+ return;
+ }
+ retval = kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "Creating tcp_tx thread for ud %p failed.\n",
+ ud);
+ return;
+ }
/* confirm threads are starting */
wait_for_completion(&ud->tcp_rx.thread_done);
	  You need a utility program called fbset to make full use of frame
buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
and the Framebuffer-HOWTO at
- <http://www.tahallah.demon.co.uk/programming/prog.html> for more
+ <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.2.html> for more
information.
Say Y here and to the driver for your graphics board below if you
iv = bip_vec_idx(bip, bip->bip_vcnt);
BUG_ON(iv == NULL);
- BUG_ON(iv->bv_page != NULL);
iv->bv_page = page;
iv->bv_len = len;
if (ret) {
kunmap_atomic(kaddr, KM_USER0);
- break;
+ return ret;
}
sectors = bv->bv_len / bi->sector_size;
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
- int error = bip->bip_error;
+ int error;
- if (bio_integrity_verify(bio)) {
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- error = -EIO;
- }
+ error = bio_integrity_verify(bio);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
-
- if (bio->bi_end_io)
- bio->bi_end_io(bio, error);
+ bio_endio(bio, error);
}
/**
BUG_ON(bip->bip_bio != bio);
- bip->bip_error = error;
+ /* In case of an I/O error there is no point in verifying the
+ * integrity metadata. Restore original bio end_io handler
+ * and run it.
+ */
+ if (error) {
+ bio->bi_end_io = bip->bip_end_io;
+ bio_endio(bio, error);
+
+ return;
+ }
+
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
}
support posix byte range locks. Fix query of root inode when prefixpath
specified and user does not have access to query information about the
top of the share. Fix problem in 2.6.28 resolving DFS paths to
-Samba servers (worked to Windows).
+Samba servers (worked to Windows). Fix rmdir so that pending search
+(readdir) requests do not get invalid results which include the now
+removed directory.
Version 1.55
------------
if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- MD5Init(&context);
- MD5Update(&context, (char *)&key->data, key->len);
- MD5Update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
+ cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
- MD5Final(signature, &context);
+ cifs_MD5_final(signature, &context);
return 0;
}
if ((iov == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- MD5Init(&context);
- MD5Update(&context, (char *)&key->data, key->len);
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
- MD5Update(&context, iov[0].iov_base+4,
+ cifs_MD5_update(&context, iov[0].iov_base+4,
iov[0].iov_len-4);
} else
- MD5Update(&context, iov[i].iov_base, iov[i].iov_len);
+ cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
}
- MD5Final(signature, &context);
+ cifs_MD5_final(signature, &context);
return 0;
}
extern void cifs_buf_release(void *);
extern struct smb_hdr *cifs_small_buf_get(void);
extern void cifs_small_buf_release(void *);
-extern int smb_send(struct socket *, struct smb_hdr *,
- unsigned int /* length */ , struct sockaddr *, bool);
+extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
+ unsigned int /* length */);
extern unsigned int _GetXid(void);
extern void _FreeXid(unsigned int);
#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid()));
}
static struct TCP_Server_Info *
-cifs_find_tcp_session(struct sockaddr *addr)
+cifs_find_tcp_session(struct sockaddr_storage *addr)
{
struct list_head *tmp;
struct TCP_Server_Info *server;
if (server->tcpStatus == CifsNew)
continue;
- if (addr->sa_family == AF_INET &&
+ if (addr->ss_family == AF_INET &&
(addr4->sin_addr.s_addr !=
server->addr.sockAddr.sin_addr.s_addr))
continue;
- else if (addr->sa_family == AF_INET6 &&
+ else if (addr->ss_family == AF_INET6 &&
memcmp(&server->addr.sockAddr6.sin6_addr,
&addr6->sin6_addr, sizeof(addr6->sin6_addr)))
continue;
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
- struct sockaddr addr;
+ struct sockaddr_storage addr;
struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr;
struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr;
int rc;
- memset(&addr, 0, sizeof(struct sockaddr));
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
if (volume_info->UNCip && volume_info->UNC) {
rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
&sin_server6->sin6_addr.in6_u);
if (rc > 0)
- addr.sa_family = AF_INET6;
+ addr.ss_family = AF_INET6;
} else {
- addr.sa_family = AF_INET;
+ addr.ss_family = AF_INET;
}
if (rc <= 0) {
tcp_ses->tcpStatus = CifsNew;
++tcp_ses->srv_count;
- if (addr.sa_family == AF_INET6) {
+ if (addr.ss_family == AF_INET6) {
cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
- socket->sk->sk_sndtimeo = 3 * HZ;
+ socket->sk->sk_sndtimeo = 5 * HZ;
/* make the bufsizes depend on wsize/rsize and max requests */
if (server->noautotune) {
smb_buf = (struct smb_hdr *)ses_init_buf;
/* sizeof RFC1002_SESSION_REQUEST with no scope */
smb_buf->smb_buf_length = 0x81000044;
- rc = smb_send(socket, smb_buf, 0x44,
- (struct sockaddr *) &server->addr.sockAddr,
- server->noblocksnd);
+ rc = smb_send(server, smb_buf, 0x44);
kfree(ses_init_buf);
msleep(1); /* RFC1001 layer in at least one server
requires very short break before negprot
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
- socket->sk->sk_sndtimeo = 3 * HZ;
+ socket->sk->sk_sndtimeo = 5 * HZ;
server->ssocket = socket;
return rc;
return full_path;
}
+static void setup_cifs_dentry(struct cifsTconInfo *tcon,
+ struct dentry *direntry,
+ struct inode *newinode)
+{
+ if (tcon->nocase)
+ direntry->d_op = &cifs_ci_dentry_ops;
+ else
+ direntry->d_op = &cifs_dentry_ops;
+ d_instantiate(direntry, newinode);
+}
+
/* Inode operations in similar order to how they appear in Linux file fs.h */
int
int xid;
int create_options = CREATE_NOT_DIR;
int oplock = 0;
+ /* BB below access is too much for the mknod to request */
int desiredAccess = GENERIC_READ | GENERIC_WRITE;
__u16 fileHandle;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifsTconInfo *tcon;
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
- struct cifsFileInfo *pCifsFile = NULL;
struct cifsInodeInfo *pCifsInode;
int disposition = FILE_OVERWRITE_IF;
bool write_only = false;
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
return -ENOMEM;
}
+ mode &= ~current->fs->umask;
+
if (nd && (nd->flags & LOOKUP_OPEN)) {
int oflags = nd->intent.open.flags;
return -ENOMEM;
}
- mode &= ~current->fs->umask;
-
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
*/
- if (!pTcon->unix_ext && (mode & S_IWUGO) == 0)
+ if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == -EIO) {
/* old server, retry the open legacy style */
- rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
+ rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
- if ((pTcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
+ if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
struct cifs_unix_set_info_args args = {
.mode = mode,
.ctime = NO_CHANGE_64,
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
+ CIFSSMBUnixSetInfo(xid, tcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* BB implement mode setting via Windows security
descriptors e.g. */
- /* CIFSSMBWinSetPerms(xid,pTcon,path,mode,-1,-1,nls);*/
+ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/
/* Could set r/o dos attribute if mode & 0222 == 0 */
}
/* server might mask mode so we have to query for it */
- if (pTcon->unix_ext)
+ if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
else {
}
if (rc != 0) {
- cFYI(1,
- ("Create worked but get_inode_info failed rc = %d",
- rc));
- } else {
- if (pTcon->nocase)
- direntry->d_op = &cifs_ci_dentry_ops;
- else
- direntry->d_op = &cifs_dentry_ops;
- d_instantiate(direntry, newinode);
- }
+ cFYI(1, ("Create worked, get_inode_info failed rc = %d",
+ rc));
+ } else
+ setup_cifs_dentry(tcon, direntry, newinode);
+
if ((nd == NULL /* nfsd case - nfs srv does not set nd */) ||
(!(nd->flags & LOOKUP_OPEN))) {
/* mknod case - do not leave file open */
- CIFSSMBClose(xid, pTcon, fileHandle);
+ CIFSSMBClose(xid, tcon, fileHandle);
} else if (newinode) {
- pCifsFile =
+ struct cifsFileInfo *pCifsFile =
kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
if (pCifsFile == NULL)
/* set the following in open now
pCifsFile->pfile = file; */
write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &pTcon->openFileList);
+ list_add(&pCifsFile->tlist, &tcon->openFileList);
pCifsInode = CIFS_I(newinode);
if (pCifsInode) {
/* if readable file instance put first in list*/
cifsInode = CIFS_I(direntry->d_inode);
cifsInode->time = 0; /* force revalidate to go get info when
needed */
+
+ cifsInode = CIFS_I(inode);
+ cifsInode->time = 0; /* force revalidate to get parent dir info
+ since cached search results now invalid */
+
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
* with every copy.
*
* To compute the message digest of a chunk of bytes, declare an
- * MD5Context structure, pass it to MD5Init, call MD5Update as
- * needed on buffers full of bytes, and then call MD5Final, which
+ * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as
+ * needed on buffers full of bytes, and then call cifs_MD5_final, which
* will fill a supplied 16-byte array with the digest.
*/
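
As a quick orientation for the rename, here is a minimal usage sketch of the new names (the wrapper function and buffer arguments are hypothetical, not part of the patch):

static void example_digest(const unsigned char *buf, unsigned int len,
			   unsigned char digest[16])
{
	struct MD5Context ctx;			/* from cifs' local md5.h */

	cifs_MD5_init(&ctx);
	cifs_MD5_update(&ctx, buf, len);	/* may be called repeatedly */
	cifs_MD5_final(digest, &ctx);		/* fills the 16-byte digest */
}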
* initialization constants.
*/
void
-MD5Init(struct MD5Context *ctx)
+cifs_MD5_init(struct MD5Context *ctx)
{
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
* of bytes.
*/
void
-MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
+cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
{
register __u32 t;
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void
-MD5Final(unsigned char digest[16], struct MD5Context *ctx)
+cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx)
{
unsigned int count;
unsigned char *p;
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * reflect the addition of 16 longwords of new data. cifs_MD5_update blocks
* the data and converts bytes into longwords for this routine.
*/
static void
unsigned char tk[16];
struct MD5Context tctx;
- MD5Init(&tctx);
- MD5Update(&tctx, key, key_len);
- MD5Final(tk, &tctx);
+ cifs_MD5_init(&tctx);
+ cifs_MD5_update(&tctx, key, key_len);
+ cifs_MD5_final(tk, &tctx);
key = tk;
key_len = 16;
ctx->k_opad[i] ^= 0x5c;
}
- MD5Init(&ctx->ctx);
- MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+ cifs_MD5_init(&ctx->ctx);
+ cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
}
#endif
ctx->k_opad[i] ^= 0x5c;
}
- MD5Init(&ctx->ctx);
- MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+ cifs_MD5_init(&ctx->ctx);
+ cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
}
/***********************************************************************
hmac_md5_update(const unsigned char *text, int text_len,
struct HMACMD5Context *ctx)
{
- MD5Update(&ctx->ctx, text, text_len); /* then text of datagram */
+ cifs_MD5_update(&ctx->ctx, text, text_len); /* then text of datagram */
}
/***********************************************************************
{
struct MD5Context ctx_o;
- MD5Final(digest, &ctx->ctx);
+ cifs_MD5_final(digest, &ctx->ctx);
- MD5Init(&ctx_o);
- MD5Update(&ctx_o, ctx->k_opad, 64);
- MD5Update(&ctx_o, digest, 16);
- MD5Final(digest, &ctx_o);
+ cifs_MD5_init(&ctx_o);
+ cifs_MD5_update(&ctx_o, ctx->k_opad, 64);
+ cifs_MD5_update(&ctx_o, digest, 16);
+ cifs_MD5_final(digest, &ctx_o);
}
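
For reference, the two final calls above complete the standard HMAC construction (RFC 2104): digest = MD5((K XOR opad) || MD5((K XOR ipad) || text)). The inner hash over k_ipad and the message is accumulated incrementally by hmac_md5_update(); hmac_md5_final() then hashes k_opad followed by the 16-byte inner digest to produce the result.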
/***********************************************************
};
#endif /* _HMAC_MD5_H */
-void MD5Init(struct MD5Context *context);
-void MD5Update(struct MD5Context *context, unsigned char const *buf,
+void cifs_MD5_init(struct MD5Context *context);
+void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf,
unsigned len);
-void MD5Final(unsigned char digest[16], struct MD5Context *context);
+void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context);
/* The following definitions come from lib/hmacmd5.c */
spin_unlock(&GlobalMid_Lock);
}
-int
-smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
- unsigned int smb_buf_length, struct sockaddr *sin, bool noblocksnd)
-{
- int rc = 0;
- int i = 0;
- struct msghdr smb_msg;
- struct kvec iov;
- unsigned len = smb_buf_length + 4;
-
- if (ssocket == NULL)
- return -ENOTSOCK; /* BB eventually add reconnect code here */
- iov.iov_base = smb_buffer;
- iov.iov_len = len;
-
- smb_msg.msg_name = sin;
- smb_msg.msg_namelen = sizeof(struct sockaddr);
- smb_msg.msg_control = NULL;
- smb_msg.msg_controllen = 0;
- if (noblocksnd)
- smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
- else
- smb_msg.msg_flags = MSG_NOSIGNAL;
-
- /* smb header is converted in header_assemble. bcc and rest of SMB word
- area, and byte area if necessary, is converted to littleendian in
- cifssmb.c and RFC1001 len is converted to bigendian in smb_send
- Flags2 is converted in SendReceive */
-
- smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
- cFYI(1, ("Sending smb of length %d", smb_buf_length));
- dump_smb(smb_buffer, len);
-
- while (len > 0) {
- rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len);
- if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
- i++;
- /* smaller timeout here than send2 since smaller size */
- /* Although it may not be required, this also is smaller
- oplock break time */
- if (i > 12) {
- cERROR(1,
- ("sends on sock %p stuck for 7 seconds",
- ssocket));
- rc = -EAGAIN;
- break;
- }
- msleep(1 << i);
- continue;
- }
- if (rc < 0)
- break;
- else
- i = 0; /* reset i after each successful send */
- iov.iov_base += rc;
- iov.iov_len -= rc;
- len -= rc;
- }
-
- if (rc < 0) {
- cERROR(1, ("Error %d sending data on socket to server", rc));
- } else {
- rc = 0;
- }
-
- /* Don't want to modify the buffer as a
- side effect of this call. */
- smb_buffer->smb_buf_length = smb_buf_length;
-
- return rc;
-}
-
static int
-smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
- struct sockaddr *sin, bool noblocksnd)
+smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
int rc = 0;
int i = 0;
if (ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
- smb_msg.msg_name = sin;
+ smb_msg.msg_name = (struct sockaddr *) &server->addr.sockAddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
- if (noblocksnd)
+ if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
n_vec - first_vec, total_len);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
- if (i >= 14) {
+ /* If this is a blocking send, we try 3 times, since each
+    attempt can block for 5 seconds. For a nonblocking send
+    we have to try more times, but with increasing waits to
+    give the socket time to clear. Either way, the overall
+    time we wait to send on the socket is about 15 seconds.
+    Similarly, SendReceive[2] waits about 15 seconds for the
+    server's response to most types of requests (except SMB
+    writes past end of file, which can be slow, and blocking
+    lock operations). NFS waits slightly longer than CIFS,
+    but that can make nonresponsive servers take longer to
+    detect, and 15 seconds is more than enough time for
+    modern networks to send a packet. In most cases, if we
+    fail to send after the retries we will kill the socket
+    and reconnect, which may clear the network problem.
+ */
+ if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
cERROR(1,
("sends on sock %p stuck for 15 seconds",
ssocket));
return rc;
}
+int
+smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
+ unsigned int smb_buf_length)
+{
+ struct kvec iov;
+
+ iov.iov_base = smb_buffer;
+ iov.iov_len = smb_buf_length + 4;
+
+ return smb_sendv(server, &iov, 1);
+}
+
static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
{
if (long_op == CIFS_ASYNC_OP) {
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send2(ses->server, iov, n_vec,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_sendv(ses->server, iov, n_vec);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
* cannot be fixed without breaking all existing apps.
*/
case TUNSETIFF:
+ case TUNGETIFF:
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
COMPATIBLE_IOCTL(TUNSETDEBUG)
COMPATIBLE_IOCTL(TUNSETPERSIST)
COMPATIBLE_IOCTL(TUNSETOWNER)
+COMPATIBLE_IOCTL(TUNSETLINK)
+COMPATIBLE_IOCTL(TUNSETGROUP)
+COMPATIBLE_IOCTL(TUNGETFEATURES)
+COMPATIBLE_IOCTL(TUNSETOFFLOAD)
+COMPATIBLE_IOCTL(TUNSETTXFILTER)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
+HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
/*
* Configuration options available inside /proc/sys/fs/epoll/
*/
-/* Maximum number of epoll devices, per user */
-static int max_user_instances __read_mostly;
/* Maximum number of epoll watched descriptors, per user */
static int max_user_watches __read_mostly;
static int zero;
ctl_table epoll_table[] = {
- {
- .procname = "max_user_instances",
- .data = &max_user_instances,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .extra1 = &zero,
- },
{
.procname = "max_user_watches",
.data = &max_user_watches,
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
- atomic_dec(&ep->user->epoll_devs);
free_uid(ep->user);
kfree(ep);
}
struct eventpoll *ep;
user = get_current_user();
- error = -EMFILE;
- if (unlikely(atomic_read(&user->epoll_devs) >=
- max_user_instances))
- goto free_uid;
error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
flags & O_CLOEXEC);
if (fd < 0)
ep_free(ep);
- atomic_inc(&ep->user->epoll_devs);
error_return:
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
struct sysinfo si;
si_meminfo(&si);
- max_user_instances = 128;
- max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+ /*
+ * Allows top 4% of lowmem to be allocated for epoll watches (per user).
+ */
+ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
/* Initialize the structure used to perform safe poll wait head wake ups */
header-y += bfs_fs.h
header-y += blkpg.h
header-y += bpqether.h
+header-y += bsg.h
header-y += can.h
header-y += cdk.h
header-y += chio.h
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
- * submitted IO to be completed before this oen is issued.
+ * submitted IO to be completed before this one is issued.
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
-#define BIO_RW_SYNC 3
-#define BIO_RW_META 4
-#define BIO_RW_DISCARD 5
-#define BIO_RW_FAILFAST_DEV 6
-#define BIO_RW_FAILFAST_TRANSPORT 7
-#define BIO_RW_FAILFAST_DRIVER 8
+#define BIO_RW_SYNCIO 3
+#define BIO_RW_UNPLUG 4
+#define BIO_RW_META 5
+#define BIO_RW_DISCARD 6
+#define BIO_RW_FAILFAST_DEV 7
+#define BIO_RW_FAILFAST_TRANSPORT 8
+#define BIO_RW_FAILFAST_DRIVER 9
+
+#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG)
+
+#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
+
+/*
+ * Old defines, these should eventually be replaced by direct usage of
+ * bio_rw_flagged()
+ */
+#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER)
+#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO)
+#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG)
+#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
+#define bio_failfast_transport(bio) \
+ bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
+#define bio_failfast_driver(bio) \
+ bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
+#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
+#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
+#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
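
Note that after this change BIO_RW_SYNC names two bit numbers at once, so the old open-coded pattern (1 << BIO_RW_SYNC) would compute a bogus mask; bio_rw_flagged() avoids that trap. A hypothetical caller, purely for illustration:

#include <linux/bio.h>

/* Illustration only: test the queue hints on a bio via the new helper. */
static bool example_bio_is_sync_io(struct bio *bio)
{
	/* (1 << BIO_RW_SYNC) would be wrong here, since BIO_RW_SYNC is
	 * now (BIO_RW_SYNCIO | BIO_RW_UNPLUG), i.e. two bit numbers. */
	return bio_rw_flagged(bio, BIO_RW_SYNCIO) &&
	       bio_rw_flagged(bio, BIO_RW_UNPLUG);
}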
/*
* upper 16 bits of bi_rw define the io priority of this bio
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
-#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
-#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
-#define bio_failfast_transport(bio) \
- ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
-#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
-#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
-#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio)
void *bip_buf; /* generated integrity data */
bio_end_io_t *bip_end_io; /* saved I/O completion fn */
- int bip_error; /* saved I/O error */
unsigned int bip_size;
unsigned short bip_pool; /* pool the ivec came from */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
+ __REQ_UNPLUG, /* unplug queue on submission */
__REQ_NR_BITS, /* stops here */
};
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
+#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define BLK_MAX_CDB 16
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_CLUSTER) | \
+ (1 << QUEUE_FLAG_STACKABLE))
static inline int queue_is_locked(struct request_queue *q)
{
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
while (!atomic_inc_not_zero(&css->refcnt)) {
if (test_bit(CSS_REMOVED, &css->flags))
return false;
+ cpu_relax();
}
return true;
}
/* Attach to insert probes on any functions which should be ignored*/
#define __kprobes __attribute__((__section__(".kprobes.text"))) notrace
+#else /* CONFIG_KPROBES */
+typedef int kprobe_opcode_t;
+struct arch_specific_insn {
+ int dummy;
+};
+#define __kprobes notrace
+#endif /* CONFIG_KPROBES */
struct kprobe;
struct pt_regs;
/* For backward compatibility with old code using JPROBE_ENTRY() */
#define JPROBE_ENTRY(handler) (handler)
-DECLARE_PER_CPU(struct kprobe *, current_kprobe);
-DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-#ifdef CONFIG_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* CONFIG_KRETPROBES */
-static inline void arch_prepare_kretprobe(struct kretprobe *rp,
- struct pt_regs *regs)
-{
-}
-static inline int arch_trampoline_kprobe(struct kprobe *p)
-{
- return 0;
-}
-#endif /* CONFIG_KRETPROBES */
/*
* Function-return probe -
* Note:
unsigned long range;
};
+#ifdef CONFIG_KPROBES
+DECLARE_PER_CPU(struct kprobe *, current_kprobe);
+DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+#ifdef CONFIG_KRETPROBES
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+extern int arch_trampoline_kprobe(struct kprobe *p);
+#else /* CONFIG_KRETPROBES */
+static inline void arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
+{
+}
+static inline int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
extern struct kretprobe_blackpoint kretprobe_blacklist[];
static inline void kretprobe_assert(struct kretprobe_instance *ri,
#else /* CONFIG_KPROBES */
-#define __kprobes notrace
-struct jprobe;
-struct kretprobe;
-
static inline struct kprobe *get_kprobe(void *addr)
{
return NULL;
atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
- atomic_t epoll_devs; /* The number of epoll descriptors currently open */
atomic_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#else
+#ifdef CONFIG_ALPHA
+#define SYSCALL_ALIAS(alias, name) \
+ asm ( #alias " = " #name "\n\t.globl " #alias)
+#else
#define SYSCALL_ALIAS(alias, name) \
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name)
#endif
+#endif
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
* does memory allocation too using vmalloc_32().
*
* videobuf_dma_*()
- * see Documentation/DMA-mapping.txt, these functions to
+ * see Documentation/PCI/PCI-DMA-mapping.txt, these functions do
 * basically the same. The map function also builds a
* scatterlist for the buffer (and unmap frees it ...)
*
* UBI volume creation
* ~~~~~~~~~~~~~~~~~~~
*
- * UBI volumes are created via the %UBI_IOCMKVOL IOCTL command of UBI character
+ * UBI volumes are created via the %UBI_IOCMKVOL ioctl command of UBI character
* device. A &struct ubi_mkvol_req object has to be properly filled and a
- * pointer to it has to be passed to the IOCTL.
+ * pointer to it has to be passed to the ioctl.
*
* UBI volume deletion
* ~~~~~~~~~~~~~~~~~~~
*
- * To delete a volume, the %UBI_IOCRMVOL IOCTL command of the UBI character
+ * To delete a volume, the %UBI_IOCRMVOL ioctl command of the UBI character
 * device should be used. A pointer to the 32-bit volume ID has to be passed
- * to the IOCTL.
+ * to the ioctl.
*
* UBI volume re-size
* ~~~~~~~~~~~~~~~~~~
*
- * To re-size a volume, the %UBI_IOCRSVOL IOCTL command of the UBI character
+ * To re-size a volume, the %UBI_IOCRSVOL ioctl command of the UBI character
* device should be used. A &struct ubi_rsvol_req object has to be properly
- * filled and a pointer to it has to be passed to the IOCTL.
+ * filled and a pointer to it has to be passed to the ioctl.
*
* UBI volumes re-name
* ~~~~~~~~~~~~~~~~~~~
*
 * To re-name several volumes atomically in one go, the %UBI_IOCRNVOL command
* of the UBI character device should be used. A &struct ubi_rnvol_req object
- * has to be properly filled and a pointer to it has to be passed to the IOCTL.
+ * has to be properly filled and a pointer to it has to be passed to the ioctl.
*
* UBI volume update
* ~~~~~~~~~~~~~~~~~
*
- * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the
+ * Volume update should be done via the %UBI_IOCVOLUP ioctl command of the
* corresponding UBI volume character device. A pointer to a 64-bit update
- * size should be passed to the IOCTL. After this, UBI expects user to write
+ * size should be passed to the ioctl. After this, UBI expects the user to write
* this number of bytes to the volume character device. The update is finished
* when the claimed number of bytes is passed. So, the volume update sequence
* is something like:
* write(fd, buf, image_size);
* close(fd);
*
- * Atomic eraseblock change
+ * Logical eraseblock erase
* ~~~~~~~~~~~~~~~~~~~~~~~~
*
- * Atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL
- * command of the corresponding UBI volume character device. A pointer to
- * &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is
- * expected to write the requested amount of bytes. This is similar to the
- * "volume update" IOCTL.
+ * To erase a logical eraseblock, the %UBI_IOCEBER ioctl command of the
+ * corresponding UBI volume character device should be used. This command
+ * unmaps the requested logical eraseblock, makes sure the corresponding
+ * physical eraseblock is successfully erased, and returns.
+ *
+ * Atomic logical eraseblock change
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Atomic logical eraseblock change operation is called using the %UBI_IOCEBCH
+ * ioctl command of the corresponding UBI volume character device. A pointer to
+ * a &struct ubi_leb_change_req object has to be passed to the ioctl. Then the
+ * user is expected to write the requested number of bytes (similarly to what
+ * is done for the "volume update" ioctl).
+ *
+ * Logical eraseblock map
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * To map a logical eraseblock to a physical eraseblock, the %UBI_IOCEBMAP
+ * ioctl command should be used. A pointer to a &struct ubi_map_req object is
+ * expected to be passed. The ioctl maps the requested logical eraseblock to
+ * a physical eraseblock and returns. Only non-mapped logical eraseblocks can
+ * be mapped. If the logical eraseblock specified in the request is already
+ * mapped to a physical eraseblock, the ioctl fails and returns an error.
+ *
+ * Logical eraseblock unmap
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * To unmap a logical eraseblock from its physical eraseblock, the
+ * %UBI_IOCEBUNMAP ioctl command should be used. The ioctl unmaps the logical
+ * eraseblock, schedules the corresponding physical eraseblock for erasure,
+ * and returns. Unlike the "LEB erase" command, it does not wait for the
+ * physical eraseblock to be erased. Note that the side effect of this is that
+ * if an unclean reboot happens after the unmap ioctl returns, you may find
+ * the LEB mapped again to the same physical eraseblock after UBI is run again.
+ *
+ * Check if logical eraseblock is mapped
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * To check if a logical eraseblock is mapped to a physical eraseblock, the
+ * %UBI_IOCEBISMAP ioctl command should be used. It returns %0 if the LEB is
+ * not mapped, and %1 if it is mapped.
+ *
+ * Set an UBI volume property
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * To set an UBI volume property, the %UBI_IOCSETPROP ioctl command should be
+ * used. A pointer to a &struct ubi_set_prop_req object is expected to be
+ * passed. The object describes which property should be set, and the value to
+ * set it to.
*/
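
A rough user-space sketch of the new LEB ioctls follows; the device node, LEB number, and error handling are illustrative assumptions, not mandated by the interface:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Sketch: map LEB 5 of a volume, check the mapping, then unmap it again. */
int example_leb_ioctls(void)
{
	struct ubi_map_req req = { .lnum = 5, .dtype = UBI_UNKNOWN };
	int32_t lnum = 5;
	int ret, fd = open("/dev/ubi0_0", O_RDWR);	/* assumed volume node */

	if (fd < 0)
		return -1;
	ret = ioctl(fd, UBI_IOCEBMAP, &req);	/* fails if already mapped */
	if (ret == 0) {
		ret = ioctl(fd, UBI_IOCEBISMAP, &lnum);	/* returns 1 now */
		ret = ioctl(fd, UBI_IOCEBUNMAP, &lnum);	/* erase is lazy */
	}
	close(fd);
	return ret;
}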
/*
/* Maximum volume name length */
#define UBI_MAX_VOLUME_NAME 127
-/* IOCTL commands of UBI character devices */
+/* ioctl commands of UBI character devices */
#define UBI_IOC_MAGIC 'o'
/* Re-name volumes */
#define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)
-/* IOCTL commands of the UBI control character device */
+/* ioctl commands of the UBI control character device */
#define UBI_CTRL_IOC_MAGIC 'o'
/* Detach an MTD device */
#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
-/* IOCTL commands of UBI volume character devices */
+/* ioctl commands of UBI volume character devices */
#define UBI_VOL_IOC_MAGIC 'O'
/* Start UBI volume update */
#define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t)
-/* An eraseblock erasure command, used for debugging, disabled by default */
+/* LEB erasure command, used for debugging, disabled by default */
#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
-/* An atomic eraseblock change command */
+/* Atomic LEB change command */
#define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t)
+/* Map LEB command */
+#define UBI_IOCEBMAP _IOW(UBI_VOL_IOC_MAGIC, 3, struct ubi_map_req)
+/* Unmap LEB command */
+#define UBI_IOCEBUNMAP _IOW(UBI_VOL_IOC_MAGIC, 4, int32_t)
+/* Check if LEB is mapped command */
+#define UBI_IOCEBISMAP _IOR(UBI_VOL_IOC_MAGIC, 5, int32_t)
+/* Set an UBI volume property */
+#define UBI_IOCSETPROP _IOW(UBI_VOL_IOC_MAGIC, 6, struct ubi_set_prop_req)
/* Maximum MTD device name length supported by UBI */
#define MAX_UBI_MTD_NAME_LEN 127
UBI_STATIC_VOLUME = 4,
};
+/*
+ * UBI set property ioctl constants
+ *
+ * @UBI_PROP_DIRECT_WRITE: allow / disallow the user to directly write and
+ * erase individual eraseblocks on dynamic volumes
+ */
+enum {
+ UBI_PROP_DIRECT_WRITE = 1,
+};
+
/**
* struct ubi_attach_req - attach MTD device request.
* @ubi_num: UBI device number to create
} __attribute__ ((packed));
/**
- * struct ubi_leb_change_req - a data structure used in atomic logical
- * eraseblock change requests.
+ * struct ubi_leb_change_req - a data structure used in atomic LEB change
+ * requests.
* @lnum: logical eraseblock number to change
* @bytes: how many bytes will be written to the logical eraseblock
* @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
int8_t padding[7];
} __attribute__ ((packed));
+/**
+ * struct ubi_map_req - a data structure used in map LEB requests.
+ * @lnum: logical eraseblock number to map
+ * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
+ * @padding: reserved for future, not used, has to be zeroed
+ */
+struct ubi_map_req {
+ int32_t lnum;
+ int8_t dtype;
+ int8_t padding[3];
+} __attribute__ ((packed));
+
+
+/**
+ * struct ubi_set_prop_req - a data structure used to set an ubi volume
+ * property.
+ * @property: property to set (%UBI_PROP_DIRECT_WRITE)
+ * @padding: reserved for future, not used, has to be zeroed
+ * @value: value to set
+ */
+struct ubi_set_prop_req {
+ uint8_t property;
+ uint8_t padding[7];
+ uint64_t value;
+} __attribute__ ((packed));
+
#endif /* __UBI_USER_H__ */
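
Under the same assumptions as the sketch above, enabling direct LEB writes through the new property ioctl could look like:

/* Sketch: allow direct write/erase of LEBs on an already-open volume fd. */
static int example_enable_direct_write(int fd)
{
	struct ubi_set_prop_req prop = {
		.property = UBI_PROP_DIRECT_WRITE,
		.value = 1,			/* non-zero enables */
	};

	return ioctl(fd, UBI_IOCSETPROP, &prop);
}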
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
- if (sizeof(rwlock_t) != 0) {
+ if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
}
write_unlock(&css_set_lock);
- list_del(&root->root_list);
- root_count--;
+ if (!list_empty(&root->root_list)) {
+ list_del(&root->root_list);
+ root_count--;
+ }
mutex_unlock(&cgroup_mutex);
err_remove:
+ cgroup_lock_hierarchy(root);
list_del(&cgrp->sibling);
+ cgroup_unlock_hierarchy(root);
root->number_of_cgroups--;
err_destroy:
for_each_subsys(cgrp->root, ss) {
struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
int refcnt;
- do {
+ while (1) {
/* We can only remove a CSS with a refcnt==1 */
refcnt = atomic_read(&css->refcnt);
if (refcnt > 1) {
* css_tryget() to spin until we set the
* CSS_REMOVED bits or abort
*/
- } while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+ if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
+ break;
+ cpu_relax();
+ }
}
done:
for_each_subsys(cgrp->root, ss) {
mutex_unlock(&cgroup_mutex);
return 0;
}
- task_lock(tsk);
- cg = tsk->cgroups;
- parent = task_cgroup(tsk, subsys->subsys_id);
/* Pin the hierarchy */
- if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
+ if (!atomic_inc_not_zero(&root->sb->s_active)) {
/* We race with the final deactivate_super() */
mutex_unlock(&cgroup_mutex);
return 0;
}
/* Keep the cgroup alive */
+ task_lock(tsk);
+ parent = task_cgroup(tsk, subsys->subsys_id);
+ cg = tsk->cgroups;
get_css_set(cg);
task_unlock(tsk);
+
mutex_unlock(&cgroup_mutex);
/* Now do the VFS work to create a cgroup */
mutex_unlock(&inode->i_mutex);
put_css_set(cg);
- deactivate_super(parent->root->sb);
+ deactivate_super(root->sb);
/* The cgroup is still accessible in the VFS, but
* we're not going to try to rmdir() it at this
* point. */
mutex_lock(&cgroup_mutex);
put_css_set(cg);
mutex_unlock(&cgroup_mutex);
- deactivate_super(parent->root->sb);
+ deactivate_super(root->sb);
return ret;
}
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
struct page_cgroup *pc,
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
ret = 0;
- for_each_node_state(node, N_POSSIBLE) {
+ for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
enum lru_list l;
for_each_lru(l) {
static void mem_cgroup_put(struct mem_cgroup *mem)
{
- if (atomic_dec_and_test(&mem->refcnt))
+ if (atomic_dec_and_test(&mem->refcnt)) {
+ struct mem_cgroup *parent = parent_mem_cgroup(mem);
__mem_cgroup_free(mem);
+ if (parent)
+ mem_cgroup_put(parent);
+ }
}
+/*
+ * Returns the parent mem_cgroup in the memcg hierarchy; NULL when the
+ * res_counter has no parent (i.e. hierarchy is not enabled).
+ */
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
+{
+ if (!mem->res.parent)
+ return NULL;
+ return mem_cgroup_from_res_counter(mem->res.parent, res);
+}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
if (parent && parent->use_hierarchy) {
res_counter_init(&mem->res, &parent->res);
res_counter_init(&mem->memsw, &parent->memsw);
+ /*
+ * We increment refcnt of the parent to ensure that we can
+ * safely access it on res_counter_charge/uncharge.
+ * This refcnt will be decremented when freeing this
+ * mem_cgroup(see mem_cgroup_put).
+ */
+ mem_cgroup_get(parent);
} else {
res_counter_init(&mem->res, NULL);
res_counter_init(&mem->memsw, NULL);
}
/*
- * Can we just expand an old private anonymous mapping?
- * The VM_SHARED test is necessary because shmem_zero_setup
- * will create the file object for a shared anonymous map below.
+ * Can we just expand an old mapping?
*/
- if (!file && !(vm_flags & VM_SHARED)) {
- vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
- NULL, NULL, pgoff, NULL);
- if (vma)
- goto out;
- }
+ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+ if (vma)
+ goto out;
/*
* Determine the object being mapped and call the appropriate
if (vma_wants_writenotify(vma))
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
- if (file && vma_merge(mm, prev, addr, vma->vm_end,
- vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
- mpol_put(vma_policy(vma));
- kmem_cache_free(vm_area_cachep, vma);
- fput(file);
- if (vm_flags & VM_EXECUTABLE)
- removed_exe_file_vma(mm);
- } else {
- vma_link(mm, vma, prev, rb_link, rb_parent);
- file = vma->vm_file;
- }
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
pte_t *pte;
int ret = 1;
- if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
+ if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
ret = -ENOMEM;
+ goto out_nolock;
+ }
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
activate_page(page);
out:
pte_unmap_unlock(pte, ptl);
+out_nolock:
return ret;
}
return 0;
next_skb:
- block_limit = skb_headlen(st->cur_skb);
+ block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit) {
- *data = st->cur_skb->data + abs_offset;
+ *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
st->frag_data = NULL;
}
- if (st->cur_skb->next) {
- st->cur_skb = st->cur_skb->next;
+ if (st->root_skb == st->cur_skb &&
+ skb_shinfo(st->root_skb)->frag_list) {
+ st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
- } else if (st->root_skb == st->cur_skb &&
- skb_shinfo(st->root_skb)->frag_list) {
- st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+ } else if (st->cur_skb->next) {
+ st->cur_skb = st->cur_skb->next;
+ st->frag_idx = 0;
goto next_skb;
}
static int __init ip_auto_config(void)
{
__be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+ int retries = CONF_OPEN_RETRIES;
+#endif
#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
-
- int retries = CONF_OPEN_RETRIES;
-
if (ic_dynamic() < 0) {
ic_close_devs();
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;
- ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+ ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+ tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;
atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
+
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
+ unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
- sk2->sk_hash == num &&
+ (bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- (*saddr_comp)(sk, sk2))
- return 1;
+ (*saddr_comp)(sk, sk2)) {
+ if (bitmap)
+ __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+ bitmap);
+ else
+ return 1;
+ }
return 0;
}
if (!snum) {
int low, high, remaining;
unsigned rand;
- unsigned short first;
+ unsigned short first, last;
+ DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
rand = net_random();
- snum = first = rand % remaining + low;
- rand |= 1;
- for (;;) {
- hslot = &udptable->hash[udp_hashfn(net, snum)];
+ first = (((u64)rand * remaining) >> 32) + low;
+ /*
+ * force rand to be an odd multiple of UDP_HTABLE_SIZE
+ */
+ rand = (rand | 1) * UDP_HTABLE_SIZE;
+ for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+ hslot = &udptable->hash[udp_hashfn(net, first)];
+ bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
- if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
- break;
- spin_unlock_bh(&hslot->lock);
+ udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+ saddr_comp);
+
+ snum = first;
+ /*
+ * Iterate over all possible values of snum for this hash.
+ * Using steps of an odd multiple of UDP_HTABLE_SIZE
+ * gives us randomization and full range coverage.
+ */
do {
- snum = snum + rand;
- } while (snum < low || snum > high);
- if (snum == first)
- goto fail;
+ if (low <= snum && snum <= high &&
+ !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+ goto found;
+ snum += rand;
+ } while (snum != first);
+ spin_unlock_bh(&hslot->lock);
}
+ goto fail;
} else {
hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock);
- if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+ if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock;
}
+found:
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {
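
The full-coverage claim in the comment above is easy to sanity-check outside the kernel; this stand-alone sketch (illustrative, not part of the patch) steps a 16-bit port by an odd multiple of UDP_HTABLE_SIZE and counts how many distinct slots of one hash chain it visits before wrapping:

#include <stdio.h>

#define UDP_HTABLE_SIZE 128

int main(void)
{
	unsigned short first = 40000, snum = first;
	unsigned int rand = (12345 | 1) * UDP_HTABLE_SIZE; /* odd multiple */
	int visited = 0;

	do {
		visited++;
		snum += rand;		/* wraps modulo 65536 */
	} while (snum != first);

	/* prints "visited 512 of 512": full coverage of the chain */
	printf("visited %d of %d\n", visited, 65536 / UDP_HTABLE_SIZE);
	return 0;
}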
.procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0444,
.proc_handler = proc_dointvec,
},
#endif
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed;
- if (ip6_dst_lookup(sk, &dst2, &fl))
+ if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed;
- err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+ err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
dst_release(dst);
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+ !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate
}
if (skb2) {
- skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2);
}
}
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
- (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+ (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ sizeof(*pim), IPPROTO_PIM,
+ csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
- skb->protocol = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
{
struct mif_device *v;
struct net_device *dev;
+ struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL;
dev_set_allmulti(dev, -1);
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev)
+ in6_dev->cnf.mc_forwarding--;
+
if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev);
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev;
+ struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
return -EINVAL;
}
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev)
+ in6_dev->cnf.mc_forwarding++;
+
/*
* Fill in the VIF structures
*/
skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_pull(skb, sizeof(struct ipv6hdr));
}
if (net->ipv6.mroute6_sk == NULL) {
rtnl_lock();
write_lock_bh(&mrt_lock);
- if (likely(net->ipv6.mroute6_sk == NULL))
+ if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk;
+ net->ipv6.devconf_all->mc_forwarding++;
+ }
else
err = -EADDRINUSE;
write_unlock_bh(&mrt_lock);
if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL;
+ net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(net);
.proto = iph->nexthdr,
};
- if (rt6_need_strict(&iph->daddr))
+ if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
#include "debug-levels.h"
-/* Debug framework control of debug levels */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(debugfs),
- D_SUBMODULE_DEFINE(id_table),
- D_SUBMODULE_DEFINE(op_msg),
- D_SUBMODULE_DEFINE(op_reset),
- D_SUBMODULE_DEFINE(op_rfkill),
- D_SUBMODULE_DEFINE(stack),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(id_table),
+ D_SUBMODULE_DEFINE(op_msg),
+ D_SUBMODULE_DEFINE(op_reset),
+ D_SUBMODULE_DEFINE(op_rfkill),
+ D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE,
.name = "WiMAX",
* calculate the number of reg rules we will need. We will need one
* for each channel subband */
while (country_ie_len >= 3) {
+ int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0;
continue;
}
+ /* 2 GHz */
+ if (triplet->chans.first_channel <= 14)
+ end_channel = triplet->chans.first_channel +
+ triplet->chans.num_channels;
+ else
+ /*
+ * 5 GHz -- For example in country IEs if the first
+ * channel given is 36 and the number of channels is 4
+ * then the individual channel numbers defined for the
+ * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+ * and not 36, 37, 38, 39.
+ *
+ * See: http://tinyurl.com/11d-clarification
+ */
+ end_channel = triplet->chans.first_channel +
+ (4 * (triplet->chans.num_channels - 1));
+
cur_channel = triplet->chans.first_channel;
- cur_sub_max_channel = ieee80211_channel_to_frequency(
- cur_channel + triplet->chans.num_channels);
+ cur_sub_max_channel = end_channel;
/* Basic sanity check */
if (cur_sub_max_channel < cur_channel)
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
- /*
- * 5 GHz -- For example in country IEs if the first
- * channel given is 36 and the number of channels is 4
- * then the individual channel numbers defined for the
- * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
- * and not 36, 37, 38, 39.
- *
- * See: http://tinyurl.com/11d-clarification
- */
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
if (intersected_rd) {
printk(KERN_DEBUG "cfg80211: We intersect both of these "
"and get:\n");
- print_regdomain_info(rd);
+ print_regdomain_info(intersected_rd);
return;
}
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
&spec->cur_mux[adc_idx]);
}
+#ifdef CONFIG_SND_JACK
static int conexant_add_jack(struct hda_codec *codec,
hda_nid_t nid, int type)
{
static int conexant_init_jacks(struct hda_codec *codec)
{
-#ifdef CONFIG_SND_JACK
struct conexant_spec *spec = codec->spec;
int i;
++hv;
}
}
-#endif
return 0;
}
+#else
+static inline void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid)
+{
+}
+
+static inline int conexant_init_jacks(struct hda_codec *codec)
+{
+ return 0;
+}
+#endif
static int conexant_init(struct hda_codec *codec)
{
SND_PCI_QUIRK(0x103c, 0x30a5, "HP DV5200T/DV8000T", CXT5047_LAPTOP_HP),
SND_PCI_QUIRK(0x103c, 0x30b2, "HP DV2000T/DV3000T", CXT5047_LAPTOP),
SND_PCI_QUIRK(0x103c, 0x30b5, "HP DV2000Z", CXT5047_LAPTOP),
+ SND_PCI_QUIRK(0x103c, 0x30cf, "HP DV6700", CXT5047_LAPTOP),
SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P100", CXT5047_LAPTOP_EAPD),
{}
};
case 0x106b00a4: /* MacbookPro4,1 */
case 0x106b2c00: /* Macbook Pro rev3 */
case 0x106b3600: /* Macbook 3.1 */
+	case 0x106b3800: /* MacbookPro4,1 - later revision */
board_config = ALC885_MBP3;
break;
default:
info->name = "STAC92xx Analog";
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
+ spec->multiout.dac_nids[0];
info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs;
},
};
-struct snd_soc_dai wm8753_dai[2];
+struct snd_soc_dai wm8753_dai[] = {
+ {
+ .name = "WM8753 DAI 0",
+ },
+ {
+ .name = "WM8753 DAI 1",
+ },
+};
EXPORT_SYMBOL_GPL(wm8753_dai);
static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode)
regs->spcr1 |= RINTM(3);
regs->rcr2 |= RFIG;
regs->xcr2 |= XFIG;
+ if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ regs->xccr = DXENDLY(1) | XDMAEN;
+ regs->rccr = RFULL_CYCLE | RDMAEN;
+ }
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S: