Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 21 Jan 2016 20:32:08 +0000 (12:32 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 21 Jan 2016 20:32:08 +0000 (12:32 -0800)
Merge third patch-bomb from Andrew Morton:
 "I'm pretty much done for -rc1 now:

   - the rest of MM, basically

   - lib/ updates

   - checkpatch, epoll, hfs, fatfs, ptrace, coredump, exit

   - cpu_mask simplifications

   - kexec, rapidio, MAINTAINERS etc, etc.

   - more dma-mapping cleanups/simplifications from hch"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  MAINTAINERS: add/fix git URLs for various subsystems
  mm: memcontrol: add "sock" to cgroup2 memory.stat
  mm: memcontrol: basic memory statistics in cgroup2 memory controller
  mm: memcontrol: do not uncharge old page in page cache replacement
  Documentation: cgroup: add memory.swap.{current,max} description
  mm: free swap cache aggressively if memcg swap is full
  mm: vmscan: do not scan anon pages if memcg swap limit is hit
  swap.h: move memcg related stuff to the end of the file
  mm: memcontrol: replace mem_cgroup_lruvec_online with mem_cgroup_online
  mm: vmscan: pass memcg to get_scan_count()
  mm: memcontrol: charge swap to cgroup2
  mm: memcontrol: clean up alloc, online, offline, free functions
  mm: memcontrol: flatten struct cg_proto
  mm: memcontrol: rein in the CONFIG space madness
  net: drop tcp_memcontrol.c
  mm: memcontrol: introduce CONFIG_MEMCG_LEGACY_KMEM
  mm: memcontrol: allow to disable kmem accounting for cgroup2
  mm: memcontrol: account "kmem" consumers in cgroup2 memory controller
  mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
  mm: memcontrol: separate kmem code from legacy tcp accounting code
  ...

204 files changed:
CREDITS
Documentation/DMA-API-HOWTO.txt
Documentation/cgroup-v2.txt
Documentation/features/io/dma_map_attrs/arch-support.txt [deleted file]
Documentation/filesystems/vfat.txt
Documentation/kernel-parameters.txt
Documentation/sysctl/kernel.txt
Documentation/ubsan.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/include/asm/dma-mapping.h
arch/alpha/include/uapi/asm/mman.h
arch/arc/include/asm/dma-mapping.h
arch/arc/mm/dma.c
arch/arm/Kconfig
arch/arm/include/asm/dma-mapping.h
arch/arm64/Kconfig
arch/arm64/include/asm/dma-mapping.h
arch/avr32/include/asm/dma-mapping.h
arch/avr32/mm/dma-coherent.c
arch/blackfin/include/asm/dma-mapping.h
arch/blackfin/kernel/dma-mapping.c
arch/c6x/Kconfig
arch/c6x/include/asm/dma-mapping.h
arch/c6x/kernel/dma.c
arch/c6x/mm/dma-coherent.c
arch/cris/arch-v32/drivers/pci/dma.c
arch/cris/include/asm/dma-mapping.h
arch/frv/Kconfig
arch/frv/include/asm/dma-mapping.h
arch/frv/include/asm/io.h
arch/frv/mb93090-mb00/pci-dma-nommu.c
arch/frv/mb93090-mb00/pci-dma.c
arch/h8300/Kconfig
arch/h8300/include/asm/dma-mapping.h
arch/hexagon/Kconfig
arch/hexagon/include/asm/dma-mapping.h
arch/ia64/Kconfig
arch/ia64/include/asm/dma-mapping.h
arch/m68k/include/asm/dma-mapping.h
arch/m68k/kernel/dma.c
arch/metag/include/asm/dma-mapping.h
arch/metag/kernel/dma.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/dma-mapping.h
arch/mips/Kconfig
arch/mips/include/asm/dma-mapping.h
arch/mips/include/uapi/asm/mman.h
arch/mn10300/Kconfig
arch/mn10300/include/asm/dma-mapping.h
arch/mn10300/mm/dma-alloc.c
arch/nios2/include/asm/dma-mapping.h
arch/nios2/mm/dma-mapping.c
arch/openrisc/Kconfig
arch/openrisc/include/asm/dma-mapping.h
arch/parisc/Kconfig
arch/parisc/include/asm/dma-mapping.h
arch/parisc/include/uapi/asm/mman.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/pci-dma.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/include/asm/fadump.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/xmon/Makefile
arch/s390/Kconfig
arch/s390/include/asm/dma-mapping.h
arch/sh/Kconfig
arch/sh/include/asm/dma-mapping.h
arch/sparc/Kconfig
arch/sparc/include/asm/dma-mapping.h
arch/tile/Kconfig
arch/tile/include/asm/dma-mapping.h
arch/tile/kernel/pci-dma.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/dma-mapping.h
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/dma-mapping.h
arch/x86/kernel/machine_kexec_64.c
arch/x86/realmode/rm/Makefile
arch/xtensa/Kconfig
arch/xtensa/include/asm/dma-mapping.h
arch/xtensa/include/uapi/asm/mman.h
drivers/base/cpu.c
drivers/base/dma-mapping.c
drivers/firmware/broadcom/bcm47xx_nvram.c
drivers/firmware/efi/libstub/Makefile
drivers/gpu/drm/Kconfig
drivers/gpu/drm/imx/Kconfig
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/shmobile/Kconfig
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/drm/vc4/Kconfig
drivers/iio/industrialio-sw-trigger.c
drivers/media/platform/Kconfig
drivers/memstick/core/ms_block.c
drivers/misc/Kconfig
drivers/parisc/ccio-dma.c
drivers/parisc/sba_iommu.c
drivers/rapidio/rio-sysfs.c
drivers/soc/qcom/smd.c
fs/adfs/adfs.h
fs/coredump.c
fs/eventpoll.c
fs/fat/cache.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/fatent.c
fs/fat/file.c
fs/fat/inode.c
fs/hfs/catalog.c
fs/overlayfs/super.c
fs/proc/array.c
fs/proc/base.c
fs/proc/namespaces.c
fs/proc/task_mmu.c
include/asm-generic/dma-coherent.h [deleted file]
include/asm-generic/dma-mapping-broken.h [deleted file]
include/asm-generic/dma-mapping-common.h [deleted file]
include/linux/cpumask.h
include/linux/dma-attrs.h
include/linux/dma-mapping.h
include/linux/io.h
include/linux/kexec.h
include/linux/list_lru.h
include/linux/lz4.h
include/linux/memcontrol.h
include/linux/ptrace.h
include/linux/radix-tree.h
include/linux/rbtree.h
include/linux/sched.h
include/linux/shm.h
include/linux/slab.h
include/linux/slab_def.h
include/linux/slub_def.h
include/linux/swap.h
include/net/tcp_memcontrol.h [deleted file]
include/uapi/linux/eventpoll.h
init/Kconfig
init/do_mounts.h
init/do_mounts_initrd.c
init/main.c
ipc/shm.c
kernel/cpu.c
kernel/events/core.c
kernel/exit.c
kernel/futex.c
kernel/futex_compat.c
kernel/kcmp.c
kernel/kexec.c
kernel/kexec_core.c
kernel/kexec_file.c
kernel/kexec_internal.h
kernel/printk/printk.c
kernel/ptrace.c
kernel/sys.c
kernel/sysctl.c
lib/Kconfig.debug
lib/Kconfig.ubsan [new file with mode: 0644]
lib/Makefile
lib/iomap_copy.c
lib/libcrc32c.c
lib/string_helpers.c
lib/test-hexdump.c [deleted file]
lib/test_hexdump.c [new file with mode: 0644]
lib/ubsan.c [new file with mode: 0644]
lib/ubsan.h [new file with mode: 0644]
mm/huge_memory.c
mm/kasan/Makefile
mm/list_lru.c
mm/memcontrol.c
mm/memory.c
mm/process_vm_access.c
mm/shmem.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/swap_state.c
mm/swapfile.c
mm/util.c
mm/vmscan.c
mm/zsmalloc.c
net/ipv4/Makefile
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c [deleted file]
net/ipv6/tcp_ipv6.c
net/mac80211/debugfs.c
scripts/Makefile.lib
scripts/Makefile.ubsan [new file with mode: 0644]
scripts/checkpatch.pl
scripts/get_maintainer.pl
security/commoncap.c
security/smack/smack_lsm.c
security/yama/yama_lsm.c

diff --git a/CREDITS b/CREDITS
index 25133c5adae768dd908fe9dd405307239bf98a1b..a3887b59b9f94dffce186079756b29d01fb7b91b 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1856,6 +1856,16 @@ S: Korte Heul 95
 S: 1403 ND  BUSSUM
 S: The Netherlands
 
+N: Martin Kepplinger
+E: martink@posteo.de
+E: martin.kepplinger@theobroma-systems.com
+W: http://www.martinkepplinger.com
+D: mma8452 accelerometers iio driver
+D: Kernel cleanups
+S: Garnisonstraße 26
+S: 4020 Linz
+S: Austria
+
 N: Karl Keyte
 E: karl@koft.com
 D: Disk usage statistics and modifications to line printer driver
index d69b3fc64e14e0427717efc5c505231c78acb5a6..781024ef90502f700baf258115c3715420fc8fb0 100644 (file)
@@ -951,16 +951,6 @@ to "Closing".
    alignment constraints (e.g. the alignment constraints about 64-bit
    objects).
 
-3) Supporting multiple types of IOMMUs
-
-   If your architecture needs to support multiple types of IOMMUs, you
-   can use include/linux/asm-generic/dma-mapping-common.h. It's a
-   library to support the DMA API with multiple types of IOMMUs. Lots
-   of architectures (x86, powerpc, sh, alpha, ia64, microblaze and
-   sparc) use it. Choose one to see how it can be used. If you need to
-   support multiple types of IOMMUs in a single system, the example of
-   x86 or powerpc helps.
-
                           Closing
 
 This document, and the API itself, would not be in its current
index 31d1f7bf12a19ee4a658461569b7285e3fc5df19..65b3eac8856cf7ec26b341b217046a2fb9b7bf26 100644 (file)
@@ -819,6 +819,78 @@ PAGE_SIZE multiple when read back.
                the cgroup.  This may not exactly match the number of
                processes killed but should generally be close.
 
+  memory.stat
+
+       A read-only flat-keyed file which exists on non-root cgroups.
+
+       This breaks down the cgroup's memory footprint into different
+       types of memory, type-specific details, and other information
+       on the state and past events of the memory management system.
+
+       All memory amounts are in bytes.
+
+       The entries are ordered to be human readable, and new entries
+       can show up in the middle. Don't rely on items remaining in a
+       fixed position; use the keys to look up specific values!
+
+         anon
+
+               Amount of memory used in anonymous mappings such as
+               brk(), sbrk(), and mmap(MAP_ANONYMOUS)
+
+         file
+
+               Amount of memory used to cache filesystem data,
+               including tmpfs and shared memory.
+
+         file_mapped
+
+               Amount of cached filesystem data mapped with mmap()
+
+         file_dirty
+
+               Amount of cached filesystem data that was modified but
+               not yet written back to disk
+
+         file_writeback
+
+               Amount of cached filesystem data that was modified and
+               is currently being written back to disk
+
+         inactive_anon
+         active_anon
+         inactive_file
+         active_file
+         unevictable
+
+               Amount of memory, swap-backed and filesystem-backed,
+               on the internal memory management lists used by the
+               page reclaim algorithm
+
+         pgfault
+
+               Total number of page faults incurred
+
+         pgmajfault
+
+               Number of major page faults incurred
+
+  memory.swap.current
+
+       A read-only single value file which exists on non-root
+       cgroups.
+
+       The total amount of swap currently being used by the cgroup
+       and its descendants.
+
+  memory.swap.max
+
+       A read-write single value file which exists on non-root
+       cgroups.  The default is "max".
+
+       Swap usage hard limit.  If a cgroup's swap usage reaches this
+       limit, anonymous memory of the cgroup will not be swapped out.
+
 
 5-2-2. General Usage
 
@@ -1291,3 +1363,20 @@ allocation from the slack available in other groups or the rest of the
 system than killing the group.  Otherwise, memory.max is there to
 limit this type of spillover and ultimately contain buggy or even
 malicious applications.
+
+The combined memory+swap accounting and limiting is replaced by real
+control over swap space.
+
+The main argument for a combined memory+swap facility in the original
+cgroup design was that global or parental pressure would always be
+able to swap all anonymous memory of a child group, regardless of the
+child's own (possibly untrusted) configuration.  However, untrusted
+groups can sabotage swapping by other means - such as referencing their
+anonymous memory in a tight loop - and an admin cannot assume full
+swappability when overcommitting untrusted jobs.
+
+For trusted jobs, on the other hand, a combined counter is not an
+intuitive userspace interface, and it flies in the face of the idea
+that cgroup controllers should account and limit specific physical
+resources.  Swap space is a resource like all others in the system,
+and that's why unified hierarchy allows distributing it separately.
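
The memory.swap.current and memory.swap.max interfaces documented above are plain single-value files under the cgroup2 mount point. Below is a minimal userspace sketch of reading a group's current swap usage; the mount point /sys/fs/cgroup and the group name "job" are illustrative assumptions, not part of the patch.

#include <stdio.h>

int main(void)
{
        unsigned long long bytes;
        FILE *f = fopen("/sys/fs/cgroup/job/memory.swap.current", "r");

        if (!f) {
                perror("memory.swap.current");
                return 1;
        }
        if (fscanf(f, "%llu", &bytes) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("swap used by 'job': %llu bytes\n", bytes);
        return 0;
}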
diff --git a/Documentation/features/io/dma_map_attrs/arch-support.txt b/Documentation/features/io/dma_map_attrs/arch-support.txt
deleted file mode 100644 (file)
index 51d0f1c..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Feature name:          dma_map_attrs
-#         Kconfig:       HAVE_DMA_ATTRS
-#         description:   arch provides dma_*map*_attrs() APIs
-#
-    -----------------------
-    |         arch |status|
-    -----------------------
-    |       alpha: |  ok  |
-    |         arc: | TODO |
-    |         arm: |  ok  |
-    |       arm64: |  ok  |
-    |       avr32: | TODO |
-    |    blackfin: | TODO |
-    |         c6x: | TODO |
-    |        cris: | TODO |
-    |         frv: | TODO |
-    |       h8300: |  ok  |
-    |     hexagon: |  ok  |
-    |        ia64: |  ok  |
-    |        m32r: | TODO |
-    |        m68k: | TODO |
-    |       metag: | TODO |
-    |  microblaze: |  ok  |
-    |        mips: |  ok  |
-    |     mn10300: | TODO |
-    |       nios2: | TODO |
-    |    openrisc: |  ok  |
-    |      parisc: | TODO |
-    |     powerpc: |  ok  |
-    |        s390: |  ok  |
-    |       score: | TODO |
-    |          sh: |  ok  |
-    |       sparc: |  ok  |
-    |        tile: |  ok  |
-    |          um: | TODO |
-    |   unicore32: |  ok  |
-    |         x86: |  ok  |
-    |      xtensa: | TODO |
-    -----------------------
index ce1126aceed8fc739aba64b1ff61ae3ce7cbc90b..223c32171dcc2b562ee0995a708e87ba1829cc8c 100644 (file)
@@ -180,6 +180,16 @@ dos1xfloppy  -- If set, use a fallback default BIOS Parameter Block
 
 <bool>: 0,1,yes,no,true,false
 
+LIMITATION
+---------------------------------------------------------------------
+* The fallocated region of a file is discarded at umount/evict time
+  when using fallocate with FALLOC_FL_KEEP_SIZE.
+  So, the user should assume that the fallocated region can be
+  discarded at the last close if there is memory pressure resulting
+  in eviction of the inode from memory. As a result, for any
+  dependency on the fallocated region, the user should recheck it by
+  calling fallocate again after reopening the file.
+
 TODO
 ----------------------------------------------------------------------
 * Need to get rid of the raw scanning stuff.  Instead, always use
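
A hedged userspace sketch of the limitation described in the hunk above: space preallocated with FALLOC_FL_KEEP_SIZE may be dropped once the inode is evicted, so a program that depends on the preallocation should request it again after reopening the file. The path is a placeholder for a file on a vfat mount.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/vfat/data", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20)) {
                perror("fallocate");
                return 1;
        }
        close(fd);

        /* After a reopen the preallocation may be gone; ask for it again. */
        fd = open("/mnt/vfat/data", O_RDWR);
        if (fd < 0 || fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20)) {
                perror("re-fallocate");
                return 1;
        }
        close(fd);
        return 0;
}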
index 3ea869d7a31c4019c0ed768050b930a370931521..cfb2c0f1a4a89237ce131999d7b1cc03aff178b4 100644 (file)
@@ -611,6 +611,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        cgroup.memory=  [KNL] Pass options to the cgroup memory controller.
                        Format: <string>
                        nosocket -- Disable socket memory accounting.
+                       nokmem -- Disable kernel memory accounting.
 
        checkreqprot    [SELINUX] Set initial checkreqprot flag value.
                        Format: { "0" | "1" }
index 73c6b1ef0e8456a5de653305c1f9c783f0d958c1..a93b414672a71ac6fa9bac1e848215804bde139c 100644 (file)
@@ -825,14 +825,13 @@ via the /proc/sys interface:
        Each write syscall must fully contain the sysctl value to be
        written, and multiple writes on the same sysctl file descriptor
        will rewrite the sysctl value, regardless of file position.
-   0 - (default) Same behavior as above, but warn about processes that
-       perform writes to a sysctl file descriptor when the file position
-       is not 0.
-   1 - Respect file position when writing sysctl strings. Multiple writes
-       will append to the sysctl value buffer. Anything past the max length
-       of the sysctl value buffer will be ignored. Writes to numeric sysctl
-       entries must always be at file position 0 and the value must be
-       fully contained in the buffer sent in the write syscall.
+   0 - Same behavior as above, but warn about processes that perform writes
+       to a sysctl file descriptor when the file position is not 0.
+   1 - (default) Respect file position when writing sysctl strings. Multiple
+       writes will append to the sysctl value buffer. Anything past the max
+       length of the sysctl value buffer will be ignored. Writes to numeric
+       sysctl entries must always be at file position 0 and the value must
+       be fully contained in the buffer sent in the write syscall.
 
 ==============================================================
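
A minimal sketch of the mode 1 (now default) semantics described above: because the file position is respected for string sysctls, consecutive write() calls on the same descriptor append to the value buffer instead of rewriting it. The sysctl path is only an example and writing it requires privilege.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/kernel/domainname", O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, "exam", 4);   /* value becomes "exam", position is now 4 */
        write(fd, "ple", 3);    /* appends at position 4: value is "example" */
        close(fd);
        return 0;
}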
 
diff --git a/Documentation/ubsan.txt b/Documentation/ubsan.txt
new file mode 100644 (file)
index 0000000..f58215e
--- /dev/null
@@ -0,0 +1,84 @@
+Undefined Behavior Sanitizer - UBSAN
+
+Overview
+--------
+
+UBSAN is a runtime undefined behaviour checker.
+
+UBSAN uses compile-time instrumentation to catch undefined behavior (UB).
+The compiler inserts code that performs certain kinds of checks before
+operations that may cause UB. If a check fails (i.e. UB is detected), a
+__ubsan_handle_* function is called to print an error message.
+
+GCC has had this feature since 4.9.x [1] (see the -fsanitize=undefined
+option and its suboptions). GCC 5.x implements more checkers [2].
+
+Report example
+---------------
+
+        ================================================================================
+        UBSAN: Undefined behaviour in ../include/linux/bitops.h:110:33
+        shift exponent 32 is to large for 32-bit type 'unsigned int'
+        CPU: 0 PID: 0 Comm: swapper Not tainted 4.4.0-rc1+ #26
+         0000000000000000 ffffffff82403cc8 ffffffff815e6cd6 0000000000000001
+         ffffffff82403cf8 ffffffff82403ce0 ffffffff8163a5ed 0000000000000020
+         ffffffff82403d78 ffffffff8163ac2b ffffffff815f0001 0000000000000002
+        Call Trace:
+         [<ffffffff815e6cd6>] dump_stack+0x45/0x5f
+         [<ffffffff8163a5ed>] ubsan_epilogue+0xd/0x40
+         [<ffffffff8163ac2b>] __ubsan_handle_shift_out_of_bounds+0xeb/0x130
+         [<ffffffff815f0001>] ? radix_tree_gang_lookup_slot+0x51/0x150
+         [<ffffffff8173c586>] _mix_pool_bytes+0x1e6/0x480
+         [<ffffffff83105653>] ? dmi_walk_early+0x48/0x5c
+         [<ffffffff8173c881>] add_device_randomness+0x61/0x130
+         [<ffffffff83105b35>] ? dmi_save_one_device+0xaa/0xaa
+         [<ffffffff83105653>] dmi_walk_early+0x48/0x5c
+         [<ffffffff831066ae>] dmi_scan_machine+0x278/0x4b4
+         [<ffffffff8111d58a>] ? vprintk_default+0x1a/0x20
+         [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+         [<ffffffff830b2240>] setup_arch+0x405/0xc2c
+         [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+         [<ffffffff830ae053>] start_kernel+0x83/0x49a
+         [<ffffffff830ad120>] ? early_idt_handler_array+0x120/0x120
+         [<ffffffff830ad386>] x86_64_start_reservations+0x2a/0x2c
+         [<ffffffff830ad4f3>] x86_64_start_kernel+0x16b/0x17a
+        ================================================================================
+
+Usage
+-----
+
+To enable UBSAN, configure the kernel with:
+
+       CONFIG_UBSAN=y
+
+and to check the entire kernel:
+
+        CONFIG_UBSAN_SANITIZE_ALL=y
+
+To enable instrumentation for specific files or directories, add a line
+similar to the following to the respective kernel Makefile:
+
+        For a single file (e.g. main.o):
+                UBSAN_SANITIZE_main.o := y
+
+        For all files in one directory:
+                UBSAN_SANITIZE := y
+
+To exclude files from being instrumented even if
+CONFIG_UBSAN_SANITIZE_ALL=y, use:
+
+                UBSAN_SANITIZE_main.o := n
+        and:
+                UBSAN_SANITIZE := n
+
+Detection of unaligned accesses is controlled through a separate option,
+CONFIG_UBSAN_ALIGNMENT. It is off by default on architectures that support
+unaligned accesses (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y). One can
+still enable it in the config, just note that it will produce a lot of
+UBSAN reports.
+
+References
+----------
+
+[1] - https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html
+[2] - https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html
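
For illustration, the kind of code the sample report in the new document comes from is a shift whose exponent can reach the width of the type; with the containing file instrumented (UBSAN_SANITIZE_<file>.o := y, or CONFIG_UBSAN_SANITIZE_ALL=y), the shift trips __ubsan_handle_shift_out_of_bounds() at runtime. The function below is a hypothetical example, not code from the patch.

/* Undefined if nr can reach 32 on a 32-bit unsigned int. */
static unsigned int set_bit_in_word(unsigned int word, unsigned int nr)
{
        return word | (1U << nr);
}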
index 84e08e626e1015fe01949b80452e6c342ea62691..45d2717760fc229171775a5216b13d9ef3025e1d 100644 (file)
@@ -781,6 +781,7 @@ F:  sound/aoa/
 APM DRIVER
 M:     Jiri Kosina <jikos@kernel.org>
 S:     Odd fixes
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/apm.git
 F:     arch/x86/kernel/apm_32.c
 F:     include/linux/apm_bios.h
 F:     include/uapi/linux/apm_bios.h
@@ -946,6 +947,7 @@ M:  Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.linux4sam.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
 S:     Supported
 F:     arch/arm/mach-at91/
 F:     include/soc/at91/
@@ -1464,6 +1466,7 @@ ARM/Rockchip SoC support
 M:     Heiko Stuebner <heiko@sntech.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-rockchip@lists.infradead.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
 S:     Maintained
 F:     arch/arm/boot/dts/rk3*
 F:     arch/arm/mach-rockchip/
@@ -1796,6 +1799,7 @@ ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
 M:     Will Deacon <will.deacon@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
 S:     Maintained
 F:     arch/arm64/
 F:     Documentation/arm64/
@@ -1881,7 +1885,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
 M:     Kalle Valo <kvalo@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath6kl
-T:     git git://github.com/kvalo/ath.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 S:     Supported
 F:     drivers/net/wireless/ath/ath6kl/
 
@@ -2133,6 +2137,7 @@ F:        drivers/net/wireless/broadcom/b43legacy/
 BACKLIGHT CLASS/SUBSYSTEM
 M:     Jingoo Han <jingoohan1@gmail.com>
 M:     Lee Jones <lee.jones@linaro.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lee/backlight.git
 S:     Maintained
 F:     drivers/video/backlight/
 F:     include/linux/backlight.h
@@ -2815,6 +2820,7 @@ F:        drivers/input/touchscreen/chipone_icn8318.c
 CHROME HARDWARE PLATFORM SUPPORT
 M:     Olof Johansson <olof@lixom.net>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/olof/chrome-platform.git
 F:     drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
@@ -3113,6 +3119,7 @@ M:        Mikael Starvik <starvik@axis.com>
 M:     Jesper Nilsson <jesper.nilsson@axis.com>
 L:     linux-cris-kernel@axis.com
 W:     http://developer.axis.com
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris.git
 S:     Maintained
 F:     arch/cris/
 F:     drivers/tty/serial/crisv10.*
@@ -3121,6 +3128,7 @@ CRYPTO API
 M:     Herbert Xu <herbert@gondor.apana.org.au>
 M:     "David S. Miller" <davem@davemloft.net>
 L:     linux-crypto@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
 S:     Maintained
 F:     Documentation/crypto/
@@ -3583,7 +3591,7 @@ M:        Christine Caulfield <ccaulfie@redhat.com>
 M:     David Teigland <teigland@redhat.com>
 L:     cluster-devel@redhat.com
 W:     http://sources.redhat.com/cluster/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git
 S:     Supported
 F:     fs/dlm/
 
@@ -3997,6 +4005,7 @@ M:        Tyler Hicks <tyhicks@canonical.com>
 L:     ecryptfs@vger.kernel.org
 W:     http://ecryptfs.org
 W:     https://launchpad.net/ecryptfs
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git
 S:     Supported
 F:     Documentation/filesystems/ecryptfs.txt
 F:     fs/ecryptfs/
@@ -4275,6 +4284,7 @@ M:        Andreas Dilger <adilger.kernel@dilger.ca>
 L:     linux-ext4@vger.kernel.org
 W:     http://ext4.wiki.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-ext4/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
 S:     Maintained
 F:     Documentation/filesystems/ext4.txt
 F:     fs/ext4/
@@ -4957,6 +4967,7 @@ F:        include/linux/hw_random.h
 HARDWARE SPINLOCK CORE
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
 F:     Documentation/hwspinlock.txt
 F:     drivers/hwspinlock/hwspinlock_*
 F:     include/linux/hwspinlock.h
@@ -5495,6 +5506,7 @@ M:        Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
 L:     linux-ima-devel@lists.sourceforge.net
 L:     linux-ima-user@lists.sourceforge.net
 L:     linux-security-module@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
 S:     Supported
 F:     security/integrity/ima/
 
@@ -5750,11 +5762,11 @@ F:      include/linux/mic_bus.h
 F:     include/linux/scif.h
 F:     include/uapi/linux/mic_common.h
 F:     include/uapi/linux/mic_ioctl.h
-F      include/uapi/linux/scif_ioctl.h
+F:     include/uapi/linux/scif_ioctl.h
 F:     drivers/misc/mic/
 F:     drivers/dma/mic_x100_dma.c
 F:     drivers/dma/mic_x100_dma.h
-F      Documentation/mic/
+F:     Documentation/mic/
 
 INTEL PMC/P-Unit IPC DRIVER
 M:     Zha Qipeng<qipeng.zha@intel.com>
@@ -5835,6 +5847,8 @@ M:        Julian Anastasov <ja@ssi.bg>
 L:     netdev@vger.kernel.org
 L:     lvs-devel@vger.kernel.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs.git
 F:     Documentation/networking/ipvs-sysctl.txt
 F:     include/net/ip_vs.h
 F:     include/uapi/linux/ip_vs.h
@@ -6118,6 +6132,7 @@ M:        "J. Bruce Fields" <bfields@fieldses.org>
 M:     Jeff Layton <jlayton@poochiereds.net>
 L:     linux-nfs@vger.kernel.org
 W:     http://nfs.sourceforge.net/
+T:     git git://linux-nfs.org/~bfields/linux.git
 S:     Supported
 F:     fs/nfsd/
 F:     include/uapi/linux/nfsd/
@@ -6174,6 +6189,7 @@ M:        Christian Borntraeger <borntraeger@de.ibm.com>
 M:     Cornelia Huck <cornelia.huck@de.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S:     Supported
 F:     Documentation/s390/kvm.txt
 F:     arch/s390/include/asm/kvm*
@@ -6247,6 +6263,7 @@ KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
 S:     Maintained
 F:     Documentation/DocBook/kgdb.tmpl
 F:     drivers/misc/kgdbts.c
@@ -6418,6 +6435,7 @@ LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-nvdimm@lists.01.org
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
 S:     Supported
 F:     drivers/nvdimm/*
 F:     include/linux/nd.h
@@ -7087,6 +7105,7 @@ F:        Documentation/hwmon/menf21bmc
 METAG ARCHITECTURE
 M:     James Hogan <james.hogan@imgtec.com>
 L:     linux-metag@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
 S:     Odd Fixes
 F:     arch/metag/
 F:     Documentation/metag/
@@ -7568,7 +7587,8 @@ NETWORKING DRIVERS (WIRELESS)
 M:     Kalle Valo <kvalo@codeaurora.org>
 L:     linux-wireless@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-wireless/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
 S:     Maintained
 F:     drivers/net/wireless/
 
@@ -7974,6 +7994,7 @@ M:        Mark Rutland <mark.rutland@arm.com>
 M:     Ian Campbell <ijc+devicetree@hellion.org.uk>
 M:     Kumar Gala <galak@codeaurora.org>
 L:     devicetree@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
 S:     Maintained
 F:     Documentation/devicetree/
 F:     arch/*/boot/dts/
@@ -8364,7 +8385,7 @@ PCMCIA SUBSYSTEM
 P:     Linux PCMCIA Team
 L:     linux-pcmcia@lists.infradead.org
 W:     http://lists.infradead.org/mailman/listinfo/linux-pcmcia
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git
 S:     Maintained
 F:     Documentation/pcmcia/
 F:     drivers/pcmcia/
@@ -8686,7 +8707,7 @@ M:        Colin Cross <ccross@android.com>
 M:     Kees Cook <keescook@chromium.org>
 M:     Tony Luck <tony.luck@intel.com>
 S:     Maintained
-T:     git git://git.infradead.org/users/cbou/linux-pstore.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 F:     fs/pstore/
 F:     include/linux/pstore*
 F:     drivers/firmware/efi/efi-pstore.c
@@ -8895,13 +8916,14 @@ QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@qca.qualcomm.com>
 L:     ath10k@lists.infradead.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath10k
-T:     git git://github.com/kvalo/ath.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 S:     Supported
 F:     drivers/net/wireless/ath/ath10k/
 
 QUALCOMM HEXAGON ARCHITECTURE
 M:     Richard Kuo <rkuo@codeaurora.org>
 L:     linux-hexagon@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.git
 S:     Supported
 F:     arch/hexagon/
 
@@ -9100,6 +9122,7 @@ F:        drivers/phy/phy-rcar-gen3-usb2.c
 
 RESET CONTROLLER FRAMEWORK
 M:     Philipp Zabel <p.zabel@pengutronix.de>
+T:     git git://git.pengutronix.de/git/pza/linux
 S:     Maintained
 F:     drivers/reset/
 F:     Documentation/devicetree/bindings/reset/
@@ -9247,6 +9270,7 @@ M:        Martin Schwidefsky <schwidefsky@de.ibm.com>
 M:     Heiko Carstens <heiko.carstens@de.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
 S:     Supported
 F:     arch/s390/
 F:     drivers/s390/
@@ -9439,7 +9463,7 @@ M:        Lukasz Majewski <l.majewski@samsung.com>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Supported
-T:     https://github.com/lmajewski/linux-samsung-thermal.git
+T:     git https://github.com/lmajewski/linux-samsung-thermal.git
 F:     drivers/thermal/samsung/
 
 SAMSUNG USB2 PHY DRIVER
@@ -10092,6 +10116,7 @@ F:      drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
 L:     linux-raid@vger.kernel.org
+T:     git git://neil.brown.name/md
 S:     Supported
 F:     drivers/md/
 F:     include/linux/raid/
@@ -10263,6 +10288,7 @@ SQUASHFS FILE SYSTEM
 M:     Phillip Lougher <phillip@squashfs.org.uk>
 L:     squashfs-devel@lists.sourceforge.net (subscribers-only)
 W:     http://squashfs.org.uk
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pkl/squashfs-next.git
 S:     Maintained
 F:     Documentation/filesystems/squashfs.txt
 F:     fs/squashfs/
@@ -10459,6 +10485,7 @@ F:      arch/x86/boot/video*
 SWIOTLB SUBSYSTEM
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
 F:     lib/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
@@ -10722,6 +10749,7 @@ TENSILICA XTENSA PORT (xtensa)
 M:     Chris Zankel <chris@zankel.net>
 M:     Max Filippov <jcmvbkbc@gmail.com>
 L:     linux-xtensa@linux-xtensa.org
+T:     git git://github.com/czankel/xtensa-linux.git
 S:     Maintained
 F:     arch/xtensa/
 F:     drivers/irqchip/irq-xtensa-*
@@ -11004,7 +11032,7 @@ R:      Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 W:     http://tpmdd.sourceforge.net
 L:     tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
 Q:     git git://github.com/PeterHuewe/linux-tpmdd.git
-T:     https://github.com/PeterHuewe/linux-tpmdd
+T:     git https://github.com/PeterHuewe/linux-tpmdd
 S:     Maintained
 F:     drivers/char/tpm/
 
@@ -11461,6 +11489,7 @@ M:      Richard Weinberger <richard@nod.at>
 L:     user-mode-linux-devel@lists.sourceforge.net
 L:     user-mode-linux-user@lists.sourceforge.net
 W:     http://user-mode-linux.sourceforge.net
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
 S:     Maintained
 F:     Documentation/virtual/uml/
 F:     arch/um/
@@ -11507,6 +11536,7 @@ F:      fs/fat/
 VFIO DRIVER
 M:     Alex Williamson <alex.williamson@redhat.com>
 L:     kvm@vger.kernel.org
+T:     git git://github.com/awilliam/linux-vfio.git
 S:     Maintained
 F:     Documentation/vfio.txt
 F:     drivers/vfio/
@@ -11576,6 +11606,7 @@ M:      "Michael S. Tsirkin" <mst@redhat.com>
 L:     kvm@vger.kernel.org
 L:     virtualization@lists.linux-foundation.org
 L:     netdev@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git
 S:     Maintained
 F:     drivers/vhost/
 F:     include/uapi/linux/vhost.h
@@ -11992,7 +12023,7 @@ M:      Dave Chinner <david@fromorbit.com>
 M:     xfs@oss.sgi.com
 L:     xfs@oss.sgi.com
 W:     http://oss.sgi.com/projects/xfs
-T:     git git://oss.sgi.com/xfs/xfs.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
 S:     Supported
 F:     Documentation/filesystems/xfs.txt
 F:     fs/xfs/
index 7f4ac1ee4a2b1359276b7f49b87008a5b4b6b413..abfb3e8eb0b14e680290f3800bd331e870cbb8c6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -411,7 +411,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -784,6 +784,7 @@ endif
 
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
+include scripts/Makefile.ubsan
 
 # Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
 # last assignments
index ba1b626bca002e751d60fc4e29784d7632f1a1a9..f6b649d88ec82ec44913d6a81ee0136498d88b92 100644 (file)
@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG
 config HAVE_ARCH_TRACEHOOK
        bool
 
-config HAVE_DMA_ATTRS
-       bool
-
 config HAVE_DMA_CONTIGUOUS
        bool
 
@@ -632,4 +629,7 @@ config OLD_SIGACTION
 config COMPAT_OLD_SIGACTION
        bool
 
+config ARCH_NO_COHERENT_DMA_MMAP
+       bool
+
 source "kernel/gcov/Kconfig"
index f515a4dbf7a0621e902fc488ef0108ac34d60b4c..9d8a85801ed1f3ae482bc94ff08bcc423c89f9a8 100644 (file)
@@ -9,7 +9,6 @@ config ALPHA
        select HAVE_OPROFILE
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
-       select HAVE_DMA_ATTRS
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select AUTO_IRQ_AFFINITY if SMP
index 72a8ca7796d91a2d2a92d696ce507650678c3998..3c3451f58ff4e32ba283f8f208a713427f5a1d60 100644 (file)
@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #define dma_cache_sync(dev, va, size, dir)               ((void)0)
 
 #endif /* _ALPHA_DMA_MAPPING_H */
index ab336c06153e63c3aba2adc36a622e8a5d4c3423..fec1947b8dbcdbc444f65254df5f3d4eeec717be 100644 (file)
@@ -47,7 +47,6 @@
 #define MADV_WILLNEED  3               /* will need these pages */
 #define        MADV_SPACEAVAIL 5               /* ensure resources are available */
 #define MADV_DONTNEED  6               /* don't need these pages */
-#define MADV_FREE      7               /* free pages only if memory pressure */
 
 /* common/generic parameters */
 #define MADV_FREE      8               /* free pages only if memory pressure */
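
A minimal userspace sketch of MADV_FREE as defined above: the kernel may reclaim the marked anonymous pages lazily under memory pressure, so the caller must be able to regenerate their contents. This assumes the C library headers expose MADV_FREE; otherwise the value from the uapi header applies.

#include <sys/mman.h>
#include <stddef.h>

int release_scratch(void *buf, size_t len)
{
        /* Hint: these pages may be freed instead of swapped if memory is tight. */
        return madvise(buf, len, MADV_FREE);
}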
index 2d28ba939d8edc71c693442b4464076f840ea5ff..660205414f1da1c199461ccc2c2eee46ecacd3b9 100644 (file)
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
-#include <asm-generic/dma-coherent.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops arc_dma_ops;
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-                         dma_addr_t dma_handle);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
-                      dma_addr_t dma_handle);
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-
-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
-                                          enum dma_data_direction dir)
-{
-       switch (dir) {
-       case DMA_FROM_DEVICE:
-               dma_cache_inv(paddr, size);
-               break;
-       case DMA_TO_DEVICE:
-               dma_cache_wback(paddr, size);
-               break;
-       case DMA_BIDIRECTIONAL:
-               dma_cache_wback_inv(paddr, size);
-               break;
-       default:
-               pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
-       }
-}
-
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
-                         enum dma_data_direction dir);
-
-#define _dma_cache_sync(addr, sz, dir)                 \
-do {                                                   \
-       if (__builtin_constant_p(dir))                  \
-               __inline_dma_cache_sync(addr, sz, dir); \
-       else                                            \
-               __arc_dma_cache_sync(addr, sz, dir);    \
-}                                                      \
-while (0);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-              enum dma_data_direction dir)
-{
-       _dma_cache_sync((unsigned long)cpu_addr, size, dir);
-       return (dma_addr_t)cpu_addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size,
-            enum dma_data_direction dir)
-{
-       unsigned long paddr = page_to_phys(page) + offset;
-       return dma_map_single(dev, (void *)paddr, size, dir);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-              size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg,
-          int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *s;
-       int i;
-
-       for_each_sg(sg, s, nents, i)
-               s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
-                                              s->length, dir);
-
-       return nents;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-            int nents, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       struct scatterlist *s;
-       int i;
-
-       for_each_sg(sg, s, nents, i)
-               dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                       size_t size, enum dma_data_direction dir)
-{
-       _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                          size_t size, enum dma_data_direction dir)
-{
-       _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction)
-{
-       _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-       _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-                   enum dma_data_direction dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i)
-               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-                      int nelems, enum dma_data_direction dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i)
-               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
-       /* Support 32 bit DMA mask exclusively */
-       return dma_mask == DMA_BIT_MASK(32);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-
-       return 0;
+       return &arc_dma_ops;
 }
 
 #endif
index 29a46bb198ccaf834398212b25c3a1c670ba2763..01eaf88bf821398fecb19dae6780e6702242ff64 100644 (file)
  */
 
 #include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-#include <linux/export.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
-/*
- * Helpers for Coherent DMA API.
- */
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t gfp)
+
+static void *arc_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-       void *paddr;
+       void *paddr, *kvaddr;
 
        /* This is linear addr (0x8000_0000 based) */
        paddr = alloc_pages_exact(size, gfp);
@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
        /* This is bus address, platform dependent */
        *dma_handle = (dma_addr_t)paddr;
 
-       return paddr;
-}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-                         dma_addr_t dma_handle)
-{
-       free_pages_exact((void *)dma_handle, size);
-}
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t gfp)
-{
-       void *paddr, *kvaddr;
-
        /*
         * IOC relies on all data (even coherent DMA data) being in cache
         * Thus allocate normal cached memory
@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
         *   -For coherent data, Read/Write to buffers terminate early in cache
         *   (vs. always going to memory - thus are faster)
         */
-       if (is_isa_arcv2() && ioc_exists)
-               return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
-
-       /* This is linear addr (0x8000_0000 based) */
-       paddr = alloc_pages_exact(size, gfp);
-       if (!paddr)
-               return NULL;
+       if ((is_isa_arcv2() && ioc_exists) ||
+           dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+               return paddr;
 
        /* This is kernel Virtual address (0x7000_0000 based) */
        kvaddr = ioremap_nocache((unsigned long)paddr, size);
        if (kvaddr == NULL)
                return NULL;
 
-       /* This is bus address, platform dependent */
-       *dma_handle = (dma_addr_t)paddr;
-
        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        return kvaddr;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
-                      dma_addr_t dma_handle)
+static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-       if (is_isa_arcv2() && ioc_exists)
-               return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
-
-       iounmap((void __force __iomem *)kvaddr);
+       if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
+           !(is_isa_arcv2() && ioc_exists))
+               iounmap((void __force __iomem *)vaddr);
 
        free_pages_exact((void *)dma_handle, size);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 /*
- * Helper for streaming DMA...
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to be explicitly made
+ * consistent before each use
  */
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
-                         enum dma_data_direction dir)
+static void _dma_cache_sync(unsigned long paddr, size_t size,
+               enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+               dma_cache_inv(paddr, size);
+               break;
+       case DMA_TO_DEVICE:
+               dma_cache_wback(paddr, size);
+               break;
+       case DMA_BIDIRECTIONAL:
+               dma_cache_wback_inv(paddr, size);
+               break;
+       default:
+               pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+       }
+}
+
+static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       unsigned long paddr = page_to_phys(page) + offset;
+       _dma_cache_sync(paddr, size, dir);
+       return (dma_addr_t)paddr;
+}
+
+static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
+          int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+                                              s->length, dir);
+
+       return nents;
+}
+
+static void arc_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+       _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+}
+
+static void arc_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-       __inline_dma_cache_sync(paddr, size, dir);
+       _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
 }
-EXPORT_SYMBOL(__arc_dma_cache_sync);
+
+static void arc_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction dir)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static void arc_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction dir)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static int arc_dma_supported(struct device *dev, u64 dma_mask)
+{
+       /* Support 32 bit DMA mask exclusively */
+       return dma_mask == DMA_BIT_MASK(32);
+}
+
+struct dma_map_ops arc_dma_ops = {
+       .alloc                  = arc_dma_alloc,
+       .free                   = arc_dma_free,
+       .map_page               = arc_dma_map_page,
+       .map_sg                 = arc_dma_map_sg,
+       .sync_single_for_device = arc_dma_sync_single_for_device,
+       .sync_single_for_cpu    = arc_dma_sync_single_for_cpu,
+       .sync_sg_for_cpu        = arc_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arc_dma_sync_sg_for_device,
+       .dma_supported          = arc_dma_supported,
+};
+EXPORT_SYMBOL(arc_dma_ops);
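
Driver-visible behaviour is meant to be unchanged by this conversion: drivers keep calling the generic helpers from <linux/dma-mapping.h>, which now dispatch through get_dma_ops(dev) (arc_dma_ops on ARC) rather than architecture-private inlines. A hedged sketch of such a call path, with a hypothetical device and buffer:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int send_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand "handle" to the hardware and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}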
index 37c7951ca4f5fe1efd09aaa11ba7c027297ae075..4f799e567fc870502ae147f3c26c5bb402315a9d 100644 (file)
@@ -47,7 +47,6 @@ config ARM
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
index ccb3aa64640dc350da9de1319d65b2a46c44e2ee..6ad1ceda62a52cebe1063828ef8c131ebc9489c6 100644 (file)
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *dev, u64 mask);
 
-/*
- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
- * implementations, we don't provide a dma_cache_sync function so drivers using
- * this API are highlighted with build warnings.
- */
-#include <asm-generic/dma-mapping-common.h>
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
index 6be3fa2310ee839e7af1b0cb57aed6ac22790242..8cc62289a63ed9958c9623acaa5462048cc9287b 100644 (file)
@@ -64,7 +64,6 @@ config ARM64
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
index 61e08f360e31da1f92a38881cd2cf924d2f03c82..ba437f090a74702dd1e697db3238f6619023f83c 100644 (file)
@@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
        return dev->archdata.dma_coherent;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
        return (dma_addr_t)paddr;
index ae7ac9205d20cb80d6d6f5ebecf21537fdc51cca..1115f2a645d1805de87396e49fc27de9b7700136 100644 (file)
 #ifndef __ASM_AVR32_DMA_MAPPING_H
 #define __ASM_AVR32_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/scatterlist.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        int direction);
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       /* Fix when needed. I really don't know of any limitations */
-       return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-       return 0;
-}
+extern struct dma_map_ops avr32_dma_ops;
 
-/*
- * dma_map_single can't fail as it is implemented now.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       return 0;
+       return &avr32_dma_ops;
 }
 
-/**
- * dma_alloc_coherent - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-                               dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void dma_free_coherent(struct device *dev, size_t size,
-                             void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_alloc_writecombine - allocate write-combining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *dev, size_t size,
-                                   dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_coherent - free memory allocated by dma_alloc_writecombine
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_writecombine
- * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
- * @handle: device-view address returned from dma_alloc_writecombine
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_writecombine().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void dma_free_writecombine(struct device *dev, size_t size,
-                                 void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or dma_sync_single().
- */
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-              enum dma_data_direction direction)
-{
-       dma_cache_sync(dev, cpu_addr, size, direction);
-       return virt_to_bus(cpu_addr);
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size,
-            enum dma_data_direction direction)
-{
-       return dma_map_single(dev, page_address(page) + offset,
-                             size, direction);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
-{
-       dma_unmap_single(dev, dma_address, size, direction);
-}
-
-/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above dma_map_single() interface.  Here the scatter-gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single() are
- * the same here.
- */
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-          enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               char *virt;
-
-               sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-               virt = sg_virt(sg);
-               dma_cache_sync(dev, virt, sg->length, direction);
-       }
-
-       return nents;
-}
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_sync_single_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the CPU, yet do not wish to tear down the DMA mapping,
- * you must call this function before doing so.  At the next point you
- * give the DMA address back to the device, you must first perform a
- * dma_sync_single_for_device(), and then the device again owns the
- * buffer.
- */
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                       size_t size, enum dma_data_direction direction)
-{
-       /*
-        * No need to do anything since the CPU isn't supposed to
-        * touch this memory after we flushed it at mapping- or
-        * sync-for-device time.
-        */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                          size_t size, enum dma_data_direction direction)
-{
-       dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction)
-{
-       /* just sync everything, that's all the pci API can do */
-       dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-       /* just sync everything, that's all the pci API can do */
-       dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                   int nents, enum dma_data_direction direction)
-{
-       /*
-        * No need to do anything since the CPU isn't supposed to
-        * touch this memory after we flushed it at mapping- or
-        * sync-for-device time.
-        */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-                      int nents, enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i)
-               dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
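
The kernel-doc removed above spells out the streaming-DMA ownership rules (map, optional sync_for_cpu/sync_for_device, unmap) that the generic dma_map_ops wrappers now enforce for every architecture. A minimal driver-side sketch of that protocol, assuming a hypothetical device 'dev' and a kmalloc'd buffer:

#include <linux/dma-mapping.h>

/* hypothetical helper: one receive into a kmalloc'd buffer, then reuse it */
static int example_streaming_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the device at 'handle' and wait for the transfer ... */

	/* look at the data without tearing the mapping down */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */

	/* hand the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... second transfer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}
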
index 50cdb5b10f0fc7b8bdfae25ffb9b32d5655463de..92cf1fb2b3e68d83aafcbf53f859bf88448cfcc9 100644 (file)
@@ -9,9 +9,14 @@
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
 
-#include <asm/addrspace.h>
+#include <asm/processor.h>
 #include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size,
                __free_page(page++);
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *handle, gfp_t gfp)
+static void *avr32_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        struct page *page;
-       void *ret = NULL;
+       dma_addr_t phys;
 
        page = __dma_alloc(dev, size, handle, gfp);
-       if (page)
-               ret = phys_to_uncached(page_to_phys(page));
+       if (!page)
+               return NULL;
+       phys = page_to_phys(page);
 
-       return ret;
+       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+               /* Now, map the page into P3 with write-combining turned on */
+               *handle = phys;
+               return __ioremap(phys, size, _PAGE_BUFFER);
+       } else {
+               return phys_to_uncached(phys);
+       }
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_free(struct device *dev, size_t size,
+               void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
 {
-       void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
        struct page *page;
 
-       pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
-                cpu_addr, (unsigned long)handle, (unsigned)size);
-       BUG_ON(!virt_addr_valid(addr));
-       page = virt_to_page(addr);
+       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+               iounmap(cpu_addr);
+
+               page = phys_to_page(handle);
+       } else {
+               void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+
+               pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
+                        cpu_addr, (unsigned long)handle, (unsigned)size);
+
+               BUG_ON(!virt_addr_valid(addr));
+               page = virt_to_page(addr);
+       }
+
        __dma_free(dev, size, page, handle);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-void *dma_alloc_writecombine(struct device *dev, size_t size,
-                            dma_addr_t *handle, gfp_t gfp)
+static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-       struct page *page;
-       dma_addr_t phys;
+       void *cpu_addr = page_address(page) + offset;
 
-       page = __dma_alloc(dev, size, handle, gfp);
-       if (!page)
-               return NULL;
+       dma_cache_sync(dev, cpu_addr, size, direction);
+       return virt_to_bus(cpu_addr);
+}
 
-       phys = page_to_phys(page);
-       *handle = phys;
+static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nents, i) {
+               char *virt;
 
-       /* Now, map the page into P3 with write-combining turned on */
-       return __ioremap(phys, size, _PAGE_BUFFER);
+               sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+               virt = sg_virt(sg);
+               dma_cache_sync(dev, virt, sg->length, direction);
+       }
+
+       return nents;
 }
-EXPORT_SYMBOL(dma_alloc_writecombine);
 
-void dma_free_writecombine(struct device *dev, size_t size,
-                          void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
 {
-       struct page *page;
+       dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
 
-       iounmap(cpu_addr);
+static void avr32_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nents,
+               enum dma_data_direction direction)
+{
+       int i;
+       struct scatterlist *sg;
 
-       page = phys_to_page(handle);
-       __dma_free(dev, size, page, handle);
+       for_each_sg(sglist, sg, nents, i)
+               dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }
-EXPORT_SYMBOL(dma_free_writecombine);
+
+struct dma_map_ops avr32_dma_ops = {
+       .alloc                  = avr32_dma_alloc,
+       .free                   = avr32_dma_free,
+       .map_page               = avr32_dma_map_page,
+       .map_sg                 = avr32_dma_map_sg,
+       .sync_single_for_device = avr32_dma_sync_single_for_device,
+       .sync_sg_for_device     = avr32_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(avr32_dma_ops);
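
With dma_alloc_writecombine()/dma_free_writecombine() gone as exported entry points, the write-combining path survives as the DMA_ATTR_WRITE_COMBINE branch of avr32_dma_alloc()/avr32_dma_free(). A sketch of how a caller could now request the same memory through the dma_attrs-era attrs interface (the helper name is illustrative):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *example_alloc_wc(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/* routes the request to the WRITE_COMBINE branch of the .alloc hook */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}
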
index 054d9ec57d9dc1cea3eba24655a8e24349c06cef..3490570aaa8284daffc6abb6d73d60d3bdcc92b4 100644 (file)
@@ -8,36 +8,6 @@
 #define _BLACKFIN_DMA_MAPPING_H
 
 #include <asm/cacheflush.h>
-struct scatterlist;
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                      dma_addr_t dma_handle);
-
-/*
- * Now for the API extensions over the pci_ one
- */
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_supported(d, m)         (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-
-       return 0;
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
-}
 
 extern void
 __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
@@ -66,102 +36,11 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
                __dma_sync(addr, size, dir);
 }
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction dir)
-{
-       _dma_sync((dma_addr_t)ptr, size, dir);
-       return (dma_addr_t) ptr;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size,
-            enum dma_data_direction dir)
-{
-       return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-              enum dma_data_direction dir)
-{
-       dma_unmap_single(dev, dma_addr, size, dir);
-}
+extern struct dma_map_ops bfin_dma_ops;
 
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                     enum dma_data_direction dir);
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-            int nhwentries, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       BUG_ON(!valid_dma_direction(dir));
+       return &bfin_dma_ops;
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction dir)
-{
-       _dma_sync(handle + offset, size, dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
-                       enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
-                          enum dma_data_direction dir)
-{
-       dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-                   enum dma_data_direction dir)
-{
-       BUG_ON(!valid_dma_direction(dir));
-}
-
-extern void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                      int nents, enum dma_data_direction dir);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction dir)
-{
-       _dma_sync((dma_addr_t)vaddr, size, dir);
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif                         /* _BLACKFIN_DMA_MAPPING_H */
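
The header now only has to publish bfin_dma_ops via get_dma_ops(); the per-call wrappers it used to open-code come from the common dma-mapping code instead. A simplified sketch of how such a wrapper dispatches a single mapping through the ops table (the shape of the generic code, not a verbatim copy):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static inline dma_addr_t dma_map_single_sketch(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* single mappings are handed to the backend as page + offset */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}
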
index df437e52d9df20b9f3369d12a2939a9277687c0d..771afe6e4264460b7457bb7961120fe15f959ed0 100644 (file)
@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
        spin_unlock_irqrestore(&dma_page_lock, flags);
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t gfp)
+static void *bfin_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        void *ret;
 
@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        return ret;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                 dma_addr_t dma_handle)
+static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
+                 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        __free_dma_pages((unsigned long)vaddr, get_pages(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 /*
  * Streaming DMA mappings
@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size,
 }
 EXPORT_SYMBOL(__dma_sync);
 
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
-          enum dma_data_direction direction)
+static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
        int i;
@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
 
        return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
-                           int nelems, enum dma_data_direction direction)
+static void bfin_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sg_list, int nelems,
+               enum dma_data_direction direction)
 {
        struct scatterlist *sg;
        int i;
@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
                __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
        }
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
+
+       _dma_sync(handle, size, dir);
+       return handle;
+}
+
+static inline void bfin_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       _dma_sync(handle, size, dir);
+}
+
+struct dma_map_ops bfin_dma_ops = {
+       .alloc                  = bfin_dma_alloc,
+       .free                   = bfin_dma_free,
+
+       .map_page               = bfin_dma_map_page,
+       .map_sg                 = bfin_dma_map_sg,
+
+       .sync_single_for_device = bfin_dma_sync_single_for_device,
+       .sync_sg_for_device     = bfin_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(bfin_dma_ops);
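
The removed dma_sync_single_range_* helpers are likewise provided by the common code on top of .sync_single_for_{cpu,device}. A small driver-side sketch of syncing only the window of a mapping that a transfer actually touched (offset and length are illustrative):

#include <linux/dma-mapping.h>

static void example_partial_sync(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t len)
{
	/* hand only the window the device will read back to it ... */
	dma_sync_single_range_for_device(dev, handle, offset, len,
					 DMA_TO_DEVICE);
	/* ... device runs ... */

	/* ... then reclaim just that window for the CPU */
	dma_sync_single_range_for_cpu(dev, handle, offset, len,
				      DMA_TO_DEVICE);
}
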
index 77ea09b8bce1bb775c725f71211de3d3c9224229..79049d432d3c7808f6914376a715e34ada4c14af 100644 (file)
@@ -17,6 +17,7 @@ config C6X
        select OF_EARLY_FLATTREE
        select GENERIC_CLOCKEVENTS
        select MODULES_USE_ELF_RELA
+       select ARCH_NO_COHERENT_DMA_MMAP
 
 config MMU
        def_bool n
index bbd7774e4d4e312b57a626c0b685cb447c82b89d..6b5cd7b0cf32f3eccd352ed1962ecaa715035b2a 100644 (file)
 #ifndef _ASM_C6X_DMA_MAPPING_H
 #define _ASM_C6X_DMA_MAPPING_H
 
-#include <linux/dma-debug.h>
-#include <asm-generic/dma-coherent.h>
-
-#define dma_supported(d, m)    1
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                                                   dma_addr_t addr,
-                                                   unsigned long offset,
-                                                   size_t size,
-                                                   enum dma_data_direction dir)
-{
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-
-       return 0;
-}
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       debug_dma_mapping_error(dev, dma_addr);
-       return dma_addr == ~0;
-}
-
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                                size_t size, enum dma_data_direction dir);
+#define DMA_ERROR_CODE ~0
 
-extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
-                            size_t size, enum dma_data_direction dir);
+extern struct dma_map_ops c6x_dma_ops;
 
-extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
-                     int nents, enum dma_data_direction direction);
-
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-                        int nents, enum dma_data_direction direction);
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       dma_addr_t handle;
-
-       handle = dma_map_single(dev, page_address(page) + offset, size, dir);
-
-       debug_dma_map_page(dev, page, offset, size, dir, handle, false);
-
-       return handle;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       dma_unmap_single(dev, handle, size, dir);
-
-       debug_dma_unmap_page(dev, handle, size, dir, false);
+       return &c6x_dma_ops;
 }
 
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-                                   size_t size, enum dma_data_direction dir);
-
-extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-                                      size_t size,
-                                      enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                                  int nents, enum dma_data_direction dir);
-
 extern void coherent_mem_init(u32 start, u32 size);
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-                                   struct vm_area_struct *vma, void *cpu_addr,
-                                   dma_addr_t dma_addr, size_t size)
-{
-       return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size)
-{
-       return -EINVAL;
-}
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t gfp, struct dma_attrs *attrs);
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs);
 
 #endif /* _ASM_C6X_DMA_MAPPING_H */
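
With the ops conversion, c6x only needs to state that an all-ones address means failure; error detection itself happens in the common dma_mapping_error(). Roughly, that helper falls back to comparing against DMA_ERROR_CODE when the ops table has no .mapping_error hook; a simplified sketch (not the verbatim generic code):

#include <linux/dma-mapping.h>

static inline int dma_mapping_error_sketch(struct device *dev,
					   dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;	/* ~0 on c6x */
#else
	return 0;
#endif
}
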
index ab7b12de144d22cdf5eb6041715e062f7ed7d32d..8a80f3a250c046a464c9af30bdb6e7df51fc0e87 100644 (file)
@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
        }
 }
 
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                         enum dma_data_direction dir)
+static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
 {
-       dma_addr_t addr = virt_to_phys(ptr);
+       dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-       c6x_dma_sync(addr, size, dir);
-
-       debug_dma_map_page(dev, virt_to_page(ptr),
-                          (unsigned long)ptr & ~PAGE_MASK, size,
-                          dir, addr, true);
-       return addr;
+       c6x_dma_sync(handle, size, dir);
+       return handle;
 }
-EXPORT_SYMBOL(dma_map_single);
-
 
-void dma_unmap_single(struct device *dev, dma_addr_t handle,
-                     size_t size, enum dma_data_direction dir)
+static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        c6x_dma_sync(handle, size, dir);
-
-       debug_dma_unmap_page(dev, handle, size, dir, true);
 }
-EXPORT_SYMBOL(dma_unmap_single);
-
 
-int dma_map_sg(struct device *dev, struct scatterlist *sglist,
-              int nents, enum dma_data_direction dir)
+static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
        int i;
 
-       for_each_sg(sglist, sg, nents, i)
-               sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
-                                                dir);
-
-       debug_dma_map_sg(dev, sglist, nents, nents, dir);
+       for_each_sg(sglist, sg, nents, i) {
+               sg->dma_address = sg_phys(sg);
+               c6x_dma_sync(sg->dma_address, sg->length, dir);
+       }
 
        return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
-
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-                 int nents, enum dma_data_direction dir)
+static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                 int nents, enum dma_data_direction dir,
+                 struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
        int i;
 
        for_each_sg(sglist, sg, nents, i)
-               dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+               c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
 
-       debug_dma_unmap_sg(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-                            size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
 {
        c6x_dma_sync(handle, size, dir);
 
-       debug_dma_sync_single_for_cpu(dev, handle, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
 
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-                               size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
        c6x_dma_sync(handle, size, dir);
 
-       debug_dma_sync_single_for_device(dev, handle, size, dir);
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
 
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
-                        int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sglist, int nents,
+               enum dma_data_direction dir)
 {
        struct scatterlist *sg;
        int i;
 
        for_each_sg(sglist, sg, nents, i)
-               dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+               c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                        sg->length, dir);
 
-       debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-                           int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nents,
+               enum dma_data_direction dir)
 {
        struct scatterlist *sg;
        int i;
 
        for_each_sg(sglist, sg, nents, i)
-               dma_sync_single_for_device(dev, sg_dma_address(sg),
+               c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
                                           sg->length, dir);
 
-       debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+struct dma_map_ops c6x_dma_ops = {
+       .alloc                  = c6x_dma_alloc,
+       .free                   = c6x_dma_free,
+       .map_page               = c6x_dma_map_page,
+       .unmap_page             = c6x_dma_unmap_page,
+       .map_sg                 = c6x_dma_map_sg,
+       .unmap_sg               = c6x_dma_unmap_sg,
+       .sync_single_for_device = c6x_dma_sync_single_for_device,
+       .sync_single_for_cpu    = c6x_dma_sync_single_for_cpu,
+       .sync_sg_for_device     = c6x_dma_sync_sg_for_device,
+       .sync_sg_for_cpu        = c6x_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(c6x_dma_ops);
 
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
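
c6x_dma_map_sg() now tags each scatterlist entry with sg_phys() and syncs it, but the driver-visible contract from the (removed) dma_map_sg() kernel-doc earlier is unchanged. A sketch of that contract from the caller's side, assuming an already-populated scatterlist:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	/* the arch may have coalesced entries: walk 'count', not 'nents' */
	for_each_sg(sgl, sg, count, i) {
		/* feed sg_dma_address(sg) / sg_dma_len(sg) to the device */
	}

	/* ... run the transfer, then unmap with the original nents ... */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
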
index 4187e5180373fc706301993c010d4cd7b7d25d55..f7ee63af254180c29824c26b2d6af0868442ded7 100644 (file)
@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order)
  * Allocate DMA coherent memory space and return both the kernel
  * virtual and DMA address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *handle, gfp_t gfp)
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t gfp, struct dma_attrs *attrs)
 {
        u32 paddr;
        int order;
@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        return phys_to_virt(paddr);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
  * Free DMA coherent memory as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                      dma_addr_t dma_handle)
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        int order;
 
@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
        __free_dma_pages(virt_to_phys(vaddr), order);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 /*
  * Initialise the coherent DMA memory allocator using the given uncached region.
index ee55578d9834159b8e403c0b8e3f26ce2de4d5f4..8d5efa58cce1755d4f84ad630dc3cf09d3a0dce6 100644 (file)
 #include <linux/gfp.h>
 #include <asm/io.h>
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t gfp)
+static void *v32_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp,  struct dma_attrs *attrs)
 {
        void *ret;
-       int order = get_order(size);
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-               return ret;
-
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
 
-       ret = (void *)__get_free_pages(gfp, order);
+       ret = (void *)__get_free_pages(gfp,  get_order(size));
 
        if (ret != NULL) {
                memset(ret, 0, size);
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
        return ret;
 }
 
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
+static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static inline dma_addr_t v32_dma_map_page(struct device *dev,
+               struct page *page, unsigned long offset, size_t size,
+               enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
-       int order = get_order(size);
+       return page_to_phys(page) + offset;
+}
 
-       if (!dma_release_from_coherent(dev, order, vaddr))
-               free_pages((unsigned long)vaddr, order);
+static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       printk("Map sg\n");
+       return nents;
+}
+
+static inline int v32_dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if (mask < 0x00ffffff)
+                return 0;
+       return 1;
 }
 
+struct dma_map_ops v32_dma_ops = {
+       .alloc                  = v32_dma_alloc,
+       .free                   = v32_dma_free,
+       .map_page               = v32_dma_map_page,
+       .map_sg                 = v32_dma_map_sg,
+       .dma_supported          = v32_dma_supported,
+};
+EXPORT_SYMBOL(v32_dma_ops);
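
v32_dma_supported() keeps the old rule that masks narrower than 24 bits cannot be honoured, because allocations only ever fall back to GFP_DMA. That hook is what a driver's mask negotiation ends up consulting; a typical probe-time sketch, with 'pdev' standing in for a hypothetical platform device:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_set_masks(struct platform_device *pdev)
{
	/* returns -EIO if the arch .dma_supported hook rejects the mask */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
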
index 57f794ee6039d00eb16589bddc1da1b0579741ef..5a370178a0e95a7463688ab591860aeb5050c94f 100644 (file)
-/* DMA mapping. Nothing tricky here, just virt_to_phys */
-
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 #ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag);
+extern struct dma_map_ops v32_dma_ops;
 
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                   gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-        BUG();
-        return NULL;
+       return &v32_dma_ops;
 }
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                    dma_addr_t dma_handle)
+#else
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-        BUG();
+       BUG();
+       return NULL;
 }
 #endif
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-          enum dma_data_direction direction)
-{
-       printk("Map sg\n");
-       return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-            size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                   enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                   enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-        /*
-         * we fall back to GFP_DMA when the mask isn't all 1s,
-         * so we can't guarantee allocations that must be
-         * within a tighter range than GFP_DMA..
-         */
-        if(mask < 0x00ffffff)
-                return 0;
-
-       return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       if(!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
-}
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -158,15 +22,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-
 #endif
index 03bfd6bf03e7a28c5b31a1b6f51b47e5471596a9..eefd9a4ed156753b6f07cb41cd0cb97ca0f99a48 100644 (file)
@@ -15,6 +15,7 @@ config FRV
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
        select HAVE_DEBUG_STACKOVERFLOW
+       select ARCH_NO_COHERENT_DMA_MMAP
 
 config ZONE_DMA
        bool
index 2840adcd6d928c41a75b4b6640d3008ee12fddc9..9a82bfa4303b2ed4f1390906d62e85b5b7d26cb2 100644 (file)
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
-#include <linux/device.h>
-#include <linux/scatterlist.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
-
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
 extern unsigned long __nongprelbss dma_coherent_mem_start;
 extern unsigned long __nongprelbss dma_coherent_mem_end;
 
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                                enum dma_data_direction direction);
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                     enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                     enum dma_data_direction direction);
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-extern
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-                       size_t size, enum dma_data_direction direction);
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-                   enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-                            enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-                               enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                                  unsigned long offset, size_t size,
-                                  enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                        enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                           enum dma_data_direction direction)
-{
-       flush_write_buffers();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
-        /*
-         * we fall back to GFP_DMA when the mask isn't all 1s,
-         * so we can't guarantee allocations that must be
-         * within a tighter range than GFP_DMA..
-         */
-        if (mask < 0x00ffffff)
-                return 0;
-
-       return 1;
-}
+extern struct dma_map_ops frv_dma_ops;
 
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
+       return &frv_dma_ops;
 }
 
 static inline
@@ -132,19 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        flush_write_buffers();
 }
 
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-                                   struct vm_area_struct *vma, void *cpu_addr,
-                                   dma_addr_t dma_addr, size_t size)
-{
-       return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size)
-{
-       return -EINVAL;
-}
-
 #endif  /* _ASM_DMA_MAPPING_H */
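
frv keeps a dma_cache_sync() that only issues flush_write_buffers(); the call is meaningful together with the noncoherent allocation API, which now comes from the common header rather than the per-arch defines removed above. A sketch of the intended pairing, assuming a hypothetical device and buffer size:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

static void *example_alloc_and_push(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	void *vaddr = dma_alloc_noncoherent(dev, size, handle, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	memset(vaddr, 0, size);		/* CPU writes through its cached mapping */
	/* flush those writes out before the device is allowed to read */
	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);
	return vaddr;
}
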
index 70dfbea8c8d7c23d70449217de0d4f100e92dc3a..8062fc73fad027d5c3b531b6aa5d80b7fee8da11 100644 (file)
@@ -43,9 +43,20 @@ static inline unsigned long _swapl(unsigned long v)
 //#define __iormb() asm volatile("membar")
 //#define __iowmb() asm volatile("membar")
 
-#define __raw_readb __builtin_read8
-#define __raw_readw __builtin_read16
-#define __raw_readl __builtin_read32
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+       return __builtin_read8((volatile void __iomem *)addr);
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+       return __builtin_read16((volatile void __iomem *)addr);
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+       return __builtin_read32((volatile void __iomem *)addr);
+}
 
 #define __raw_writeb(datum, addr) __builtin_write8(addr, datum)
 #define __raw_writew(datum, addr) __builtin_write16(addr, datum)
index 8eeea0d77aadace16347a550974a4c58983b53fb..082be49b5df0ec49ef05d466e7f7f8b68e4e7e6a 100644 (file)
@@ -34,7 +34,8 @@ struct dma_alloc_record {
 static DEFINE_SPINLOCK(dma_alloc_lock);
 static LIST_HEAD(dma_alloc_list);
 
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, struct dma_attrs *attrs)
 {
        struct dma_alloc_record *new;
        struct list_head *this = &dma_alloc_list;
@@ -84,9 +85,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
        return NULL;
 }
 
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        struct dma_alloc_record *rec;
        unsigned long flags;
@@ -105,22 +105,9 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
        BUG();
 }
 
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                         enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
-       return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-              enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        int i;
        struct scatterlist *sg;
@@ -135,14 +122,49 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
        return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-                       size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        BUG_ON(direction == DMA_NONE);
        flush_dcache_page(page);
        return (dma_addr_t) page_to_phys(page) + offset;
 }
 
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sg, int nelems,
+               enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if (mask < 0x00ffffff)
+                return 0;
+       return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+       .alloc                  = frv_dma_alloc,
+       .free                   = frv_dma_free,
+       .map_page               = frv_dma_map_page,
+       .map_sg                 = frv_dma_map_sg,
+       .sync_single_for_device = frv_dma_sync_single_for_device,
+       .sync_sg_for_device     = frv_dma_sync_sg_for_device,
+       .dma_supported          = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
index 4d1f01dc46e5bb8e5aa1b2e53aa495d4d50ee391..316b7b65348d8bb166b32c0ba9dc33ff41f70200 100644 (file)
@@ -18,7 +18,9 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp,
+               struct dma_attrs *attrs)
 {
        void *ret;
 
@@ -29,29 +31,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
        return ret;
 }
 
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        consistent_free(vaddr);
 }
 
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                         enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
-       return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-              enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        unsigned long dampr2;
        void *vaddr;
@@ -79,14 +67,48 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
        return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-                       size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-       BUG_ON(direction == DMA_NONE);
        flush_dcache_page(page);
        return (dma_addr_t) page_to_phys(page) + offset;
 }
 
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sg, int nelems,
+               enum dma_data_direction direction)
+{
+       flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if (mask < 0x00ffffff)
+                return 0;
+       return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+       .alloc                  = frv_dma_alloc,
+       .free                   = frv_dma_free,
+       .map_page               = frv_dma_map_page,
+       .map_sg                 = frv_dma_map_sg,
+       .sync_single_for_device = frv_dma_sync_single_for_device,
+       .sync_sg_for_device     = frv_dma_sync_sg_for_device,
+       .dma_supported          = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
index cd1f754c1336b8ddadebf944aa51485eb33f2d2d..986ea84caaed48a9fd4d16a7b1195a7b97b87eb5 100644 (file)
@@ -15,7 +15,6 @@ config H8300
        select OF_IRQ
        select OF_EARLY_FLATTREE
        select HAVE_MEMBLOCK
-       select HAVE_DMA_ATTRS
        select CLKSRC_OF
        select H8300_TMR8
        select HAVE_KERNEL_GZIP
index d9b5b806afe6fcecfaf6c727c1a09310ed8867a1..7ac7fadffed07577e842d2a45912fae29630583c 100644 (file)
@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return &h8300_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
index 4dc89d1f9c489ba390d9318caf76ed238683416a..57298e7b4867091edb074979363824a9b4f2b7e9 100644 (file)
@@ -27,7 +27,6 @@ config HEXAGON
        select GENERIC_CLOCKEVENTS_BROADCAST
        select MODULES_USE_ELF_RELA
        select GENERIC_CPU_DEVICES
-       select HAVE_DMA_ATTRS
        ---help---
          Qualcomm Hexagon is a processor architecture designed for high
          performance and low power across a wide variety of applications.
index 268fde8a45756e580ef06da3a2b051c4fb2e9f58..aa6203464520bcfa6666da8a9b76054c8f4a0173 100644 (file)
@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                           enum dma_data_direction direction);
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index eb0249e3798112615fd5774d6f30229aa6241e53..fb0515eb639b55295c59b25ef1ce83554f35d7b4 100644 (file)
@@ -25,7 +25,6 @@ config IA64
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
        select HAVE_FUNCTION_TRACER
-       select HAVE_DMA_ATTRS
        select TTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DMA_API_DEBUG
index 9beccf8010bd6bf8eaa64ab292a753449a0ca609..d472805edfa9da0df269effd38c09de607bdd291 100644 (file)
@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index 05aa53594d49d0fecd4d0e9aae1d05cddf8c9dfc..96c536194287d02d1802c114add933495c855736 100644 (file)
 #ifndef _M68K_DMA_MAPPING_H
 #define _M68K_DMA_MAPPING_H
 
-#include <asm/cache.h>
+extern struct dma_map_ops m68k_dma_ops;
 
-struct scatterlist;
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-       return 0;
-}
-
-extern void *dma_alloc_coherent(struct device *, size_t,
-                               dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t,
-                             void *, dma_addr_t);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-                                   dma_addr_t *dma_handle, gfp_t flag,
-                                   struct dma_attrs *attrs)
-{
-       /* attrs is not supported and ignored */
-       return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-                                 void *cpu_addr, dma_addr_t dma_handle,
-                                 struct dma_attrs *attrs)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       /* attrs is not supported and ignored */
-       dma_free_coherent(dev, size, cpu_addr, dma_handle);
+        return &m68k_dma_ops;
 }
 
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-                                         dma_addr_t *handle, gfp_t flag)
-{
-       return dma_alloc_coherent(dev, size, handle, flag);
-}
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-                                       void *addr, dma_addr_t handle)
-{
-       dma_free_coherent(dev, size, addr, handle);
-}
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        /* we use coherent allocation, so not much to do here. */
 }
 
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
-                                enum dma_data_direction);
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-                                   size_t size, enum dma_data_direction dir)
-{
-}
-
-extern dma_addr_t dma_map_page(struct device *, struct page *,
-                              unsigned long, size_t size,
-                              enum dma_data_direction);
-static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
-                                 size_t size, enum dma_data_direction dir)
-{
-}
-
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-                     enum dma_data_direction);
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                               int nhwentries, enum dma_data_direction dir)
-{
-}
-
-extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
-                                      enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-                                  enum dma_data_direction);
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-               dma_addr_t dma_handle, unsigned long offset, size_t size,
-               enum dma_data_direction direction)
-{
-       /* just sync everything for now */
-       dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-                                          size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                                      int nents, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-               dma_addr_t dma_handle, unsigned long offset, size_t size,
-               enum dma_data_direction direction)
-{
-       /* just sync everything for now */
-       dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
-{
-       return 0;
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif  /* _M68K_DMA_MAPPING_H */
index 564665f9af30c9e1da180b20f5176a2b1c0e9731..cbc78b4117b555af1fbb3695ffc0009d34565539 100644 (file)
@@ -18,8 +18,8 @@
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *handle, gfp_t flag)
+static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t flag, struct dma_attrs *attrs)
 {
        struct page *page, **map;
        pgprot_t pgprot;
@@ -61,8 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
        return addr;
 }
 
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *addr, dma_addr_t handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+               dma_addr_t handle, struct dma_attrs *attrs)
 {
        pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
        vfree(addr);
@@ -72,8 +72,8 @@ void dma_free_coherent(struct device *dev, size_t size,
 
 #include <asm/cacheflush.h>
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t gfp)
+static void *m68k_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        void *ret;
        /* ignore region specifiers */
@@ -90,19 +90,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
        return ret;
 }
 
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-                               size_t size, enum dma_data_direction dir)
+static void m68k_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -118,10 +115,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
                break;
        }
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-                           int nents, enum dma_data_direction dir)
+static void m68k_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nents, enum dma_data_direction dir)
 {
        int i;
        struct scatterlist *sg;
@@ -131,31 +127,19 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
                                           dir);
        }
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
-                         enum dma_data_direction dir)
-{
-       dma_addr_t handle = virt_to_bus(addr);
-
-       dma_sync_single_for_device(dev, handle, size, dir);
-       return handle;
-}
-EXPORT_SYMBOL(dma_map_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                       unsigned long offset, size_t size,
-                       enum dma_data_direction dir)
+static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
 {
        dma_addr_t handle = page_to_phys(page) + offset;
 
        dma_sync_single_for_device(dev, handle, size, dir);
        return handle;
 }
-EXPORT_SYMBOL(dma_map_page);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-              enum dma_data_direction dir)
+static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        int i;
        struct scatterlist *sg;
@@ -167,4 +151,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
        }
        return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
+
+struct dma_map_ops m68k_dma_ops = {
+       .alloc                  = m68k_dma_alloc,
+       .free                   = m68k_dma_free,
+       .map_page               = m68k_dma_map_page,
+       .map_sg                 = m68k_dma_map_sg,
+       .sync_single_for_device = m68k_dma_sync_single_for_device,
+       .sync_sg_for_device     = m68k_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(m68k_dma_ops);
index eb5cdec94be031f0eb0702b6d29ec83616211e1b..27af5d479ce62fda67f29a604efd852a0ddee7ff 100644 (file)
 #ifndef _ASM_METAG_DMA_MAPPING_H
 #define _ASM_METAG_DMA_MAPPING_H
 
-#include <linux/mm.h>
+extern struct dma_map_ops metag_dma_ops;
 
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <linux/scatterlist.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *vaddr, dma_addr_t dma_handle);
-
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
-
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       WARN_ON(size == 0);
-       dma_sync_for_device(ptr, size, direction);
-       return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-          enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(!valid_dma_direction(direction));
-       WARN_ON(nents == 0 || sglist[0].length == 0);
-
-       for_each_sg(sglist, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-
-               sg->dma_address = sg_phys(sg);
-               dma_sync_for_device(sg_virt(sg), sg->length, direction);
-       }
-
-       return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-            size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
-                           direction);
-       return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
-            enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(!valid_dma_direction(direction));
-       WARN_ON(nhwentries == 0 || sglist[0].length == 0);
-
-       for_each_sg(sglist, sg, nhwentries, i) {
-               BUG_ON(!sg_page(sg));
-
-               sg->dma_address = sg_phys(sg);
-               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-       }
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction)
-{
-       dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                          size_t size, enum dma_data_direction direction)
-{
-       dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction)
-{
-       dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
-                        direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-       dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
-                           direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-                   enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i)
-               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-                      int nelems, enum dma_data_direction direction)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i)
-               dma_sync_for_device(sg_virt(sg), sg->length, direction);
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       return 0;
-}
-
-#define dma_supported(dev, mask)        (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
+       return &metag_dma_ops;
 }
 
 /*
@@ -184,11 +18,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size);
-
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif
index c700d625067a96cb725064e0c19ceadad078cd6a..e12368d02155ac5ede14d9ce982134de486a4551 100644 (file)
@@ -171,8 +171,8 @@ out:
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *handle, gfp_t gfp)
+static void *metag_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        struct page *page;
        struct metag_vm_region *c;
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 no_page:
        return NULL;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
  * free a page as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *vaddr, dma_addr_t dma_handle)
+static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        struct metag_vm_region *c;
        unsigned long flags, addr;
@@ -329,16 +328,19 @@ no_area:
               __func__, vaddr);
        dump_stack();
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
 {
-       int ret = -ENXIO;
-
        unsigned long flags, user_size, kern_size;
        struct metag_vm_region *c;
+       int ret = -ENXIO;
+
+       if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-
-
 /*
  * Initialise the consistent memory allocation.
  */
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
 /*
  * make an area consistent to devices.
  */
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 {
        /*
         * Ensure any writes get through the write combiner. This is necessary
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 
        wmb();
 }
-EXPORT_SYMBOL(dma_sync_for_device);
 
 /*
  * make an area consistent to the core.
  */
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 {
        /*
         * Hardware L2 cache prefetch doesn't occur across 4K physical
@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 
        rmb();
 }
-EXPORT_SYMBOL(dma_sync_for_cpu);
+
+static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+       dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+                           direction);
+       return page_to_phys(page) + offset;
+}
+
+static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+       }
+
+       return nents;
+}
+
+
+static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               int nhwentries, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nhwentries, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+       }
+}
+
+static void metag_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
+{
+       dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction direction)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static void metag_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction direction)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i)
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+struct dma_map_ops metag_dma_ops = {
+       .alloc                  = metag_dma_alloc,
+       .free                   = metag_dma_free,
+       .map_page               = metag_dma_map_page,
+       .unmap_page             = metag_dma_unmap_page,
+       .map_sg                 = metag_dma_map_sg,
+       .unmap_sg               = metag_dma_unmap_sg,
+       .sync_single_for_device = metag_dma_sync_single_for_device,
+       .sync_single_for_cpu    = metag_dma_sync_single_for_cpu,
+       .sync_sg_for_device     = metag_dma_sync_sg_for_device,
+       .sync_sg_for_cpu        = metag_dma_sync_sg_for_cpu,
+       .mmap                   = metag_dma_mmap,
+};
+EXPORT_SYMBOL(metag_dma_ops);
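
With dma_mmap_writecombine() folded into metag_dma_mmap() above, a caller asks
for write-combining through the attrs argument instead. A hypothetical usage
sketch (dev, vma, cpu_addr, handle and size are assumed names, not taken from
this patch):

	DEFINE_DMA_ATTRS(attrs);
	int err;

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	/* ends up in metag_dma_mmap(), which applies pgprot_writecombine() */
	err = dma_mmap_attrs(dev, vma, cpu_addr, handle, size, &attrs);
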
index 5ecd0287a87428ff9340a1ed88908d00897b5a24..53b69deceb998820947e5136d668f334a4f9f1cd 100644 (file)
@@ -19,7 +19,6 @@ config MICROBLAZE
        select HAVE_ARCH_KGDB
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
index 24b12970c9cff772d24d8b581aeb945b8d917b6b..1884783d15c0efee874310cc05ef93e38dafc00c 100644 (file)
@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return &dma_direct_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void __dma_sync(unsigned long paddr,
                              size_t size, enum dma_data_direction direction)
 {
index 71683a8533723866872de04f7194d07a28f33045..fbf3f6670b69a58f23bf3878eb0d5d2df3edeaec 100644 (file)
@@ -31,7 +31,6 @@ config MIPS
        select RTC_LIB if !MACH_LOONGSON64
        select GENERIC_ATOMIC64 if !64BIT
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DMA_API_DEBUG
        select GENERIC_IRQ_PROBE
index e604f760c4a076b44255b312b6f45180f06200c4..12fa79e2f1b4fc7fe7c66f1f93d344bb7f1d67fb 100644 (file)
@@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction);
 
index b0ebe59f73fdfa6b69df58010c4a7ca4f672255a..ccdcfcbb24aa60e2d2f161b61067ad4061caca84 100644 (file)
@@ -73,7 +73,6 @@
 #define MADV_SEQUENTIAL 2              /* expect sequential page references */
 #define MADV_WILLNEED  3               /* will need these pages */
 #define MADV_DONTNEED  4               /* don't need these pages */
-#define MADV_FREE      5               /* free pages only if memory pressure */
 
 /* common parameters: try to keep these consistent across architectures */
 #define MADV_FREE      8               /* free pages only if memory pressure */
index 78ae5552fdb89cca3c6a5ff4290a2581fbe95079..10607f0d2bcd1526f01d95cee5c519c1916224b1 100644 (file)
@@ -14,6 +14,7 @@ config MN10300
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
        select HAVE_DEBUG_STACKOVERFLOW
+       select ARCH_NO_COHERENT_DMA_MMAP
 
 config AM33_2
        def_bool n
index a18abfc558eb1d41b99cd22798976f517196b7bd..1dcd44757f323f26db9ae7e38562a9ea947996ab 100644 (file)
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cache.h>
 #include <asm/io.h>
 
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-                               dma_addr_t *dma_handle, int flag);
-
-extern void dma_free_coherent(struct device *dev, size_t size,
-                             void *vaddr, dma_addr_t dma_handle);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
-
-static inline
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                         enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       mn10300_dcache_flush_inv();
-       return virt_to_bus(ptr);
-}
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                     enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-              enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(!valid_dma_direction(direction));
-       WARN_ON(nents == 0 || sglist[0].length == 0);
-
-       for_each_sg(sglist, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-
-               sg->dma_address = sg_phys(sg);
-       }
+extern struct dma_map_ops mn10300_dma_ops;
 
-       mn10300_dcache_flush_inv();
-       return nents;
-}
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-                 enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                       unsigned long offset, size_t size,
-                       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-       return page_to_bus(page) + offset;
-}
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-                   enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                            size_t size, enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                               size_t size, enum dma_data_direction direction)
-{
-       mn10300_dcache_flush_inv();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                                  unsigned long offset, size_t size,
-                                  enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction direction)
-{
-       mn10300_dcache_flush_inv();
-}
-
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                        int nelems, enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                           int nelems, enum dma_data_direction direction)
-{
-       mn10300_dcache_flush_inv();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
-        * guarantee allocations that must be within a tighter range than
-        * GFP_DMA
-        */
-       if (mask < 0x00ffffff)
-               return 0;
-       return 1;
-}
-
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-       return 0;
+       return &mn10300_dma_ops;
 }
 
 static inline
@@ -168,19 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size,
        mn10300_dcache_flush_inv();
 }
 
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-                                   struct vm_area_struct *vma, void *cpu_addr,
-                                   dma_addr_t dma_addr, size_t size)
-{
-       return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size)
-{
-       return -EINVAL;
-}
-
 #endif
index e244ebe637e15436f643a63e28217d9714321645..8842394cb49a3bc250a7cf2722ce3507428db7e0 100644 (file)
@@ -20,8 +20,8 @@
 
 static unsigned long pci_sram_allocated = 0xbc000000;
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, int gfp)
+static void *mn10300_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        unsigned long addr;
        void *ret;
@@ -61,10 +61,9 @@ done:
        printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
        return ret;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                      dma_addr_t dma_handle)
+static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        unsigned long addr = (unsigned long) vaddr & ~0x20000000;
 
@@ -73,4 +72,60 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
        free_pages(addr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
+
+static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+       }
+
+       mn10300_dcache_flush_inv();
+       return nents;
+}
+
+static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+       return page_to_bus(page) + offset;
+}
+
+static void mn10300_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                               size_t size, enum dma_data_direction direction)
+{
+       mn10300_dcache_flush_inv();
+}
+
+static void mn10300_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                           int nelems, enum dma_data_direction direction)
+{
+       mn10300_dcache_flush_inv();
+}
+
+static int mn10300_dma_supported(struct device *dev, u64 mask)
+{
+       /*
+        * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
+        * guarantee allocations that must be within a tighter range than
+        * GFP_DMA
+        */
+       if (mask < 0x00ffffff)
+               return 0;
+       return 1;
+}
+
+struct dma_map_ops mn10300_dma_ops = {
+       .alloc                  = mn10300_dma_alloc,
+       .free                   = mn10300_dma_free,
+       .map_page               = mn10300_dma_map_page,
+       .map_sg                 = mn10300_dma_map_sg,
+       .sync_single_for_device = mn10300_dma_sync_single_for_device,
+       .sync_sg_for_device     = mn10300_dma_sync_sg_for_device,
+       .dma_supported          = mn10300_dma_supported,
+};
index b5567233f7f16d1ffa9e6f432f84f73ee28120d9..bec8ac8e6ad2311cf1703334c71c367af6248ebb 100644 (file)
 #ifndef _ASM_NIOS2_DMA_MAPPING_H
 #define _ASM_NIOS2_DMA_MAPPING_H
 
-#include <linux/scatterlist.h>
-#include <linux/cache.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops nios2_dma_ops;
 
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
-                             enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       switch (direction) {
-       case DMA_FROM_DEVICE:
-               invalidate_dcache_range((unsigned long)vaddr,
-                       (unsigned long)(vaddr + size));
-               break;
-       case DMA_TO_DEVICE:
-               /*
-                * We just need to flush the caches here , but Nios2 flush
-                * instruction will do both writeback and invalidate.
-                */
-       case DMA_BIDIRECTIONAL: /* flush and invalidate */
-               flush_dcache_range((unsigned long)vaddr,
-                       (unsigned long)(vaddr + size));
-               break;
-       default:
-               BUG();
-       }
-}
-
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
-                             enum dma_data_direction direction)
-{
-       switch (direction) {
-       case DMA_BIDIRECTIONAL:
-       case DMA_FROM_DEVICE:
-               invalidate_dcache_range((unsigned long)vaddr,
-                       (unsigned long)(vaddr + size));
-               break;
-       case DMA_TO_DEVICE:
-               break;
-       default:
-               BUG();
-       }
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
-                                       size_t size,
-                                       enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       __dma_sync_for_device(ptr, size, direction);
-       return virt_to_phys(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                               size_t size, enum dma_data_direction direction)
-{
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-       enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-       size_t size, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-       int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
-       dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
-       dma_addr_t dma_handle, unsigned long offset, size_t size,
-       enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
-       dma_addr_t dma_handle, unsigned long offset, size_t size,
-       enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-       int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-       int nelems, enum dma_data_direction direction);
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return 0;
+       return &nios2_dma_ops;
 }
 
 /*
-* dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
-* do any flushing here.
-*/
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction direction)
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-               void *cpu_addr, dma_addr_t dma_addr,
-               size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
index ac5da7594f0b4dd7c8010f223d32122b5133a399..90422c367ed3ad33b8a193bf0a062f601a590909 100644 (file)
 #include <linux/cache.h>
 #include <asm/cacheflush.h>
 
+static inline void __dma_sync_for_device(void *vaddr, size_t size,
+                             enum dma_data_direction direction)
+{
+       switch (direction) {
+       case DMA_FROM_DEVICE:
+               invalidate_dcache_range((unsigned long)vaddr,
+                       (unsigned long)(vaddr + size));
+               break;
+       case DMA_TO_DEVICE:
+               /*
+                * We just need to flush the caches here, but Nios2 flush
+                * instruction will do both writeback and invalidate.
+                */
+       case DMA_BIDIRECTIONAL: /* flush and invalidate */
+               flush_dcache_range((unsigned long)vaddr,
+                       (unsigned long)(vaddr + size));
+               break;
+       default:
+               BUG();
+       }
+}
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t gfp)
+static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
+                             enum dma_data_direction direction)
+{
+       switch (direction) {
+       case DMA_BIDIRECTIONAL:
+       case DMA_FROM_DEVICE:
+               invalidate_dcache_range((unsigned long)vaddr,
+                       (unsigned long)(vaddr + size));
+               break;
+       case DMA_TO_DEVICE:
+               break;
+       default:
+               BUG();
+       }
+}
+
+static void *nios2_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
        void *ret;
 
@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        return ret;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                       dma_addr_t dma_handle)
+static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
 
        free_pages(addr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction direction)
+static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        int i;
 
-       BUG_ON(!valid_dma_direction(direction));
-
        for_each_sg(sg, sg, nents, i) {
                void *addr;
 
@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
-                       enum dma_data_direction direction)
+                       enum dma_data_direction direction,
+                       struct dma_attrs *attrs)
 {
-       void *addr;
-
-       BUG_ON(!valid_dma_direction(direction));
+       void *addr = page_address(page) + offset;
 
-       addr = page_address(page) + offset;
        __dma_sync_for_device(addr, size, direction);
-
        return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-                   enum dma_data_direction direction)
+static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
-       BUG_ON(!valid_dma_direction(direction));
-
        __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-                 enum dma_data_direction direction)
+static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+               int nhwentries, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        void *addr;
        int i;
 
-       BUG_ON(!valid_dma_direction(direction));
-
        if (direction == DMA_TO_DEVICE)
                return;
 
@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                        __dma_sync_for_cpu(addr, sg->length, direction);
        }
 }
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                            size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
 
-       __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                               size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-
-       __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                                       unsigned long offset, size_t size,
-                                       enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-
        __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
 }
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
 
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                                       unsigned long offset, size_t size,
-                                       enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-
        __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
 }
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
 
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                        enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sg, int nelems,
+               enum dma_data_direction direction)
 {
        int i;
 
-       BUG_ON(!valid_dma_direction(direction));
-
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for_each_sg(sg, sg, nelems, i)
                __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                               int nelems, enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sg, int nelems,
+               enum dma_data_direction direction)
 {
        int i;
 
-       BUG_ON(!valid_dma_direction(direction));
-
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for_each_sg(sg, sg, nelems, i)
                __dma_sync_for_device(sg_virt(sg), sg->length, direction);
 
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+struct dma_map_ops nios2_dma_ops = {
+       .alloc                  = nios2_dma_alloc,
+       .free                   = nios2_dma_free,
+       .map_page               = nios2_dma_map_page,
+       .unmap_page             = nios2_dma_unmap_page,
+       .map_sg                 = nios2_dma_map_sg,
+       .unmap_sg               = nios2_dma_unmap_sg,
+       .sync_single_for_device = nios2_dma_sync_single_for_device,
+       .sync_single_for_cpu    = nios2_dma_sync_single_for_cpu,
+       .sync_sg_for_cpu        = nios2_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = nios2_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(nios2_dma_ops);
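
Nios II is not cache-coherent, so the ops above perform explicit cache
maintenance. A hypothetical driver-side sequence (dev, buf and len are assumed
names) showing where those hooks fire:

	dma_addr_t dma;

	/* nios2_dma_map_page(): invalidates the dcache for DMA_FROM_DEVICE */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... the device DMAs into buf ... */

	/* nios2_dma_unmap_page(): invalidates again before the CPU reads buf */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
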
index 443f44de102093f18fe5d797b297757705a0b70c..e118c02cc79a085eb59e10513bbf15c0272725a3 100644 (file)
@@ -29,9 +29,6 @@ config OPENRISC
 config MMU
        def_bool y
 
-config HAVE_DMA_ATTRS
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index 413bfcf863848fba556078d8ad6ea1cba59cf770..1f260bccb36878d6445d12695f9f033787cbda48 100644 (file)
@@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask)
        return dma_mask == DMA_BIT_MASK(32);
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
index 7c34cafdf3012e6f9b29d4a912ec064e4cefe068..14f655cf542e1eff76c5584b0a603d40a330e333 100644 (file)
@@ -29,6 +29,7 @@ config PARISC
        select TTY # Needed for pdc_cons.c
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_NO_COHERENT_DMA_MMAP
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
index d8d60a57183fb32be02adc0eefee76ff97d4aa71..16e024602737085eee5c3bdfae979fc5e578e2a1 100644 (file)
@@ -1,30 +1,11 @@
 #ifndef _PARISC_DMA_MAPPING_H
 #define _PARISC_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
 #include <asm/cacheflush.h>
 
-/* See Documentation/DMA-API-HOWTO.txt */
-struct hppa_dma_ops {
-       int  (*dma_supported)(struct device *dev, u64 mask);
-       void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
-       void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
-       void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
-       dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
-       void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
-       int  (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
-       void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
-       void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
-       void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
-       void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-       void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-};
-
 /*
-** We could live without the hppa_dma_ops indirection if we didn't want
-** to support 4 different coherent dma models with one binary (they will
-** someday be loadable modules):
+** We need to support 4 different coherent dma models with one binary:
+**
 **     I/O MMU        consistent method           dma_sync behavior
 **  =============   ======================       =======================
 **  a) PA-7x00LC    uncachable host memory          flush/purge
@@ -40,158 +21,22 @@ struct hppa_dma_ops {
 */
 
 #ifdef CONFIG_PA11
-extern struct hppa_dma_ops pcxl_dma_ops;
-extern struct hppa_dma_ops pcx_dma_ops;
+extern struct dma_map_ops pcxl_dma_ops;
+extern struct dma_map_ops pcx_dma_ops;
 #endif
 
-extern struct hppa_dma_ops *hppa_dma_ops;
-
-#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
-#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  gfp_t flag)
-{
-       return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
-}
-
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                     gfp_t flag)
-{
-       return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, 
-                   void *vaddr, dma_addr_t dma_handle)
-{
-       hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, 
-                   void *vaddr, dma_addr_t dma_handle)
-{
-       hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction)
-{
-       return hppa_dma_ops->map_single(dev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-       hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-          enum dma_data_direction direction)
-{
-       return hppa_dma_ops->map_sg(dev, sg, nents, direction);
-}
+extern struct dma_map_ops *hppa_dma_ops;
 
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-       hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-            size_t size, enum dma_data_direction direction)
-{
-       return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-       dma_unmap_single(dev, dma_address, size, direction);
-}
-
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_single_for_cpu)
-               hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_single_for_device)
-               hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                     unsigned long offset, size_t size,
-                     enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_single_for_cpu)
-               hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-                     unsigned long offset, size_t size,
-                     enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_single_for_device)
-               hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_sg_for_cpu)
-               hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                enum dma_data_direction direction)
-{
-       if(hppa_dma_ops->dma_sync_sg_for_device)
-               hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-       return hppa_dma_ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       if(!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
+       return hppa_dma_ops;
 }
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
-       if(hppa_dma_ops->dma_sync_single_for_cpu)
+       if (hppa_dma_ops->sync_single_for_cpu)
                flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
 
@@ -238,22 +83,4 @@ struct parisc_device;
 void * sba_get_iommu(struct parisc_device *dev);
 #endif
 
-/* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(dev, x)      0
-
-/* This API cannot be supported on PA-RISC */
-static inline int dma_mmap_coherent(struct device *dev,
-                                   struct vm_area_struct *vma, void *cpu_addr,
-                                   dma_addr_t dma_addr, size_t size)
-{
-       return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size)
-{
-       return -EINVAL;
-}
-
 #endif
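
On parisc the ops pointer is selected at boot to match the CPU/IOMMU model
listed in the comment above; schematically (cpu_is_pcxl() is an assumed helper,
not a real parisc function):

	/* Schematic boot-time selection, not the literal arch/parisc code. */
	if (cpu_is_pcxl())			/* assumed helper */
		hppa_dma_ops = &pcxl_dma_ops;	/* uncached consistent memory */
	else
		hppa_dma_ops = &pcx_dma_ops;	/* flush/purge model */
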
index cf830d465f75fd2cf55afb74c73adf593f3fb7cf..f3db7d8eb0c265a875fbcbb1d72f1cfaa058efbd 100644 (file)
@@ -43,7 +43,6 @@
 #define MADV_SPACEAVAIL 5               /* insure that resources are reserved */
 #define MADV_VPS_PURGE  6               /* Purge pages from VM page cache */
 #define MADV_VPS_INHERIT 7              /* Inherit parents page size */
-#define MADV_FREE      8               /* free pages only if memory pressure */
 
 /* common/generic parameters */
 #define MADV_FREE      8               /* free pages only if memory pressure */
index dba508fe1683d8153ed9ca7a0539246f7f6d52a2..f8150669b8c6f4e4de827b6b3d2f8dbd586c6593 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/parisc-device.h>
 
 /* See comments in include/asm-parisc/pci.h */
-struct hppa_dma_ops *hppa_dma_ops __read_mostly;
+struct dma_map_ops *hppa_dma_ops __read_mostly;
 EXPORT_SYMBOL(hppa_dma_ops);
 
 static struct device root = {
index b9402c9b34545e81c42b894cd9e1532bac66fccf..a27e4928bf73e0dfd49e904d7fb3c05b64615267 100644 (file)
@@ -413,7 +413,8 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+static void *pa11_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
        unsigned long vaddr;
        unsigned long paddr;
@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad
        return (void *)vaddr;
 }
 
-static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
        int order;
 
@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
        free_pages((unsigned long)__va(dma_handle), order);
 }
 
-static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
+       void *addr = page_address(page) + offset;
        BUG_ON(direction == DMA_NONE);
 
        flush_kernel_dcache_range((unsigned long) addr, size);
        return virt_to_phys(addr);
 }
 
-static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        BUG_ON(direction == DMA_NONE);
 
@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
        return;
 }
 
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        int i;
        struct scatterlist *sg;
@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
        return nents;
 }
 
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               int nents, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        int i;
        struct scatterlist *sg;
@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
        return;
 }
 
-static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_cpu(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
 
-       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+                       size);
 }
 
-static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_device(struct device *dev,
+               dma_addr_t dma_handle, size_t size,
+               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
 
-       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+       flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+                       size);
 }
 
 static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
                flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
-struct hppa_dma_ops pcxl_dma_ops = {
+struct dma_map_ops pcxl_dma_ops = {
        .dma_supported =        pa11_dma_supported,
-       .alloc_consistent =     pa11_dma_alloc_consistent,
-       .alloc_noncoherent =    pa11_dma_alloc_consistent,
-       .free_consistent =      pa11_dma_free_consistent,
-       .map_single =           pa11_dma_map_single,
-       .unmap_single =         pa11_dma_unmap_single,
+       .alloc =                pa11_dma_alloc,
+       .free =                 pa11_dma_free,
+       .map_page =             pa11_dma_map_page,
+       .unmap_page =           pa11_dma_unmap_page,
        .map_sg =               pa11_dma_map_sg,
        .unmap_sg =             pa11_dma_unmap_sg,
-       .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
-       .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
-       .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
-       .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+       .sync_single_for_cpu =  pa11_dma_sync_single_for_cpu,
+       .sync_single_for_device = pa11_dma_sync_single_for_device,
+       .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
+       .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
 };
 
-static void *fail_alloc_consistent(struct device *dev, size_t size,
-                                  dma_addr_t *dma_handle, gfp_t flag)
-{
-       return NULL;
-}
-
-static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
-                                         dma_addr_t *dma_handle, gfp_t flag)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
        void *addr;
 
+       if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+               return NULL;
+
        addr = (void *)__get_free_pages(flag, get_order(size));
        if (addr)
                *dma_handle = (dma_addr_t)virt_to_phys(addr);
@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
        return addr;
 }
 
-static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
-                                       void *vaddr, dma_addr_t iova)
+static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t iova, struct dma_attrs *attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
        return;
 }
 
-struct hppa_dma_ops pcx_dma_ops = {
+struct dma_map_ops pcx_dma_ops = {
        .dma_supported =        pa11_dma_supported,
-       .alloc_consistent =     fail_alloc_consistent,
-       .alloc_noncoherent =    pa11_dma_alloc_noncoherent,
-       .free_consistent =      pa11_dma_free_noncoherent,
-       .map_single =           pa11_dma_map_single,
-       .unmap_single =         pa11_dma_unmap_single,
+       .alloc =                pcx_dma_alloc,
+       .free =                 pcx_dma_free,
+       .map_page =             pa11_dma_map_page,
+       .unmap_page =           pa11_dma_unmap_page,
        .map_sg =               pa11_dma_map_sg,
        .unmap_sg =             pa11_dma_unmap_sg,
-       .dma_sync_single_for_cpu =      pa11_dma_sync_single_for_cpu,
-       .dma_sync_single_for_device =   pa11_dma_sync_single_for_device,
-       .dma_sync_sg_for_cpu =          pa11_dma_sync_sg_for_cpu,
-       .dma_sync_sg_for_device =       pa11_dma_sync_sg_for_device,
+       .sync_single_for_cpu =  pa11_dma_sync_single_for_cpu,
+       .sync_single_for_device = pa11_dma_sync_single_for_device,
+       .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
+       .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
 };
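
Note the behavioural subtlety in pcx_dma_alloc(): on machines without coherent DMA it only succeeds when the caller passed DMA_ATTR_NON_CONSISTENT, which is what dma_alloc_noncoherent() does, while a plain coherent allocation now fails cleanly. A hedged driver-side fragment (dev, size, dma_handle and payload are hypothetical):

	/* Hypothetical PCX-safe allocation: ask for non-consistent memory and
	 * maintain coherency by hand with dma_cache_sync(). */
	void *buf = dma_alloc_noncoherent(dev, size, &dma_handle, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	memcpy(buf, payload, size);
	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);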
index 94f6c5089e0cc8d8c03d1ce658bcbd9953d3899d..e4824fd04bb7449d262c1a7697b5f539b03f6bab 100644 (file)
@@ -108,7 +108,6 @@ config PPC
        select HAVE_ARCH_TRACEHOOK
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select HAVE_OPROFILE
        select HAVE_DEBUG_KMEMLEAK
@@ -158,6 +157,7 @@ config PPC
        select ARCH_HAS_DMA_SET_COHERENT_MASK
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select HAVE_ARCH_SECCOMP_FILTER
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index 7f522c021dc3087af2393b40e603a054f992952b..77816acd4fd91b61aac59bec7e18647116e07b0c 100644 (file)
@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 #define HAVE_ARCH_DMA_SET_MASK 1
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 extern u64 __dma_get_required_mask(struct device *dev);
 
index 493e72f64b35feac4a03fe7912aa7c04067e6fe6..b4407d0add27733fba4d97c027ec820b799f42a7 100644 (file)
@@ -191,7 +191,7 @@ struct fadump_crash_info_header {
        u64             elfcorehdr_addr;
        u32             crashing_cpu;
        struct pt_regs  regs;
-       struct cpumask  cpu_online_mask;
+       struct cpumask  online_mask;
 };
 
 /* Crash memory ranges */
index ba336930d448792c235344cc9afb1a9e458f9a93..794f22adf99d4819c15de9f1ecc316eaafb491c0 100644 (file)
@@ -136,12 +136,18 @@ endif
 obj-$(CONFIG_EPAPR_PARAVIRT)   += epapr_paravirt.o epapr_hcalls.o
 obj-$(CONFIG_KVM_GUEST)                += kvm.o kvm_emul.o
 
-# Disable GCOV in odd or sensitive code
+# Disable GCOV & sanitizers in odd or sensitive code
 GCOV_PROFILE_prom_init.o := n
+UBSAN_SANITIZE_prom_init.o := n
 GCOV_PROFILE_ftrace.o := n
+UBSAN_SANITIZE_ftrace.o := n
 GCOV_PROFILE_machine_kexec_64.o := n
+UBSAN_SANITIZE_machine_kexec_64.o := n
 GCOV_PROFILE_machine_kexec_32.o := n
+UBSAN_SANITIZE_machine_kexec_32.o := n
 GCOV_PROFILE_kprobes.o := n
+UBSAN_SANITIZE_kprobes.o := n
+UBSAN_SANITIZE_vdso.o := n
 
 extra-$(CONFIG_PPC_FPU)                += fpu.o
 extra-$(CONFIG_ALTIVEC)                += vector.o
index 26d091a1a54cf555627fd95bb135cf98eb3e52b8..3cb3b02a13dd14fdf43ddfe0261aed162db07774 100644 (file)
@@ -415,7 +415,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
        else
                ppc_save_regs(&fdh->regs);
 
-       fdh->cpu_online_mask = *cpu_online_mask;
+       fdh->online_mask = *cpu_online_mask;
 
        /* Call ibm,os-term rtas call to trigger firmware assisted dump */
        rtas_os_term((char *)str);
@@ -646,7 +646,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
                }
                /* Lower 4 bytes of reg_value contains logical cpu id */
                cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
-               if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
+               if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
                        SKIP_TO_NEXT_CPU(reg_entry);
                        continue;
                }
index 6abffb7a8cd987358a2a8c6af0b26c7c0aa323e8..cbabd143acae8e9db3b3883be876527b68b83574 100644 (file)
@@ -15,6 +15,7 @@ targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
 obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
 
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
 ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
index 8c8f2ae43935600388116ac69b28756244c260be..c710802b8fb685a7cb5815d86387f7debb356d9e 100644 (file)
@@ -8,6 +8,7 @@ targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
 obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
 
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
 ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
index 1278788d96e3efb41b13a88099fa5914844bc4b1..436062dbb6e2e5c4141c70c2e43d9080679199b2 100644 (file)
@@ -3,6 +3,7 @@
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 
index dbeeb3a049f21f8f24d08560b1afc9ee6ec3690b..3be9c832dec117a041378bd606f07df89ca44520 100644 (file)
@@ -579,7 +579,6 @@ config QDIO
 
 menuconfig PCI
        bool "PCI support"
-       select HAVE_DMA_ATTRS
        select PCI_MSI
        select IOMMU_SUPPORT
        help
index b3fd54d93dd20f85147c9e1e884e08e7358ec914..e64bfcb9702f668ada47197049718b1531ddc1a1 100644 (file)
@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index 6c391a5d3e5cf63f90475e5c55a2a5f5c687716d..e13da05505dcebc9501e97a34ec26057b1ffab90 100644 (file)
@@ -11,7 +11,6 @@ config SUPERH
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_PERF_EVENTS
        select HAVE_DEBUG_BUGVERBOSE
        select ARCH_HAVE_CUSTOM_GPIO_H
index a3745a3fe0290896a2a14450e6e47e8caf30a793..e11cf0c8206b7aa44fbfabc291405f9b941c5d2d 100644 (file)
@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #define DMA_ERROR_CODE 0
 
-#include <asm-generic/dma-mapping-common.h>
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction dir);
 
index 3203e42190dd79f513a2670677d373b72672c8f7..57ffaf285c2f511e1ee1549e8e002fa2f1e5d22c 100644 (file)
@@ -26,7 +26,6 @@ config SPARC
        select RTC_CLASS
        select RTC_DRV_M48T59
        select RTC_SYSTOHC
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select HAVE_ARCH_JUMP_LABEL if SPARC64
        select GENERIC_IRQ_SHOW
index a21da597b0b59d49cce2f5fcca68a4274bccbfc6..1180ae25415489d1642ffc94a5db70011f250dd9 100644 (file)
@@ -37,21 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return dma_ops;
 }
 
-#define HAVE_ARCH_DMA_SET_MASK 1
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type) {
-               if (!dev->dma_mask || !dma_supported(dev, mask))
-                       return -EINVAL;
-               *dev->dma_mask = mask;
-               return 0;
-       }
-#endif
-       return -EINVAL;
-}
-
-#include <asm-generic/dma-mapping-common.h>
-
 #endif
index 6bfbe8b71e7ee10f650078ef39f1eb9bc89b37f0..de4a4fff93237a1a9d14c6520f905a67c8ef7aad 100644 (file)
@@ -5,7 +5,6 @@ config TILE
        def_bool y
        select HAVE_PERF_EVENTS
        select USE_PMC if PERF_EVENTS
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select HAVE_KVM if !TILEGX
        select GENERIC_FIND_FIRST_BIT
index 96ac6cce4a32c03ead94166ac1190b91ac5b032d..01ceb4a895b09b21167fd269ee8874a5d87a00ce 100644 (file)
@@ -73,37 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 }
 
 #define HAVE_ARCH_DMA_SET_MASK 1
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-       struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-       /*
-        * For PCI devices with 64-bit DMA addressing capability, promote
-        * the dma_ops to hybrid, with the consistent memory DMA space limited
-        * to 32-bit. For 32-bit capable devices, limit the streaming DMA
-        * address range to max_direct_dma_addr.
-        */
-       if (dma_ops == gx_pci_dma_map_ops ||
-           dma_ops == gx_hybrid_pci_dma_map_ops ||
-           dma_ops == gx_legacy_pci_dma_map_ops) {
-               if (mask == DMA_BIT_MASK(64) &&
-                   dma_ops == gx_legacy_pci_dma_map_ops)
-                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-               else if (mask > dev->archdata.max_direct_dma_addr)
-                       mask = dev->archdata.max_direct_dma_addr;
-       }
-
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-
-       *dev->dma_mask = mask;
-
-       return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
 
 /*
  * dma_alloc_noncoherent() is #defined to return coherent memory,
index 09b58703ac264a7218e2f4586abd7e1f4d59f82a..b6bc0547a4f6989b9e275b27c4287d7146dd3670 100644 (file)
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
+int dma_set_mask(struct device *dev, u64 mask)
+{
+       struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+       /*
+        * For PCI devices with 64-bit DMA addressing capability, promote
+        * the dma_ops to hybrid, with the consistent memory DMA space limited
+        * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+        * address range to max_direct_dma_addr.
+        */
+       if (dma_ops == gx_pci_dma_map_ops ||
+           dma_ops == gx_hybrid_pci_dma_map_ops ||
+           dma_ops == gx_legacy_pci_dma_map_ops) {
+               if (mask == DMA_BIT_MASK(64) &&
+                   dma_ops == gx_legacy_pci_dma_map_ops)
+                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+               else if (mask > dev->archdata.max_direct_dma_addr)
+                       mask = dev->archdata.max_direct_dma_addr;
+       }
+
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+
+       *dev->dma_mask = mask;
+
+       return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
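
The tile dma_set_mask() logic is unchanged, it merely moves out of line: asm/dma-mapping.h keeps only the HAVE_ARCH_DMA_SET_MASK marker and a prototype, and the EXPORT_SYMBOL lets modular drivers keep negotiating their mask the usual way, for example:

	/* Illustrative probe fragment; pdev is a hypothetical PCI device. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA addressing */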
index 877342640b6e6da59db68ee9705ca8e00a62e7e4..e5602ee9c6101119990f4a84b367924b6afdf1d9 100644 (file)
@@ -5,7 +5,6 @@ config UNICORE32
        select ARCH_MIGHT_HAVE_PC_SERIO
        select HAVE_MEMBLOCK
        select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_DMA_ATTRS
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
        select GENERIC_ATOMIC64
index 8140e053ccd351332f33d3df8decccfdeb86074b..4749854afd03544481207d54569f47c5368edc60 100644 (file)
@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return &swiotlb_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (dev && dev->dma_mask)
index 9bd3cc03d51dbbb6f64223fb0739f2e3eaeb300a..330e738ccfc13633954a02677e46bb1b12605fa9 100644 (file)
@@ -31,6 +31,7 @@ config X86
        select ARCH_HAS_PMEM_API                if X86_64
        select ARCH_HAS_MMIO_FLUSH
        select ARCH_HAS_SG_CHAIN
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_MIGHT_HAVE_ACPI_PDC         if ACPI
        select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -99,7 +100,6 @@ config X86
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
index 2ee62dba0373b059664de8d554caa6143ba8d989..bbe1a62efc021aadb29f27a1c7a68257b8478cef 100644 (file)
@@ -60,6 +60,7 @@ clean-files += cpustr.h
 KBUILD_CFLAGS  := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
index 0a291cdfaf77100117baf53b2e3af75a43a8af4c..f9ce75d80101cc7a620bed1169a1128056b9caa5 100644 (file)
@@ -33,6 +33,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n

 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 LDFLAGS_vmlinux := -T
index 265c0ed6811800e3b03550cfded0c0cc743794f0..c854541d93ff66d71fa8b38d0d4f7f1ed9e16c15 100644 (file)
@@ -4,6 +4,7 @@
 
 KBUILD_CFLAGS += $(DISABLE_LTO)
 KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
 
 VDSO64-$(CONFIG_X86_64)                := y
 VDSOX32-$(CONFIG_X86_X32_ABI)  := y
index 953b7263f84466f463d416814be709d22971c701..3a27b93e62619155064e87ffa7e895df8a0d5275 100644 (file)
@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *hwdev, u64 mask);
 
-#include <asm-generic/dma-mapping-common.h>
-
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
                                        struct dma_attrs *attrs);
index 819ab3f9c9c7cb1476007619b5e6998c528be30f..ba7fbba9831b88e0be4c228db2fe61b298c62d64 100644 (file)
@@ -385,6 +385,7 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
        return image->fops->cleanup(image->image_loader_data);
 }
 
+#ifdef CONFIG_KEXEC_VERIFY_SIG
 int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
                                 unsigned long kernel_len)
 {
@@ -395,6 +396,7 @@ int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
 
        return image->fops->verify_sig(kernel, kernel_len);
 }
+#endif
 
 /*
  * Apply purgatory relocations.
index 2730d775ef9a44e4709c99f29a333f07c4691ef0..3e75fcf6b8362e7c43f24340a3836386a4d5ac48 100644 (file)
@@ -70,3 +70,4 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
                   -I$(srctree)/arch/x86/boot
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
index 82044f73232335f715a1356ae61f7d66dc538f51..e9df1567d778e2f87ecdb1a64df1c2dfeb874071 100644 (file)
@@ -15,7 +15,6 @@ config XTENSA
        select GENERIC_PCI_IOMAP
        select GENERIC_SCHED_CLOCK
        select HAVE_DMA_API_DEBUG
-       select HAVE_DMA_ATTRS
        select HAVE_FUNCTION_TRACER
        select HAVE_FUTEX_CMPXCHG if !MMU
        select HAVE_IRQ_TIME_ACCOUNTING
index 66c9ba261e30b694713bc7861bba33ef5ee356b6..3fc1170a64880cdfc495fb3aca16fe8904d807b3 100644 (file)
@@ -13,8 +13,6 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 
-#include <asm-generic/dma-coherent.h>
-
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 
@@ -30,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
                return &xtensa_dma_map_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction);
 
index d030594ed22b25390aee7eb0159b7e9063ab9558..9e079d49e7f2e678044d0cb527daec7cf70a293e 100644 (file)
@@ -86,7 +86,6 @@
 #define MADV_SEQUENTIAL        2               /* expect sequential page references */
 #define MADV_WILLNEED  3               /* will need these pages */
 #define MADV_DONTNEED  4               /* don't need these pages */
-#define MADV_FREE      5               /* free pages only if memory pressure */
 
 /* common parameters: try to keep these consistent across architectures */
 #define MADV_FREE      8               /* free pages only if memory pressure */
index 91bbb1959d8d0a831b7a7f0dc6472f2e5d4937b1..691eeea2f19a027fe01f720dbf3b139784ea4ffc 100644 (file)
@@ -200,7 +200,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
 
 struct cpu_attr {
        struct device_attribute attr;
-       const struct cpumask *const * const map;
+       const struct cpumask *const map;
 };
 
 static ssize_t show_cpus_attr(struct device *dev,
@@ -209,7 +209,7 @@ static ssize_t show_cpus_attr(struct device *dev,
 {
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
 
-       return cpumap_print_to_pagebuf(true, buf, *ca->map);
+       return cpumap_print_to_pagebuf(true, buf, ca->map);
 }
 
 #define _CPU_ATTR(name, map) \
@@ -217,9 +217,9 @@ static ssize_t show_cpus_attr(struct device *dev,
 
 /* Keep in sync with cpu_subsys_attrs */
 static struct cpu_attr cpu_attrs[] = {
-       _CPU_ATTR(online, &cpu_online_mask),
-       _CPU_ATTR(possible, &cpu_possible_mask),
-       _CPU_ATTR(present, &cpu_present_mask),
+       _CPU_ATTR(online, &__cpu_online_mask),
+       _CPU_ATTR(possible, &__cpu_possible_mask),
+       _CPU_ATTR(present, &__cpu_present_mask),
 };
 
 /*
index d95c5971c2256f20f473d5744b01fc54bbf9acf2..d799662f19eb83d9830ee562a17f2bf1394a0462 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API
@@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 }
 EXPORT_SYMBOL(dmam_free_noncoherent);
 
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 
 static void dmam_coherent_decl_release(struct device *dev, void *res)
 {
@@ -247,7 +246,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
        int ret = -ENXIO;
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -264,7 +263,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 
        return ret;
 }
index e41594510b978291de179182c46b0196b3a377e3..0c2f0a61b0ea021872f27705b53ae784f1be3f61 100644 (file)
@@ -56,9 +56,7 @@ static u32 find_nvram_size(void __iomem *end)
 static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
 {
        struct nvram_header __iomem *header;
-       int i;
        u32 off;
-       u32 *src, *dst;
        u32 size;
 
        if (nvram_len) {
@@ -95,10 +93,7 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
        return -ENXIO;
 
 found:
-       src = (u32 *)header;
-       dst = (u32 *)nvram_buf;
-       for (i = 0; i < sizeof(struct nvram_header); i += 4)
-               *dst++ = __raw_readl(src++);
+       __ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
        header = (struct nvram_header *)nvram_buf;
        nvram_len = header->len;
        if (nvram_len > size) {
@@ -111,8 +106,8 @@ found:
                nvram_len = NVRAM_SPACE - 1;
        }
        /* proceed reading data after header */
-       for (; i < nvram_len; i += 4)
-               *dst++ = readl(src++);
+       __ioread32_copy(nvram_buf + sizeof(*header), header + 1,
+                       DIV_ROUND_UP(nvram_len, 4));
        nvram_buf[NVRAM_SPACE - 1] = '\0';
 
        return 0;
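
This driver (and qcom_smd further down) replaces its hand-rolled readl loop with the __ioread32_copy() helper added by this series; note the count is in 32-bit words, hence the sizeof(*header) / 4 and DIV_ROUND_UP(nvram_len, 4) above. A simplified sketch of what the helper does, not the exact lib/iomap_copy.c code:

void ioread32_copy_sketch(void *to, const void __iomem *from, size_t count)
{
	u32 *dst = to;
	const u32 __iomem *src = from;

	while (count--)
		*dst++ = __raw_readl(src++);
}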
index 9c12e18031d57b29b1a18547447b17207b3054de..aaf9c0bab42e8fc29e093da64f609d99064046fd 100644 (file)
@@ -22,6 +22,7 @@ KBUILD_CFLAGS                 := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
 
 GCOV_PROFILE                   := n
 KASAN_SANITIZE                 := n
+UBSAN_SANITIZE                 := n
 
 lib-y                          := efi-stub-helper.o
 
index 59babd5a5396a332e78984a7c19549c4e7486c68..8ae7ab68cb9781fd04f176136564aa841e16fd20 100644 (file)
@@ -82,13 +82,13 @@ config DRM_TTM
 
 config DRM_GEM_CMA_HELPER
        bool
-       depends on DRM && HAVE_DMA_ATTRS
+       depends on DRM
        help
          Choose this if you need the GEM CMA helper functions
 
 config DRM_KMS_CMA_HELPER
        bool
-       depends on DRM && HAVE_DMA_ATTRS
+       depends on DRM
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
index 35ca4f0078392df2ff06c557ba59c86724d6ee8f..a1844b50546c1529cb1ec653553a74c2a2afc059 100644 (file)
@@ -5,7 +5,7 @@ config DRM_IMX
        select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
-       depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+       depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
        depends on IMX_IPUV3_CORE
        help
          enable i.MX graphics support
index d4e0a39568f699c63112ab36d451c2371d0b987f..96dcd4a78951c51b3471a7c4dc22bded4cd8b39d 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_RCAR_DU
        tristate "DRM Support for R-Car Display Unit"
-       depends on DRM && ARM && HAVE_DMA_ATTRS && OF
+       depends on DRM && ARM && OF
        depends on ARCH_SHMOBILE || COMPILE_TEST
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
index b9202aa6f8ab28331790313ad967af90fa729df6..8d17d00ddb4b75aea7456fd1ef3dcb4f1fbe5b0e 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_SHMOBILE
        tristate "DRM Support for SH Mobile"
-       depends on DRM && ARM && HAVE_DMA_ATTRS
+       depends on DRM && ARM
        depends on ARCH_SHMOBILE || COMPILE_TEST
        depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
        select BACKLIGHT_CLASS_DEVICE
index 10c1b1926e6f9665b083df291632924d91c12ef5..5ad43a1bb260a828649dacb3f095c3619b2c9e0d 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_STI
        tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
-       depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+       depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
        select RESET_CONTROLLER
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
index 78beafb0742c2ca5fb382eb3fb52c978f2e420d5..f60a1ec84fa4b1025891830f0b3444b0a16b9dbf 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_TILCDC
        tristate "DRM Support for TI LCDC Display Controller"
-       depends on DRM && OF && ARM && HAVE_DMA_ATTRS
+       depends on DRM && OF && ARM
        select DRM_KMS_HELPER
        select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
index 2d7d115ddf3fae6558ab42d80255917c5eabad1a..584810474e5b84b211a5cbda681ea04c41bc0055 100644 (file)
@@ -1,7 +1,7 @@
 config DRM_VC4
        tristate "Broadcom VC4 Graphics"
        depends on ARCH_BCM2835 || COMPILE_TEST
-       depends on DRM && HAVE_DMA_ATTRS
+       depends on DRM
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
index 311f9fe5aa34bd809b2705e50c84682f460e35f1..8d24fb159cc92504c16938529542c117bd38000d 100644 (file)
@@ -167,9 +167,7 @@ static int __init iio_sw_trigger_init(void)
                configfs_register_default_group(&iio_configfs_subsys.su_group,
                                                "triggers",
                                                &iio_triggers_group_type);
-       if (IS_ERR(iio_triggers_group))
-               return PTR_ERR(iio_triggers_group);
-       return 0;
+       return PTR_ERR_OR_ZERO(iio_triggers_group);
 }
 module_init(iio_sw_trigger_init);
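
PTR_ERR_OR_ZERO() is simply the removed if/return pair folded into one helper; it is roughly equivalent to this sketch:

static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}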
 
index 0c53805dff0e328e03e624732a78856166164ae5..526359447ff90fda699b032e0dbed4d6a06e577e 100644 (file)
@@ -216,7 +216,6 @@ config VIDEO_STI_BDISP
        tristate "STMicroelectronics BDISP 2D blitter driver"
        depends on VIDEO_DEV && VIDEO_V4L2
        depends on ARCH_STI || COMPILE_TEST
-       depends on HAVE_DMA_ATTRS
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index 24f2f8473deeccf796b8e34771c1f37085b6b4ad..84abf9d3c24e62410b242520e7d5f20553e784dd 100644 (file)
@@ -1909,7 +1909,7 @@ static void msb_io_work(struct work_struct *work)
                lba = blk_rq_pos(msb->req);
 
                sector_div(lba, msb->page_size / 512);
-               page = do_div(lba, msb->pages_in_block);
+               page = sector_div(lba, msb->pages_in_block);
 
                if (rq_data_dir(msb->req) == READ)
                        error = msb_do_read_request(msb, lba, page, sg,
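
The switch from do_div() to sector_div() is a correctness fix: do_div() requires a u64 lvalue, while lba is a sector_t, which is only 32 bits when CONFIG_LBDAF is disabled. Both macros divide in place and return the remainder, which the page-within-block computation relies on. Illustrative fragment with hypothetical variable names:

	sector_t lba;
	unsigned int page;

	lba = blk_rq_pos(req);			/* request start, in 512-byte sectors */
	sector_div(lba, page_size / 512);	/* convert to device pages */
	page = sector_div(lba, pages_in_block);	/* remainder: page inside the block */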
index 22892c701c63b8e650fc7a6323934ca13385382b..054fc10cb3b6ab5a6b5f9d51b2ded3bb656cbf4e 100644 (file)
@@ -95,6 +95,7 @@ config DUMMY_IRQ
 config IBM_ASM
        tristate "Device driver for IBM RSA service processor"
        depends on X86 && PCI && INPUT
+       depends on SERIAL_8250 || SERIAL_8250=n
        ---help---
          This option enables device driver support for in-band access to the
          IBM RSA (Condor) service processor in eServer xSeries systems.
index 8e11fb2831cd20cb9e4b95415aee05041bb7813b..e24b05996a1b124bb2794b0dcb87ccb7d7ae4456 100644 (file)
@@ -786,18 +786,27 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
        return CCIO_IOVA(iovp, offset);
 }
 
+
+static dma_addr_t
+ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       return ccio_map_single(dev, page_address(page) + offset, size,
+                       direction);
+}
+
+
 /**
- * ccio_unmap_single - Unmap an address range from the IOMMU.
+ * ccio_unmap_page - Unmap an address range from the IOMMU.
  * @dev: The PCI device.
  * @addr: The start address of the DMA region.
  * @size: The length of the DMA region.
  * @direction: The direction of the DMA transaction (to/from device).
- *
- * This function implements the pci_unmap_single function.
  */
 static void 
-ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, 
-                 enum dma_data_direction direction)
+ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        unsigned long flags; 
@@ -826,7 +835,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 }
 
 /**
- * ccio_alloc_consistent - Allocate a consistent DMA mapping.
+ * ccio_alloc - Allocate a consistent DMA mapping.
  * @dev: The PCI device.
  * @size: The length of the DMA region.
  * @dma_handle: The DMA address handed back to the device (not the cpu).
@@ -834,7 +843,8 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
  * This function implements the pci_alloc_consistent function.
  */
 static void * 
-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
+               struct dma_attrs *attrs)
 {
       void *ret;
 #if 0
@@ -858,7 +868,7 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
 }
 
 /**
- * ccio_free_consistent - Free a consistent DMA mapping.
+ * ccio_free - Free a consistent DMA mapping.
  * @dev: The PCI device.
  * @size: The length of the DMA region.
  * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
@@ -867,10 +877,10 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
  * This function implements the pci_free_consistent function.
  */
 static void 
-ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr, 
-                    dma_addr_t dma_handle)
+ccio_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-       ccio_unmap_single(dev, dma_handle, size, 0);
+       ccio_unmap_page(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long)cpu_addr, get_order(size));
 }
 
@@ -897,7 +907,7 @@ ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
  */
 static int
 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 
-           enum dma_data_direction direction)
+           enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
@@ -974,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
  */
 static void 
 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 
-             enum dma_data_direction direction)
+             enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
 
@@ -993,27 +1003,22 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 #ifdef CCIO_COLLECT_STATS
                ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
 #endif
-               ccio_unmap_single(dev, sg_dma_address(sglist),
-                                 sg_dma_len(sglist), direction);
+               ccio_unmap_page(dev, sg_dma_address(sglist),
+                                 sg_dma_len(sglist), direction, NULL);
                ++sglist;
        }
 
        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 }
 
-static struct hppa_dma_ops ccio_ops = {
+static struct dma_map_ops ccio_ops = {
        .dma_supported =        ccio_dma_supported,
-       .alloc_consistent =     ccio_alloc_consistent,
-       .alloc_noncoherent =    ccio_alloc_consistent,
-       .free_consistent =      ccio_free_consistent,
-       .map_single =           ccio_map_single,
-       .unmap_single =         ccio_unmap_single,
+       .alloc =                ccio_alloc,
+       .free =                 ccio_free,
+       .map_page =             ccio_map_page,
+       .unmap_page =           ccio_unmap_page,
        .map_sg =               ccio_map_sg,
        .unmap_sg =             ccio_unmap_sg,
-       .dma_sync_single_for_cpu =      NULL,   /* NOP for U2/Uturn */
-       .dma_sync_single_for_device =   NULL,   /* NOP for U2/Uturn */
-       .dma_sync_sg_for_cpu =          NULL,   /* ditto */
-       .dma_sync_sg_for_device =               NULL,   /* ditto */
 };
 
 #ifdef CONFIG_PROC_FS
@@ -1062,7 +1067,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
                           ioc->msingle_calls, ioc->msingle_pages,
                           (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
 
-               /* KLUGE - unmap_sg calls unmap_single for each mapped page */
+               /* KLUGE - unmap_sg calls unmap_page for each mapped page */
                min = ioc->usingle_calls - ioc->usg_calls;
                max = ioc->usingle_pages - ioc->usg_pages;
                seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
index 225049b492e535f7bf30ac8ef00f110d4647c0c2..42ec4600b7e490f031b03906d83c71083abc74ce 100644 (file)
@@ -780,8 +780,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 }
 
 
+static dma_addr_t
+sba_map_page(struct device *dev, struct page *page, unsigned long offset,
+               size_t size, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
+{
+       return sba_map_single(dev, page_address(page) + offset, size,
+                       direction);
+}
+
+
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
@@ -790,8 +800,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void
-sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
-                enum dma_data_direction direction)
+sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+               enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -858,15 +868,15 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 
 
 /**
- * sba_alloc_consistent - allocate/map shared mem for DMA
+ * sba_alloc - allocate/map shared mem for DMA
  * @hwdev: instance of PCI owned by the driver that's asking.
  * @size:  number of bytes mapped in driver buffer.
  * @dma_handle:  IOVA of new buffer.
  *
  * See Documentation/DMA-API-HOWTO.txt
  */
-static void *sba_alloc_consistent(struct device *hwdev, size_t size,
-                                       dma_addr_t *dma_handle, gfp_t gfp)
+static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, struct dma_attrs *attrs)
 {
        void *ret;
 
@@ -888,7 +898,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
 
 
 /**
- * sba_free_consistent - free/unmap shared mem for DMA
+ * sba_free - free/unmap shared mem for DMA
  * @hwdev: instance of PCI owned by the driver that's asking.
  * @size:  number of bytes mapped in driver buffer.
  * @vaddr:  virtual address IOVA of "consistent" buffer.
@@ -897,10 +907,10 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void
-sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
-                   dma_addr_t dma_handle)
+sba_free(struct device *hwdev, size_t size, void *vaddr,
+                   dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-       sba_unmap_single(hwdev, dma_handle, size, 0);
+       sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -933,7 +943,7 @@ int dump_run_sg = 0;
  */
 static int
 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-          enum dma_data_direction direction)
+          enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
@@ -1016,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
  */
 static void 
 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
-            enum dma_data_direction direction)
+            enum dma_data_direction direction, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
@@ -1040,7 +1050,8 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
        while (sg_dma_len(sglist) && nents--) {
 
-               sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
+               sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+                               direction, NULL);
 #ifdef SBA_COLLECT_STATS
                ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
                ioc->usingle_calls--;   /* kluge since call is unmap_sg() */
@@ -1058,19 +1069,14 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 }
 
-static struct hppa_dma_ops sba_ops = {
+static struct dma_map_ops sba_ops = {
        .dma_supported =        sba_dma_supported,
-       .alloc_consistent =     sba_alloc_consistent,
-       .alloc_noncoherent =    sba_alloc_consistent,
-       .free_consistent =      sba_free_consistent,
-       .map_single =           sba_map_single,
-       .unmap_single =         sba_unmap_single,
+       .alloc =                sba_alloc,
+       .free =                 sba_free,
+       .map_page =             sba_map_page,
+       .unmap_page =           sba_unmap_page,
        .map_sg =               sba_map_sg,
        .unmap_sg =             sba_unmap_sg,
-       .dma_sync_single_for_cpu =      NULL,
-       .dma_sync_single_for_device =   NULL,
-       .dma_sync_sg_for_cpu =          NULL,
-       .dma_sync_sg_for_device =       NULL,
 };
 
 
index cdb005c0094df0b58362db0e6a4b462afd2f0621..eda41563d06d8a1f0e1fa65267fb9441bdf75b30 100644 (file)
@@ -125,8 +125,7 @@ rio_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buf, loff_t off, size_t count)
 {
-       struct rio_dev *dev =
-           to_rio_dev(container_of(kobj, struct device, kobj));
+       struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj));
        unsigned int size = 0x100;
        loff_t init_off = off;
        u8 *data = (u8 *) buf;
@@ -197,8 +196,7 @@ rio_write_config(struct file *filp, struct kobject *kobj,
                 struct bin_attribute *bin_attr,
                 char *buf, loff_t off, size_t count)
 {
-       struct rio_dev *dev =
-           to_rio_dev(container_of(kobj, struct device, kobj));
+       struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj));
        unsigned int size = count;
        loff_t init_off = off;
        u8 *data = (u8 *) buf;
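
kobj_to_dev() expresses the same container_of() relationship the open-coded version spelled out; its definition amounts to:

/* Sketch of the helper (see include/linux/device.h). */
static inline struct device *kobj_to_dev_sketch(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}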
index 86b598cff91a95a38003a65682135989dfa8f3e3..498fd0581a451999b89cbb43805b37f003fdb379 100644 (file)
@@ -434,20 +434,15 @@ static void smd_copy_to_fifo(void __iomem *dst,
 /*
  * Copy count bytes of data using 32bit accesses, if that is required.
  */
-static void smd_copy_from_fifo(void *_dst,
-                              const void __iomem *_src,
+static void smd_copy_from_fifo(void *dst,
+                              const void __iomem *src,
                               size_t count,
                               bool word_aligned)
 {
-       u32 *dst = (u32 *)_dst;
-       u32 *src = (u32 *)_src;
-
        if (word_aligned) {
-               count /= sizeof(u32);
-               while (count--)
-                       *dst++ = __raw_readl(src++);
+               __ioread32_copy(dst, src, count / sizeof(u32));
        } else {
-               memcpy_fromio(_dst, _src, count);
+               memcpy_fromio(dst, src, count);
        }
 }
 
index ea4aba56f29d69dc26192a2b76e14e1d03747e2d..fadf408bdd4622205b29c8a1066d1e03909abd40 100644 (file)
@@ -44,24 +44,24 @@ struct adfs_dir_ops;
  */
 struct adfs_sb_info {
        union { struct {
-               struct adfs_discmap *s_map;     /* bh list containing map        */
-               const struct adfs_dir_ops *s_dir; /* directory operations        */
+               struct adfs_discmap *s_map;     /* bh list containing map */
+               const struct adfs_dir_ops *s_dir; /* directory operations */
                };
-               struct rcu_head rcu;            /* used only at shutdown time    */
+               struct rcu_head rcu;    /* used only at shutdown time    */
        };
-       kuid_t          s_uid;          /* owner uid                             */
-       kgid_t          s_gid;          /* owner gid                             */
-       umode_t         s_owner_mask;   /* ADFS owner perm -> unix perm          */
-       umode_t         s_other_mask;   /* ADFS other perm -> unix perm          */
+       kuid_t          s_uid;          /* owner uid */
+       kgid_t          s_gid;          /* owner gid */
+       umode_t         s_owner_mask;   /* ADFS owner perm -> unix perm */
+       umode_t         s_other_mask;   /* ADFS other perm -> unix perm */
        int             s_ftsuffix;     /* ,xyz hex filetype suffix option */
 
-       __u32           s_ids_per_zone; /* max. no ids in one zone               */
-       __u32           s_idlen;        /* length of ID in map                   */
-       __u32           s_map_size;     /* sector size of a map                  */
-       unsigned long   s_size;         /* total size (in blocks) of this fs     */
-       signed int      s_map2blk;      /* shift left by this for map->sector    */
-       unsigned int    s_log2sharesize;/* log2 share size                       */
-       __le32          s_version;      /* disc format version                   */
+       __u32           s_ids_per_zone; /* max. no ids in one zone */
+       __u32           s_idlen;        /* length of ID in map */
+       __u32           s_map_size;     /* sector size of a map */
+       unsigned long   s_size;         /* total size (in blocks) of this fs */
+       signed int      s_map2blk;      /* shift left by this for map->sector */
+       unsigned int    s_log2sharesize;/* log2 share size */
+       __le32          s_version;      /* disc format version */
        unsigned int    s_namelen;      /* maximum number of characters in name  */
 };
 
index b3c153ca435d24fdbdfcb909228b6b4787bb63f2..9ea87e9fdccf6e8c26350b7bce598562295eacb9 100644 (file)
@@ -118,6 +118,26 @@ int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);
 
+       if (ret == 0) {
+               /*
+                * Ensure that this coredump name component can't cause the
+                * resulting corefile path to consist of a ".." or ".".
+                */
+               if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
+                               (cn->used - cur == 2 && cn->corename[cur] == '.'
+                               && cn->corename[cur+1] == '.'))
+                       cn->corename[cur] = '!';
+
+               /*
+                * Empty names are fishy and could be used to create a "//" in a
+                * corefile name, causing the coredump to happen one directory
+                * level too high. Enforce that all components of the core
+                * pattern are at least one character long.
+                */
+               if (cn->used == cur)
+                       ret = cn_printf(cn, "!");
+       }
+
        for (; cur < cn->used; ++cur) {
                if (cn->corename[cur] == '/')
                        cn->corename[cur] = '!';
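
These checks close a path-traversal hole: a single expanded core_pattern component that comes out empty, as "." or as ".." could previously steer the corefile outside the intended directory (the existing loop only rewrites '/' characters). A hypothetical illustration of what the rewrite now defuses:

	/* Hypothetical: with core_pattern = "/cores/%e/%p.core", a task whose
	 * comm is ".." would have dumped to "/cores/../<pid>.core"; it now
	 * becomes "/cores/!./<pid>.core", and an empty component becomes "!". */
	prctl(PR_SET_NAME, "..", 0, 0, 0);
	abort();	/* crash with the crafted name */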
index 1e009cad8d5cac61832ebb3916033780f2fec1a6..ae1dbcf47e979d48b67ee83e1ec413e4d119a48b 100644 (file)
@@ -92,7 +92,7 @@
  */
 
 /* Epoll private bits inside the event mask */
-#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET)
+#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
@@ -1002,6 +1002,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
        unsigned long flags;
        struct epitem *epi = ep_item_from_wait(wait);
        struct eventpoll *ep = epi->ep;
+       int ewake = 0;
 
        if ((unsigned long)key & POLLFREE) {
                ep_pwq_from_wait(wait)->whead = NULL;
@@ -1066,8 +1067,10 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
         * Wake up ( if active ) both the eventpoll wait list and the ->poll()
         * wait list.
         */
-       if (waitqueue_active(&ep->wq))
+       if (waitqueue_active(&ep->wq)) {
+               ewake = 1;
                wake_up_locked(&ep->wq);
+       }
        if (waitqueue_active(&ep->poll_wait))
                pwake++;
 
@@ -1078,6 +1081,9 @@ out_unlock:
        if (pwake)
                ep_poll_safewake(&ep->poll_wait);
 
+       if (epi->event.events & EPOLLEXCLUSIVE)
+               return ewake;
+
        return 1;
 }
 
@@ -1095,7 +1101,10 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
                pwq->whead = whead;
                pwq->base = epi;
-               add_wait_queue(whead, &pwq->wait);
+               if (epi->event.events & EPOLLEXCLUSIVE)
+                       add_wait_queue_exclusive(whead, &pwq->wait);
+               else
+                       add_wait_queue(whead, &pwq->wait);
                list_add_tail(&pwq->llink, &epi->pwqlist);
                epi->nwait++;
        } else {
@@ -1861,6 +1870,15 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        if (f.file == tf.file || !is_file_epoll(f.file))
                goto error_tgt_fput;
 
+       /*
+        * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
+        * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
+        * Also, we do not currently support nested exclusive wakeups.
+        */
+       if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD ||
+               (op == EPOLL_CTL_ADD && is_file_epoll(tf.file))))
+               goto error_tgt_fput;
+
        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
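
EPOLLEXCLUSIVE is the new uapi flag behind these hunks: it is accepted only at EPOLL_CTL_ADD time, makes the wait-queue entry exclusive, and has ep_poll_callback() report whether it actually woke anyone, so only one of several epoll instances watching the same fd is woken per event. A hypothetical userspace fragment, e.g. forked workers sharing one listening socket:

#include <sys/epoll.h>

	/* listen_fd and epfd are set up per worker before this point. */
	struct epoll_event ev = {
		.events  = EPOLLIN | EPOLLEXCLUSIVE,	/* wake one waiter, not all */
		.data.fd = listen_fd,
	};
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev) < 0)
		perror("epoll_ctl");	/* e.g. EPOLL_CTL_MOD with this flag is rejected */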
index 93fc62232ec21e795efc938edad43a22d55cac52..5d384921524d9722c96cd14c9fdcf0a3aab05851 100644 (file)
@@ -301,15 +301,59 @@ static int fat_bmap_cluster(struct inode *inode, int cluster)
        return dclus;
 }
 
-int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
-            unsigned long *mapped_blocks, int create)
+int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
+                          sector_t last_block,
+                          unsigned long *mapped_blocks, sector_t *bmap)
 {
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
+       int cluster, offset;
+
+       cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
+       offset  = sector & (sbi->sec_per_clus - 1);
+       cluster = fat_bmap_cluster(inode, cluster);
+       if (cluster < 0)
+               return cluster;
+       else if (cluster) {
+               *bmap = fat_clus_to_blknr(sbi, cluster) + offset;
+               *mapped_blocks = sbi->sec_per_clus - offset;
+               if (*mapped_blocks > last_block - sector)
+                       *mapped_blocks = last_block - sector;
+       }
+
+       return 0;
+}
+
+static int is_exceed_eof(struct inode *inode, sector_t sector,
+                        sector_t *last_block, int create)
+{
+       struct super_block *sb = inode->i_sb;
        const unsigned long blocksize = sb->s_blocksize;
        const unsigned char blocksize_bits = sb->s_blocksize_bits;
+
+       *last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
+       if (sector >= *last_block) {
+               if (!create)
+                       return 1;
+
+               /*
+                * ->mmu_private can access on only allocation path.
+                * (caller must hold ->i_mutex)
+                */
+               *last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
+                       >> blocksize_bits;
+               if (sector >= *last_block)
+                       return 1;
+       }
+
+       return 0;
+}
+
+int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+            unsigned long *mapped_blocks, int create, bool from_bmap)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
        sector_t last_block;
-       int cluster, offset;
 
        *phys = 0;
        *mapped_blocks = 0;
@@ -321,31 +365,16 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
                return 0;
        }
 
-       last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
-       if (sector >= last_block) {
-               if (!create)
+       if (!from_bmap) {
+               if (is_exceed_eof(inode, sector, &last_block, create))
                        return 0;
-
-               /*
-                * ->mmu_private can access on only allocation path.
-                * (caller must hold ->i_mutex)
-                */
-               last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
-                       >> blocksize_bits;
+       } else {
+               last_block = inode->i_blocks >>
+                               (inode->i_sb->s_blocksize_bits - 9);
                if (sector >= last_block)
                        return 0;
        }
 
-       cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
-       offset  = sector & (sbi->sec_per_clus - 1);
-       cluster = fat_bmap_cluster(inode, cluster);
-       if (cluster < 0)
-               return cluster;
-       else if (cluster) {
-               *phys = fat_clus_to_blknr(sbi, cluster) + offset;
-               *mapped_blocks = sbi->sec_per_clus - offset;
-               if (*mapped_blocks > last_block - sector)
-                       *mapped_blocks = last_block - sector;
-       }
-       return 0;
+       return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
+                                     phys);
 }
index 8b2127ffb226cca2ece880d41fd756624c5d3118..7def96caec5f7fc4abd82e51746bce4dce1f30ee 100644 (file)
@@ -91,7 +91,7 @@ next:
 
        *bh = NULL;
        iblock = *pos >> sb->s_blocksize_bits;
-       err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
+       err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
        if (err || !phys)
                return -1;      /* beyond EOF or error */
 
index be5e15323bab45b99e4693b43cb543c9aa85c586..e6b764a17a9c844bacb724c3f9b601df6c8bcdd0 100644 (file)
@@ -87,7 +87,7 @@ struct msdos_sb_info {
        unsigned int vol_id;            /*volume ID*/
 
        int fatent_shift;
-       struct fatent_operations *fatent_ops;
+       const struct fatent_operations *fatent_ops;
        struct inode *fat_inode;
        struct inode *fsinfo_inode;
 
@@ -285,8 +285,11 @@ static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
 extern void fat_cache_inval_inode(struct inode *inode);
 extern int fat_get_cluster(struct inode *inode, int cluster,
                           int *fclus, int *dclus);
+extern int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
+                                 sector_t last_block,
+                                 unsigned long *mapped_blocks, sector_t *bmap);
 extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
-                   unsigned long *mapped_blocks, int create);
+                   unsigned long *mapped_blocks, int create, bool from_bmap);
 
 /* fat/dir.c */
 extern const struct file_operations fat_dir_operations;
@@ -384,6 +387,7 @@ static inline unsigned long fat_dir_hash(int logstart)
 {
        return hash_32(logstart, FAT_HASH_BITS);
 }
+extern int fat_add_cluster(struct inode *inode);
 
 /* fat/misc.c */
 extern __printf(3, 4) __cold
index 8226557130a2fc47d3637de98fb3c19c6a2046f0..1d9a8c4e9de05827496ee5ce49b66b83a9422bfe 100644 (file)
@@ -99,7 +99,7 @@ err:
 static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
                         int offset, sector_t blocknr)
 {
-       struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+       const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
 
        WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
        fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
@@ -246,7 +246,7 @@ static int fat32_ent_next(struct fat_entry *fatent)
        return 0;
 }
 
-static struct fatent_operations fat12_ops = {
+static const struct fatent_operations fat12_ops = {
        .ent_blocknr    = fat12_ent_blocknr,
        .ent_set_ptr    = fat12_ent_set_ptr,
        .ent_bread      = fat12_ent_bread,
@@ -255,7 +255,7 @@ static struct fatent_operations fat12_ops = {
        .ent_next       = fat12_ent_next,
 };
 
-static struct fatent_operations fat16_ops = {
+static const struct fatent_operations fat16_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat16_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
@@ -264,7 +264,7 @@ static struct fatent_operations fat16_ops = {
        .ent_next       = fat16_ent_next,
 };
 
-static struct fatent_operations fat32_ops = {
+static const struct fatent_operations fat32_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat32_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
@@ -320,7 +320,7 @@ static inline int fat_ent_update_ptr(struct super_block *sb,
                                     int offset, sector_t blocknr)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       struct fatent_operations *ops = sbi->fatent_ops;
+       const struct fatent_operations *ops = sbi->fatent_ops;
        struct buffer_head **bhs = fatent->bhs;
 
        /* Is this fatent's blocks including this entry? */
@@ -349,7 +349,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
 {
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
-       struct fatent_operations *ops = sbi->fatent_ops;
+       const struct fatent_operations *ops = sbi->fatent_ops;
        int err, offset;
        sector_t blocknr;
 
@@ -407,7 +407,7 @@ int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
                  int new, int wait)
 {
        struct super_block *sb = inode->i_sb;
-       struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+       const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        int err;
 
        ops->ent_put(fatent, new);
@@ -432,7 +432,7 @@ static inline int fat_ent_next(struct msdos_sb_info *sbi,
 static inline int fat_ent_read_block(struct super_block *sb,
                                     struct fat_entry *fatent)
 {
-       struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+       const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int offset;
 
@@ -463,7 +463,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
 {
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       struct fatent_operations *ops = sbi->fatent_ops;
+       const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent, prev_ent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, count, err, nr_bhs, idx_clus;
@@ -551,7 +551,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
 {
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       struct fatent_operations *ops = sbi->fatent_ops;
+       const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, err, nr_bhs;
@@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(fat_free_clusters);
 static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
                          unsigned long reada_blocks)
 {
-       struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
+       const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int i, offset;
 
@@ -649,7 +649,7 @@ static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
 int fat_count_free_clusters(struct super_block *sb)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       struct fatent_operations *ops = sbi->fatent_ops;
+       const struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        unsigned long reada_blocks, reada_mask, cur_block;
        int err = 0, free;
index a08f1039909a76e6427cf9e64ddf16c30a1716c9..43d3475da83a79c8857e0861bf298df38a9d7ba7 100644 (file)
 #include <linux/backing-dev.h>
 #include <linux/fsnotify.h>
 #include <linux/security.h>
+#include <linux/falloc.h>
 #include "fat.h"
 
+static long fat_fallocate(struct file *file, int mode,
+                         loff_t offset, loff_t len);
+
 static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
 {
        u32 attr;
@@ -177,6 +181,7 @@ const struct file_operations fat_file_operations = {
 #endif
        .fsync          = fat_file_fsync,
        .splice_read    = generic_file_splice_read,
+       .fallocate      = fat_fallocate,
 };
 
 static int fat_cont_expand(struct inode *inode, loff_t size)
@@ -215,6 +220,62 @@ out:
        return err;
 }
 
+/*
+ * Preallocate space for a file. This implements fat's fallocate file
+ * operation, which gets called from the sys_fallocate system call. User
+ * space requests len bytes at offset. If FALLOC_FL_KEEP_SIZE is set,
+ * we just allocate clusters without zeroing them out. Otherwise we
+ * allocate and zero out clusters via an expanding truncate.
+ */
+static long fat_fallocate(struct file *file, int mode,
+                         loff_t offset, loff_t len)
+{
+       int nr_cluster; /* Number of clusters to be allocated */
+       loff_t mm_bytes; /* Number of bytes to be allocated for file */
+       loff_t ondisksize; /* block aligned on-disk size in bytes */
+       struct inode *inode = file->f_mapping->host;
+       struct super_block *sb = inode->i_sb;
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+       int err = 0;
+
+       /* No support for hole punch or other fallocate flags. */
+       if (mode & ~FALLOC_FL_KEEP_SIZE)
+               return -EOPNOTSUPP;
+
+       /* No support for directories */
+       if (!S_ISREG(inode->i_mode))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&inode->i_mutex);
+       if (mode & FALLOC_FL_KEEP_SIZE) {
+               ondisksize = inode->i_blocks << 9;
+               if ((offset + len) <= ondisksize)
+                       goto error;
+
+               /* First compute the number of clusters to be allocated */
+               mm_bytes = offset + len - ondisksize;
+               nr_cluster = (mm_bytes + (sbi->cluster_size - 1)) >>
+                       sbi->cluster_bits;
+
+               /* Start the allocation. We are not zeroing out the clusters. */
+               while (nr_cluster-- > 0) {
+                       err = fat_add_cluster(inode);
+                       if (err)
+                               goto error;
+               }
+       } else {
+               if ((offset + len) <= i_size_read(inode))
+                       goto error;
+
+               /* This is just an expanding truncate */
+               err = fat_cont_expand(inode, (offset + len));
+       }
+
+error:
+       mutex_unlock(&inode->i_mutex);
+       return err;
+}
+
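For context, a minimal user-space sketch (not part of the patch; the file path is a hypothetical FAT mount) exercising the new FALLOC_FL_KEEP_SIZE path implemented above:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>	/* FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical file on a FAT mount. */
	int fd = open("/mnt/vfat/prealloc.bin", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 1 MiB without changing i_size; clusters are not zeroed. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
		perror("fallocate");
	close(fd);
	return 0;
}

Without FALLOC_FL_KEEP_SIZE, the same request goes through the fat_cont_expand() branch instead, i.e. an expanding truncate that zeroes the new clusters.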
 /* Free all clusters after the skip'th cluster. */
 static int fat_free(struct inode *inode, int skip)
 {
index 6aece96df19fcebdda5ed3f036205bbaeddc65a6..a5599052116c48fcf8f5a0b0a64e9c638276a362 100644 (file)
@@ -93,7 +93,7 @@ static struct fat_floppy_defaults {
 },
 };
 
-static int fat_add_cluster(struct inode *inode)
+int fat_add_cluster(struct inode *inode)
 {
        int err, cluster;
 
@@ -115,10 +115,10 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock,
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        unsigned long mapped_blocks;
-       sector_t phys;
+       sector_t phys, last_block;
        int err, offset;
 
-       err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create);
+       err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false);
        if (err)
                return err;
        if (phys) {
@@ -135,8 +135,14 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock,
                return -EIO;
        }
 
+       last_block = inode->i_blocks >> (sb->s_blocksize_bits - 9);
        offset = (unsigned long)iblock & (sbi->sec_per_clus - 1);
-       if (!offset) {
+       /*
+        * Allocate a cluster only when both of the following hold:
+        * 1) there are no more available blocks
+        * 2) the block is not part of the fallocated region
+        */
+       if (!offset && !(iblock < last_block)) {
                /* TODO: multiple cluster allocation would be desirable. */
                err = fat_add_cluster(inode);
                if (err)
@@ -148,7 +154,7 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock,
        *max_blocks = min(mapped_blocks, *max_blocks);
        MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits;
 
-       err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create);
+       err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false);
        if (err)
                return err;
 
@@ -273,13 +279,38 @@ static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
        return ret;
 }
 
+static int fat_get_block_bmap(struct inode *inode, sector_t iblock,
+               struct buffer_head *bh_result, int create)
+{
+       struct super_block *sb = inode->i_sb;
+       unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int err;
+       sector_t bmap;
+       unsigned long mapped_blocks;
+
+       BUG_ON(create != 0);
+
+       err = fat_bmap(inode, iblock, &bmap, &mapped_blocks, create, true);
+       if (err)
+               return err;
+
+       if (bmap) {
+               map_bh(bh_result, sb, bmap);
+               max_blocks = min(mapped_blocks, max_blocks);
+       }
+
+       bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+
+       return 0;
+}
+
 static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
 {
        sector_t blocknr;
 
        /* fat_get_cluster() assumes the requested blocknr isn't truncated. */
        down_read(&MSDOS_I(mapping->host)->truncate_lock);
-       blocknr = generic_block_bmap(mapping, block, fat_get_block);
+       blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap);
        up_read(&MSDOS_I(mapping->host)->truncate_lock);
 
        return blocknr;
@@ -449,6 +480,24 @@ static int fat_calc_dir_size(struct inode *inode)
        return 0;
 }
 
+static int fat_validate_dir(struct inode *dir)
+{
+       struct super_block *sb = dir->i_sb;
+
+       if (dir->i_nlink < 2) {
+               /* Directory should have at least the "." and ".." entries. */
+               fat_fs_error(sb, "corrupted directory (invalid entries)");
+               return -EIO;
+       }
+       if (MSDOS_I(dir)->i_start == 0 ||
+           MSDOS_I(dir)->i_start == MSDOS_SB(sb)->root_cluster) {
+               /* Directory should point to a valid cluster. */
+               fat_fs_error(sb, "corrupted directory (invalid i_start)");
+               return -EIO;
+       }
+       return 0;
+}
+
 /* doesn't deal with root inode */
 int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
 {
@@ -475,6 +524,10 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
                MSDOS_I(inode)->mmu_private = inode->i_size;
 
                set_nlink(inode, fat_subdirs(inode));
+
+               error = fat_validate_dir(inode);
+               if (error < 0)
+                       return error;
        } else { /* not a directory */
                inode->i_generation |= 1;
                inode->i_mode = fat_make_mode(sbi, de->attr,
@@ -553,13 +606,43 @@ out:
 
 EXPORT_SYMBOL_GPL(fat_build_inode);
 
+static int __fat_write_inode(struct inode *inode, int wait);
+
+static void fat_free_eofblocks(struct inode *inode)
+{
+       /* Release unwritten fallocated blocks on inode eviction. */
+       if ((inode->i_blocks << 9) >
+                       round_up(MSDOS_I(inode)->mmu_private,
+                               MSDOS_SB(inode->i_sb)->cluster_size)) {
+               int err;
+
+               fat_truncate_blocks(inode, MSDOS_I(inode)->mmu_private);
+               /* Fallocate results in updating the i_start/i_logstart
+                * for the zero-byte file. So make it return to the
+                * original state during evict, and commit it, to avoid
+                * any corruption on the next access to the cluster
+                * chain for the file.
+                */
+               err = __fat_write_inode(inode, inode_needs_sync(inode));
+               if (err) {
+                       fat_msg(inode->i_sb, KERN_WARNING, "Failed to "
+                                       "update on disk inode for unused "
+                                       "fallocated blocks, inode could be "
+                                       "corrupted. Please run fsck");
+               }
+
+       }
+}
+
 static void fat_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
        if (!inode->i_nlink) {
                inode->i_size = 0;
                fat_truncate_blocks(inode, 0);
-       }
+       } else
+               fat_free_eofblocks(inode);
+
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        fat_cache_inval_inode(inode);
@@ -1146,7 +1229,12 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
                case Opt_time_offset:
                        if (match_int(&args[0], &option))
                                return -EINVAL;
-                       if (option < -12 * 60 || option > 12 * 60)
+                       /*
+                        * GMT+12 and GMT-12 zones may have DST corrections, so
+                        * at least a 13-hour difference is needed. Make the
+                        * limit 24 hours in case someone invents something unusual.
+                        */
+                       if (option < -24 * 60 || option > 24 * 60)
                                return -EINVAL;
                        opts->tz_set = 1;
                        opts->time_offset = option;
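As an illustration of the relaxed limit (the device and mount point below are hypothetical), an offset of 780 minutes (13 hours), previously rejected by the old 12-hour check, is now accepted:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Hypothetical device and mount point; 780 minutes == UTC+13. */
	if (mount("/dev/sdb1", "/mnt/vfat", "vfat", 0, "time_offset=780"))
		perror("mount");
	return 0;
}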
index db458ee3a546757947a909d1073cb0ac690f66a5..1eb5d415d4346c460f715c90f2127da40830b8fb 100644 (file)
@@ -214,7 +214,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
 {
        struct super_block *sb;
        struct hfs_find_data fd;
-       struct list_head *pos;
+       struct hfs_readdir_data *rd;
        int res, type;
 
        hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
@@ -240,9 +240,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
                }
        }
 
-       list_for_each(pos, &HFS_I(dir)->open_dir_list) {
-               struct hfs_readdir_data *rd =
-                       list_entry(pos, struct hfs_readdir_data, list);
+       list_for_each_entry(rd, &HFS_I(dir)->open_dir_list, list) {
                if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
                        rd->file->f_pos--;
        }
index a4cbdf9824c794678e5bc2e28cafa5bd18687b8f..d250604f985ab91abaa60e70a6a4025049016916 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/parser.h>
 #include <linux/module.h>
+#include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/statfs.h>
 #include <linux/seq_file.h>
index d73291f5f0fcbfb0cd2cff2bb1b628a72f754f6e..b6c00ce0e29e3563037c16fe7bbc754b396f606f 100644 (file)
@@ -395,7 +395,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
        state = *get_task_state(task);
        vsize = eip = esp = 0;
-       permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+       permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
index 2cf5d7e373757d631de6f9d8a174287d46058219..4f764c2ac1a53db75c7c116cdb3bd4b45074c647 100644 (file)
@@ -403,7 +403,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
                         struct pid *pid, struct task_struct *task)
 {
-       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+       struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
        if (mm && !IS_ERR(mm)) {
                unsigned int nwords = 0;
                do {
@@ -430,7 +430,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
 
        wchan = get_wchan(task);
 
-       if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
+       if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
+                       && !lookup_symbol_name(wchan, symname))
                seq_printf(m, "%s", symname);
        else
                seq_putc(m, '0');
@@ -444,7 +445,7 @@ static int lock_trace(struct task_struct *task)
        int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
        if (err)
                return err;
-       if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+       if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
                mutex_unlock(&task->signal->cred_guard_mutex);
                return -EPERM;
        }
@@ -697,7 +698,7 @@ static int proc_fd_access_allowed(struct inode *inode)
         */
        task = get_proc_task(inode);
        if (task) {
-               allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+               allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
                put_task_struct(task);
        }
        return allowed;
@@ -732,7 +733,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
                return true;
        if (in_group_p(pid->pid_gid))
                return true;
-       return ptrace_may_access(task, PTRACE_MODE_READ);
+       return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 }
 
 
@@ -809,7 +810,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
        struct mm_struct *mm = ERR_PTR(-ESRCH);
 
        if (task) {
-               mm = mm_access(task, mode);
+               mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
                put_task_struct(task);
 
                if (!IS_ERR_OR_NULL(mm)) {
@@ -952,6 +953,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        unsigned long src = *ppos;
        int ret = 0;
        struct mm_struct *mm = file->private_data;
+       unsigned long env_start, env_end;
 
        if (!mm)
                return 0;
@@ -963,19 +965,25 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        ret = 0;
        if (!atomic_inc_not_zero(&mm->mm_users))
                goto free;
+
+       down_read(&mm->mmap_sem);
+       env_start = mm->env_start;
+       env_end = mm->env_end;
+       up_read(&mm->mmap_sem);
+
        while (count > 0) {
                size_t this_len, max_len;
                int retval;
 
-               if (src >= (mm->env_end - mm->env_start))
+               if (src >= (env_end - env_start))
                        break;
 
-               this_len = mm->env_end - (mm->env_start + src);
+               this_len = env_end - (env_start + src);
 
                max_len = min_t(size_t, PAGE_SIZE, count);
                this_len = min(max_len, this_len);
 
-               retval = access_remote_vm(mm, (mm->env_start + src),
+               retval = access_remote_vm(mm, (env_start + src),
                        page, this_len, 0);
 
                if (retval <= 0) {
@@ -1860,7 +1868,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
        if (!task)
                goto out_notask;
 
-       mm = mm_access(task, PTRACE_MODE_READ);
+       mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
        if (IS_ERR_OR_NULL(mm))
                goto out;
 
@@ -2013,7 +2021,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
                goto out;
 
        result = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+       if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
                goto out_put_task;
 
        result = -ENOENT;
@@ -2066,7 +2074,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
                goto out;
 
        ret = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+       if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
                goto out_put_task;
 
        ret = 0;
@@ -2533,7 +2541,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
        if (result)
                return result;
 
-       if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
+       if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
                result = -EACCES;
                goto out_unlock;
        }
index 1dece8781f91687307155c38a4c85a3ffba5c6b9..276f12431dbfccdeb4add56a226c07d6062af38b 100644 (file)
@@ -46,7 +46,7 @@ static const char *proc_ns_get_link(struct dentry *dentry,
        if (!task)
                return error;
 
-       if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+       if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
                error = ns_get_path(&ns_path, task, ns_ops);
                if (!error)
                        nd_jump_link(&ns_path);
@@ -67,7 +67,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
        if (!task)
                return res;
 
-       if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+       if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
                res = ns_get_name(name, sizeof(name), task, ns_ops);
                if (res >= 0)
                        res = readlink_copy(buffer, buflen, name);
index 65a1b6c69c111e26bae44cccd7961a303ecdc993..71ffc91060f6d6ad32081bfa53d2a3c7b3706e53 100644 (file)
@@ -468,7 +468,7 @@ struct mem_size_stats {
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty)
 {
-       int i, nr = compound ? HPAGE_PMD_NR : 1;
+       int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
        if (PageAnon(page))
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
deleted file mode 100644 (file)
index 0297e58..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef DMA_COHERENT_H
-#define DMA_COHERENT_H
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-                                      dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-                           void *cpu_addr, size_t size, int *ret);
-/*
- * Standard interface
- */
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                               dma_addr_t device_addr, size_t size, int flags);
-
-void dma_release_declared_memory(struct device *dev);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                       dma_addr_t device_addr, size_t size);
-#else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
-#endif
-
-#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
deleted file mode 100644 (file)
index 6c32af9..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-/* define the dma api to allow compilation but not linking of
- * dma dependent code.  Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
- */
-
-struct scatterlist;
-
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  gfp_t flag);
-
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                   dma_addr_t dma_handle);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-                                   dma_addr_t *dma_handle, gfp_t flag,
-                                   struct dma_attrs *attrs)
-{
-       /* attrs is not supported and ignored */
-       return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-                                 void *cpu_addr, dma_addr_t dma_handle,
-                                 struct dma_attrs *attrs)
-{
-       /* attrs is not supported and ignored */
-       dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-              enum dma_data_direction direction);
-
-extern void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction);
-
-extern int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-          enum dma_data_direction direction);
-
-extern void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction);
-
-extern dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-            size_t size, enum dma_data_direction direction);
-
-extern void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-                       enum dma_data_direction direction);
-
-extern void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             unsigned long offset, size_t size,
-                             enum dma_data_direction direction);
-
-extern void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                   enum dma_data_direction direction);
-
-#define dma_sync_single_for_device dma_sync_single_for_cpu
-#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
-#define dma_sync_sg_for_device dma_sync_sg_for_cpu
-
-extern int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern int
-dma_supported(struct device *dev, u64 mask);
-
-extern int
-dma_set_mask(struct device *dev, u64 mask);
-
-extern int
-dma_get_cache_alignment(void);
-
-extern void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction);
-
-#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
deleted file mode 100644 (file)
index b1bc954..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-#include <linux/kmemcheck.h>
-#include <linux/bug.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
-#include <asm-generic/dma-coherent.h>
-
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
-                                             size_t size,
-                                             enum dma_data_direction dir,
-                                             struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       dma_addr_t addr;
-
-       kmemcheck_mark_initialized(ptr, size);
-       BUG_ON(!valid_dma_direction(dir));
-       addr = ops->map_page(dev, virt_to_page(ptr),
-                            (unsigned long)ptr & ~PAGE_MASK, size,
-                            dir, attrs);
-       debug_dma_map_page(dev, virt_to_page(ptr),
-                          (unsigned long)ptr & ~PAGE_MASK, size,
-                          dir, addr, true);
-       return addr;
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
-                                         size_t size,
-                                         enum dma_data_direction dir,
-                                         struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->unmap_page)
-               ops->unmap_page(dev, addr, size, dir, attrs);
-       debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-                                  int nents, enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       int i, ents;
-       struct scatterlist *s;
-
-       for_each_sg(sg, s, nents, i)
-               kmemcheck_mark_initialized(sg_virt(s), s->length);
-       BUG_ON(!valid_dma_direction(dir));
-       ents = ops->map_sg(dev, sg, nents, dir, attrs);
-       BUG_ON(ents < 0);
-       debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-       return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
-                                     int nents, enum dma_data_direction dir,
-                                     struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       debug_dma_unmap_sg(dev, sg, nents, dir);
-       if (ops->unmap_sg)
-               ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                                     size_t offset, size_t size,
-                                     enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       dma_addr_t addr;
-
-       kmemcheck_mark_initialized(page_address(page) + offset, size);
-       BUG_ON(!valid_dma_direction(dir));
-       addr = ops->map_page(dev, page, offset, size, dir, NULL);
-       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-       return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-                                 size_t size, enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->unmap_page)
-               ops->unmap_page(dev, addr, size, dir, NULL);
-       debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-                                          size_t size,
-                                          enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_cpu)
-               ops->sync_single_for_cpu(dev, addr, size, dir);
-       debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-                                             dma_addr_t addr, size_t size,
-                                             enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_device)
-               ops->sync_single_for_device(dev, addr, size, dir);
-       debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                                                dma_addr_t addr,
-                                                unsigned long offset,
-                                                size_t size,
-                                                enum dma_data_direction dir)
-{
-       const struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_cpu)
-               ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-       debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                                                   dma_addr_t addr,
-                                                   unsigned long offset,
-                                                   size_t size,
-                                                   enum dma_data_direction dir)
-{
-       const struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_single_for_device)
-               ops->sync_single_for_device(dev, addr + offset, size, dir);
-       debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                   int nelems, enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_sg_for_cpu)
-               ops->sync_sg_for_cpu(dev, sg, nelems, dir);
-       debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                      int nelems, enum dma_data_direction dir)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!valid_dma_direction(dir));
-       if (ops->sync_sg_for_device)
-               ops->sync_sg_for_device(dev, sg, nelems, dir);
-       debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-                       unsigned long vm_flags,
-                       pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
-                       unsigned long vm_flags, pgprot_t prot,
-                       const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space.  The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-              dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       BUG_ON(!ops);
-       if (ops->mmap)
-               return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                      void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-                     dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       BUG_ON(!ops);
-       if (ops->get_sgtable)
-               return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-                                       attrs);
-       return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
-}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag)        (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag,
-                                      struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       void *cpu_addr;
-
-       BUG_ON(!ops);
-
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
-               return cpu_addr;
-
-       if (!arch_dma_alloc_attrs(&dev, &flag))
-               return NULL;
-       if (!ops->alloc)
-               return NULL;
-
-       cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-       return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-                                    void *cpu_addr, dma_addr_t dma_handle,
-                                    struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       BUG_ON(!ops);
-       WARN_ON(irqs_disabled());
-
-       if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-               return;
-
-       if (!ops->free)
-               return;
-
-       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-       ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag)
-{
-       return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle)
-{
-       return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
-}
-
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp)
-{
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-       return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle)
-{
-       DEFINE_DMA_ATTRS(attrs);
-
-       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-       dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       debug_dma_mapping_error(dev, dma_addr);
-
-       if (get_dma_ops(dev)->mapping_error)
-               return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-
-#ifdef DMA_ERROR_CODE
-       return dma_addr == DMA_ERROR_CODE;
-#else
-       return 0;
-#endif
-}
-
-#ifndef HAVE_ARCH_DMA_SUPPORTED
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       if (!ops)
-               return 0;
-       if (!ops->dma_supported)
-               return 1;
-       return ops->dma_supported(dev, mask);
-}
-#endif
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       if (ops->set_dma_mask)
-               return ops->set_dma_mask(dev, mask);
-
-       if (!dev->dma_mask || !dma_supported(dev, mask))
-               return -EIO;
-       *dev->dma_mask = mask;
-       return 0;
-}
-#endif
-
-#endif
index 59915ea5373ca798dca185070e11af88cc7745d9..fc14275ff34e49390a6dab06b023b6c2488c9bdc 100644 (file)
@@ -85,10 +85,14 @@ extern int nr_cpu_ids;
  *    only one CPU.
  */
 
-extern const struct cpumask *const cpu_possible_mask;
-extern const struct cpumask *const cpu_online_mask;
-extern const struct cpumask *const cpu_present_mask;
-extern const struct cpumask *const cpu_active_mask;
+extern struct cpumask __cpu_possible_mask;
+extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_present_mask;
+extern struct cpumask __cpu_active_mask;
+#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
+#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
+#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
 
 #if NR_CPUS > 1
 #define num_online_cpus()      cpumask_weight(cpu_online_mask)
@@ -716,14 +720,49 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
 #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
 
 /* Wrappers for arch boot code to manipulate normally-constant masks */
-void set_cpu_possible(unsigned int cpu, bool possible);
-void set_cpu_present(unsigned int cpu, bool present);
-void set_cpu_online(unsigned int cpu, bool online);
-void set_cpu_active(unsigned int cpu, bool active);
 void init_cpu_present(const struct cpumask *src);
 void init_cpu_possible(const struct cpumask *src);
 void init_cpu_online(const struct cpumask *src);
 
+static inline void
+set_cpu_possible(unsigned int cpu, bool possible)
+{
+       if (possible)
+               cpumask_set_cpu(cpu, &__cpu_possible_mask);
+       else
+               cpumask_clear_cpu(cpu, &__cpu_possible_mask);
+}
+
+static inline void
+set_cpu_present(unsigned int cpu, bool present)
+{
+       if (present)
+               cpumask_set_cpu(cpu, &__cpu_present_mask);
+       else
+               cpumask_clear_cpu(cpu, &__cpu_present_mask);
+}
+
+static inline void
+set_cpu_online(unsigned int cpu, bool online)
+{
+       if (online) {
+               cpumask_set_cpu(cpu, &__cpu_online_mask);
+               cpumask_set_cpu(cpu, &__cpu_active_mask);
+       } else {
+               cpumask_clear_cpu(cpu, &__cpu_online_mask);
+       }
+}
+
+static inline void
+set_cpu_active(unsigned int cpu, bool active)
+{
+       if (active)
+               cpumask_set_cpu(cpu, &__cpu_active_mask);
+       else
+               cpumask_clear_cpu(cpu, &__cpu_active_mask);
+}
+
+
 /**
  * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
  * @bitmap: the bitmap
index c8e1831d757232c7364ebb909cb77a6442bb4a8a..99c0be00b47c4993f205babd17ca8a8442274171 100644 (file)
@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs)
        bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
 }
 
-#ifdef CONFIG_HAVE_DMA_ATTRS
 /**
  * dma_set_attr - set a specific attribute
  * @attr: attribute to set
@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
        BUG_ON(attr >= DMA_ATTR_MAX);
        return test_bit(attr, attrs->flags);
 }
-#else /* !CONFIG_HAVE_DMA_ATTRS */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-}
 
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-       return 0;
-}
-#endif /* CONFIG_HAVE_DMA_ATTRS */
 #endif /* _DMA_ATTR_H */
index 2e551e2d2d03a7d78c80633637672a483ae42199..75857cda38e989e5a44150c5abb9bd1bd4872957 100644 (file)
@@ -6,8 +6,11 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -83,10 +86,383 @@ static inline int is_device_dma_capable(struct device *dev)
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
 }
 
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+                                      dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                           void *cpu_addr, size_t size, int *ret);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 #else
-#include <asm-generic/dma-mapping-broken.h>
+/*
+ * Define the dma api to allow compilation but not linking of
+ * dma dependent code.  Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
+extern struct dma_map_ops bad_dma_ops;
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+       return &bad_dma_ops;
+}
+#endif
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+                                             size_t size,
+                                             enum dma_data_direction dir,
+                                             struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       dma_addr_t addr;
+
+       kmemcheck_mark_initialized(ptr, size);
+       BUG_ON(!valid_dma_direction(dir));
+       addr = ops->map_page(dev, virt_to_page(ptr),
+                            offset_in_page(ptr), size,
+                            dir, attrs);
+       debug_dma_map_page(dev, virt_to_page(ptr),
+                          offset_in_page(ptr), size,
+                          dir, addr, true);
+       return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+                                         size_t size,
+                                         enum dma_data_direction dir,
+                                         struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->unmap_page)
+               ops->unmap_page(dev, addr, size, dir, attrs);
+       debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir,
+                                  struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       int i, ents;
+       struct scatterlist *s;
+
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
+       BUG_ON(!valid_dma_direction(dir));
+       ents = ops->map_sg(dev, sg, nents, dir, attrs);
+       BUG_ON(ents < 0);
+       debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+       return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+                                     int nents, enum dma_data_direction dir,
+                                     struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       debug_dma_unmap_sg(dev, sg, nents, dir);
+       if (ops->unmap_sg)
+               ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+                                     enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       dma_addr_t addr;
+
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
+       BUG_ON(!valid_dma_direction(dir));
+       addr = ops->map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+       return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                 size_t size, enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->unmap_page)
+               ops->unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+                                          size_t size,
+                                          enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_single_for_cpu)
+               ops->sync_single_for_cpu(dev, addr, size, dir);
+       debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+                                             dma_addr_t addr, size_t size,
+                                             enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_single_for_device)
+               ops->sync_single_for_device(dev, addr, size, dir);
+       debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+                                                dma_addr_t addr,
+                                                unsigned long offset,
+                                                size_t size,
+                                                enum dma_data_direction dir)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_single_for_cpu)
+               ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+       debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+                                                   dma_addr_t addr,
+                                                   unsigned long offset,
+                                                   size_t size,
+                                                   enum dma_data_direction dir)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_single_for_device)
+               ops->sync_single_for_device(dev, addr + offset, size, dir);
+       debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                   int nelems, enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_sg_for_cpu)
+               ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+       debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                      int nelems, enum dma_data_direction dir)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->sync_sg_for_device)
+               ops->sync_sg_for_device(dev, sg, nelems, dir);
+       debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                       unsigned long vm_flags,
+                       pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                       unsigned long vm_flags, pgprot_t prot,
+                       const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+              dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+       if (ops->mmap)
+               return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
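A hedged driver-side sketch (not part of this patch; the mydev structure and the mmap handler are hypothetical) showing how the dma_mmap_coherent() wrapper defined above is typically used for a buffer previously obtained with dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct mydev {
	struct device *dev;	/* device the buffer was allocated for */
	void *cpu_addr;		/* kernel address from dma_alloc_coherent() */
	dma_addr_t dma_handle;	/* matching device address */
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	/* Hand the coherent buffer to user space; the driver must not free
	 * it until the user mapping has been released. */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr, md->dma_handle,
				 vma->vm_end - vma->vm_start);
}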
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                      void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+                     dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+       if (ops->get_sgtable)
+               return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+                                       attrs);
+       return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag)        (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag,
+                                      struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       void *cpu_addr;
+
+       BUG_ON(!ops);
+
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+               return cpu_addr;
+
+       if (!arch_dma_alloc_attrs(&dev, &flag))
+               return NULL;
+       if (!ops->alloc)
+               return NULL;
+
+       cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+       return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle,
+                                    struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!ops);
+       WARN_ON(irqs_disabled());
+
+       if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+               return;
+
+       if (!ops->free)
+               return;
+
+       debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag)
+{
+       return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+               void *cpu_addr, dma_addr_t dma_handle)
+{
+       return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
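A hedged allocation sketch for the coherent wrappers just defined, reusing the hypothetical struct my_drv fields from the mmap sketch earlier; nothing here comes from the patch itself.

static int my_drv_setup(struct my_drv *drv)
{
	drv->size = SZ_4K;
	drv->cpu_addr = dma_alloc_coherent(drv->dev, drv->size,
					   &drv->dma_handle, GFP_KERNEL);
	return drv->cpu_addr ? 0 : -ENOMEM;
}

static void my_drv_teardown(struct my_drv *drv)
{
	dma_free_coherent(drv->dev, drv->size, drv->cpu_addr, drv->dma_handle);
}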
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp)
+{
+       DEFINE_DMA_ATTRS(attrs);
+
+       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+       return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+               void *cpu_addr, dma_addr_t dma_handle)
+{
+       DEFINE_DMA_ATTRS(attrs);
+
+       dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+       dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       debug_dma_mapping_error(dev, dma_addr);
+
+       if (get_dma_ops(dev)->mapping_error)
+               return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+       return dma_addr == DMA_ERROR_CODE;
+#else
+       return 0;
+#endif
+}
+
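For streaming mappings the convention is to validate every returned handle with dma_mapping_error() before handing it to hardware. A minimal sketch, assuming dma_map_single()/dma_unmap_single() from earlier in this header and a hypothetical my_drv_send() caller:

static int my_drv_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* never program a failed handle */

	/* ...start the transfer and wait for completion here... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}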
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (!ops)
+               return 0;
+       if (!ops->dma_supported)
+               return 1;
+       return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (ops->set_dma_mask)
+               return ops->set_dma_mask(dev, mask);
+
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+       *dev->dma_mask = mask;
+       return 0;
+}
 #endif
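A common probe-time idiom built on dma_set_mask(): try a wide mask first and fall back to 32 bits if the platform rejects it. Sketch only; the function name is made up:

static int my_drv_init_dma_mask(struct device *dev)
{
	if (!dma_set_mask(dev, DMA_BIT_MASK(64)))
		return 0;			/* 64-bit DMA accepted */

	/* Fall back to the safe 32-bit mask otherwise. */
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}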
 
 static inline u64 dma_get_mask(struct device *dev)
@@ -208,7 +584,13 @@ static inline int dma_get_cache_alignment(void)
 #define DMA_MEMORY_INCLUDES_CHILDREN   0x04
 #define DMA_MEMORY_EXCLUSIVE           0x08
 
-#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                               dma_addr_t device_addr, size_t size, int flags);
+void dma_release_declared_memory(struct device *dev);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                       dma_addr_t device_addr, size_t size);
+#else
 static inline int
 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size, int flags)
@@ -227,7 +609,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
 {
        return ERR_PTR(-EBUSY);
 }
-#endif
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 /*
  * Managed DMA API
@@ -240,13 +622,13 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_handle);
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 extern int dmam_declare_coherent_memory(struct device *dev,
                                        phys_addr_t phys_addr,
                                        dma_addr_t device_addr, size_t size,
                                        int flags);
 extern void dmam_release_declared_memory(struct device *dev);
-#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 static inline int dmam_declare_coherent_memory(struct device *dev,
                                phys_addr_t phys_addr, dma_addr_t device_addr,
                                size_t size, gfp_t gfp)
@@ -257,24 +639,8 @@ static inline int dmam_declare_coherent_memory(struct device *dev,
 static inline void dmam_release_declared_memory(struct device *dev)
 {
 }
-#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
-
-#ifndef CONFIG_HAVE_DMA_ATTRS
-struct dma_attrs;
-
-#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
-       dma_map_single(dev, cpu_addr, size, dir)
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
-#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
-       dma_unmap_single(dev, dma_addr, size, dir)
-
-#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
-       dma_map_sg(dev, sgl, nents, dir)
-
-#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
-       dma_unmap_sg(dev, sgl, nents, dir)
-
-#else
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                                           dma_addr_t *dma_addr, gfp_t gfp)
 {
@@ -300,7 +666,6 @@ static inline int dma_mmap_writecombine(struct device *dev,
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
 }
-#endif /* CONFIG_HAVE_DMA_ATTRS */
 
 #ifdef CONFIG_NEED_DMA_MAP_STATE
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
index fffd88d7f4269852277e7d9bed29e86c1a9f9490..32403b5716e5b8c4a5eb21bfad1f2e4bf7604501 100644 (file)
@@ -29,6 +29,7 @@ struct device;
 struct resource;
 
 __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+void __ioread32_copy(void *to, const void __iomem *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 
 #ifdef CONFIG_MMU
index 7b68d2788a56da4e21cb71c9780bb11470ca0064..2cc643c6e8708207aea022d0e1f790f805364be2 100644 (file)
@@ -109,11 +109,7 @@ struct compat_kexec_segment {
 };
 #endif
 
-struct kexec_sha_region {
-       unsigned long start;
-       unsigned long len;
-};
-
+#ifdef CONFIG_KEXEC_FILE
 struct purgatory_info {
        /* Pointer to elf header of read only purgatory */
        Elf_Ehdr *ehdr;
@@ -130,6 +126,28 @@ struct purgatory_info {
        unsigned long purgatory_load_addr;
 };
 
+typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
+typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
+                            unsigned long kernel_len, char *initrd,
+                            unsigned long initrd_len, char *cmdline,
+                            unsigned long cmdline_len);
+typedef int (kexec_cleanup_t)(void *loader_data);
+
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+typedef int (kexec_verify_sig_t)(const char *kernel_buf,
+                                unsigned long kernel_len);
+#endif
+
+struct kexec_file_ops {
+       kexec_probe_t *probe;
+       kexec_load_t *load;
+       kexec_cleanup_t *cleanup;
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+       kexec_verify_sig_t *verify_sig;
+#endif
+};
+#endif
+
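To illustrate the ops table above (sketch only; no such loader is added by this patch and the names are invented), an image loader supplies callbacks matching the typedefs and collects them in a struct kexec_file_ops:

static int example_probe(const char *kernel_buf, unsigned long kernel_size)
{
	/* Return 0 only if this loader recognizes the image format. */
	if (kernel_size < 4 || memcmp(kernel_buf, "\177ELF", 4))
		return -ENOEXEC;
	return 0;
}

static void *example_load(struct kimage *image, char *kernel_buf,
			  unsigned long kernel_len, char *initrd,
			  unsigned long initrd_len, char *cmdline,
			  unsigned long cmdline_len)
{
	/* A real loader would build image segments here; this stub fails. */
	return ERR_PTR(-ENOEXEC);
}

static struct kexec_file_ops example_kexec_ops = {
	.probe	= example_probe,
	.load	= example_load,
};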
 struct kimage {
        kimage_entry_t head;
        kimage_entry_t *entry;
@@ -161,6 +179,7 @@ struct kimage {
        struct kimage_arch arch;
 #endif
 
+#ifdef CONFIG_KEXEC_FILE
        /* Additional fields for file based kexec syscall */
        void *kernel_buf;
        unsigned long kernel_buf_len;
@@ -179,38 +198,7 @@ struct kimage {
 
        /* Information for loading purgatory */
        struct purgatory_info purgatory_info;
-};
-
-/*
- * Keeps track of buffer parameters as provided by caller for requesting
- * memory placement of buffer.
- */
-struct kexec_buf {
-       struct kimage *image;
-       char *buffer;
-       unsigned long bufsz;
-       unsigned long mem;
-       unsigned long memsz;
-       unsigned long buf_align;
-       unsigned long buf_min;
-       unsigned long buf_max;
-       bool top_down;          /* allocate from top of memory hole */
-};
-
-typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
-typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
-                            unsigned long kernel_len, char *initrd,
-                            unsigned long initrd_len, char *cmdline,
-                            unsigned long cmdline_len);
-typedef int (kexec_cleanup_t)(void *loader_data);
-typedef int (kexec_verify_sig_t)(const char *kernel_buf,
-                                unsigned long kernel_len);
-
-struct kexec_file_ops {
-       kexec_probe_t *probe;
-       kexec_load_t *load;
-       kexec_cleanup_t *cleanup;
-       kexec_verify_sig_t *verify_sig;
+#endif
 };
 
 /* kexec interface functions */
index 2a6b9947aaa3191e7f24708dcd8726ccb2418b9f..cb0ba9f2a9a291112eb7bbdc3eb28e9fba8ad4a4 100644 (file)
@@ -40,7 +40,7 @@ struct list_lru_node {
        spinlock_t              lock;
        /* global list, used for the root cgroup in cgroup aware lrus */
        struct list_lru_one     lru;
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg   *memcg_lrus;
 #endif
@@ -48,7 +48,7 @@ struct list_lru_node {
 
 struct list_lru {
        struct list_lru_node    *node;
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
        struct list_head        list;
 #endif
 };
index 4356686b0a3914a2d47271ca87ff74943782be42..6b784c59f321f413b30fec0ba6048db7f3cf617b 100644 (file)
@@ -9,8 +9,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define LZ4_MEM_COMPRESS       (4096 * sizeof(unsigned char *))
-#define LZ4HC_MEM_COMPRESS     (65538 * sizeof(unsigned char *))
+#define LZ4_MEM_COMPRESS       (16384)
+#define LZ4HC_MEM_COMPRESS     (262144 + (2 * sizeof(unsigned char *)))
 
 /*
  * lz4_compressbound()
index 189f04d4d2ecc36c706299dd11ecbd58bd64e315..9ae48d4aeb5ec7d6fafa9528c79f65f62a091ed3 100644 (file)
@@ -50,6 +50,9 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
+       /* default hierarchy stats */
+       MEMCG_SOCK,
+       MEMCG_NR_STAT,
 };
 
 struct mem_cgroup_reclaim_cookie {
@@ -85,15 +88,9 @@ enum mem_cgroup_events_target {
        MEM_CGROUP_NTARGETS,
 };
 
-struct cg_proto {
-       struct page_counter     memory_allocated;       /* Current allocated memory. */
-       int                     memory_pressure;
-       bool                    active;
-};
-
 #ifdef CONFIG_MEMCG
 struct mem_cgroup_stat_cpu {
-       long count[MEM_CGROUP_STAT_NSTATS];
+       long count[MEMCG_NR_STAT];
        unsigned long events[MEMCG_NR_EVENTS];
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -152,6 +149,12 @@ struct mem_cgroup_thresholds {
        struct mem_cgroup_threshold_ary *spare;
 };
 
+enum memcg_kmem_state {
+       KMEM_NONE,
+       KMEM_ALLOCATED,
+       KMEM_ONLINE,
+};
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -163,8 +166,12 @@ struct mem_cgroup {
 
        /* Accounted resources */
        struct page_counter memory;
+       struct page_counter swap;
+
+       /* Legacy consumer-oriented counters */
        struct page_counter memsw;
        struct page_counter kmem;
+       struct page_counter tcpmem;
 
        /* Normal memory consumption range */
        unsigned long low;
@@ -178,9 +185,6 @@ struct mem_cgroup {
        /* vmpressure notifications */
        struct vmpressure vmpressure;
 
-       /* css_online() has been completed */
-       int initialized;
-
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
@@ -227,14 +231,16 @@ struct mem_cgroup {
         */
        struct mem_cgroup_stat_cpu __percpu *stat;
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-       struct cg_proto tcp_mem;
-#endif
-#if defined(CONFIG_MEMCG_KMEM)
+       unsigned long           socket_pressure;
+
+       /* Legacy tcp memory accounting */
+       bool                    tcpmem_active;
+       int                     tcpmem_pressure;
+
+#ifndef CONFIG_SLOB
         /* Index in the kmem_cache->memcg_params.memcg_caches array */
        int kmemcg_id;
-       bool kmem_acct_activated;
-       bool kmem_acct_active;
+       enum memcg_kmem_state kmem_state;
 #endif
 
        int last_scanned_node;
@@ -249,10 +255,6 @@ struct mem_cgroup {
        struct wb_domain cgwb_domain;
 #endif
 
-#ifdef CONFIG_INET
-       unsigned long           socket_pressure;
-#endif
-
        /* List of events which userspace want to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;
@@ -356,6 +358,13 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
+{
+       if (mem_cgroup_disabled())
+               return true;
+       return !!(memcg->css.flags & CSS_ONLINE);
+}
+
 /*
  * For memory reclaim.
  */
@@ -364,20 +373,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                int nr_pages);
 
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
-{
-       struct mem_cgroup_per_zone *mz;
-       struct mem_cgroup *memcg;
-
-       if (mem_cgroup_disabled())
-               return true;
-
-       mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
-       memcg = mz->memcg;
-
-       return !!(memcg->css.flags & CSS_ONLINE);
-}
-
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
@@ -590,13 +585,13 @@ static inline bool mem_cgroup_disabled(void)
        return true;
 }
 
-static inline bool
-mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 {
        return true;
 }
 
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+static inline bool
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
        return true;
 }
@@ -707,15 +702,13 @@ void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
-#if defined(CONFIG_MEMCG) && defined(CONFIG_INET)
+#ifdef CONFIG_MEMCG
 extern struct static_key_false memcg_sockets_enabled_key;
 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-#ifdef CONFIG_MEMCG_KMEM
-       if (memcg->tcp_mem.memory_pressure)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
-#endif
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
@@ -730,7 +723,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 }
 #endif
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 extern struct static_key_false memcg_kmem_enabled_key;
 
 extern int memcg_nr_cache_ids;
@@ -750,9 +743,9 @@ static inline bool memcg_kmem_enabled(void)
        return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
 {
-       return memcg->kmem_acct_active;
+       return memcg->kmem_state == KMEM_ONLINE;
 }
 
 /*
@@ -850,7 +843,7 @@ static inline bool memcg_kmem_enabled(void)
        return false;
 }
 
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
 {
        return false;
 }
@@ -886,5 +879,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+
 #endif /* _LINUX_MEMCONTROL_H */
index 061265f9287676afce057892572222a477a47505..504c98a278d46606d27f09d109589e0a1e2263d8 100644 (file)
@@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ       0x01
 #define PTRACE_MODE_ATTACH     0x02
 #define PTRACE_MODE_NOAUDIT    0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
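A hedged illustration of how callers pick between the two new credential flags (these helpers are invented for the example; the real call sites are converted later in this series): procfs-style reads pass FSCREDS, explicit process-targeting syscalls pass REALCREDS.

static int example_proc_check(struct task_struct *task)
{
	/* Filesystem-style access: checked against the caller's fs{u,g}id. */
	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) ? 0 : -EPERM;
}

static int example_syscall_check(struct task_struct *task)
{
	/* ptrace()/process_vm_writev()-style access: real credentials. */
	return ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS) ? 0 : -EPERM;
}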
 
 static inline int ptrace_reparented(struct task_struct *child)
index 33170dbd9db40227f6d1f200193d83772ed9db28..57e7d87d2d4c15de691d8fa3075f779ddbda5172 100644 (file)
@@ -154,7 +154,7 @@ do {                                                                        \
  * radix_tree_gang_lookup_tag_slot
  * radix_tree_tagged
  *
- * The first 7 functions are able to be called locklessly, using RCU. The
+ * The first 8 functions are able to be called locklessly, using RCU. The
  * caller must ensure calls to these functions are made within rcu_read_lock()
  * regions. Other readers (lock-free or otherwise) and modifications may be
  * running concurrently.
index a5aa7ae671f42d64727ebc2df1ba2950cebae66f..b6900099ea81c9445dfaec79ca07573726034787 100644 (file)
@@ -50,7 +50,7 @@ struct rb_root {
 #define RB_ROOT        (struct rb_root) { NULL, }
 #define        rb_entry(ptr, type, member) container_of(ptr, type, member)
 
-#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
 
 /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
 #define RB_EMPTY_NODE(node)  \
index 61aa9bbea871fc6017f612bdbb06f2004782ee97..f1e81e128592cbcf5b3c3e61949bcbe75b34c74f 100644 (file)
@@ -1476,10 +1476,10 @@ struct task_struct {
        unsigned in_iowait:1;
 #ifdef CONFIG_MEMCG
        unsigned memcg_may_oom:1;
-#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_SLOB
        unsigned memcg_kmem_skip_account:1;
 #endif
+#endif
 #ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
 #endif
@@ -1643,6 +1643,9 @@ struct task_struct {
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
 #endif
+#ifdef CONFIG_UBSAN
+       unsigned int in_ubsan;
+#endif
 
 /* journalling filesystem info */
        void *journal_info;
index 6fb801686ad6cea28b6fb4b1538dad63bd61117c..04e88182962511da30753010075c42f8ab30f086 100644 (file)
@@ -52,7 +52,7 @@ struct sysv_shm {
 
 long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
              unsigned long shmlba);
-int is_file_shm_hugepages(struct file *file);
+bool is_file_shm_hugepages(struct file *file);
 void exit_shm(struct task_struct *task);
 #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
 #else
@@ -66,9 +66,9 @@ static inline long do_shmat(int shmid, char __user *shmaddr,
 {
        return -ENOSYS;
 }
-static inline int is_file_shm_hugepages(struct file *file)
+static inline bool is_file_shm_hugepages(struct file *file)
 {
-       return 0;
+       return false;
 }
 static inline void exit_shm(struct task_struct *task)
 {
index 3ffee74220126d9b6f52df7b600a6de33f29dbb1..3627d5c1bc470d7b655416e9a941cd2b4e378e03 100644 (file)
@@ -86,7 +86,7 @@
 #else
 # define SLAB_FAILSLAB         0x00000000UL
 #endif
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 # define SLAB_ACCOUNT          0x04000000UL    /* Account to memcg */
 #else
 # define SLAB_ACCOUNT          0x00000000UL
index 33d049066c3d43a1722fdf7b5832aab4d4e39533..cf139d3fa51392caa1ab4b4aa8304e1ad26cde3a 100644 (file)
@@ -69,7 +69,8 @@ struct kmem_cache {
         */
        int obj_offset;
 #endif /* CONFIG_DEBUG_SLAB */
-#ifdef CONFIG_MEMCG_KMEM
+
+#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
 #endif
 
index 33885118523c7ce695e2c901e42d76250a84a8da..b7e57927f5218f57ee5c4fe35da90b5ce84d2854 100644 (file)
@@ -84,7 +84,7 @@ struct kmem_cache {
 #ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
 #endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
        int max_attr_size; /* for propagation, maximum size of a stored attr */
 #ifdef CONFIG_SYSFS
index 414e101cd06195fe60339e71a8dfd2487001df7e..d18b65c53dbb8b421f5a1499a2791ddc4f594319 100644 (file)
@@ -350,33 +350,7 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
-#ifdef CONFIG_MEMCG
-static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
-{
-       /* root ? */
-       if (mem_cgroup_disabled() || !memcg->css.parent)
-               return vm_swappiness;
-
-       return memcg->swappiness;
-}
 
-#else
-static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
-{
-       return vm_swappiness;
-}
-#endif
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
-#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
-{
-}
-static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
-{
-}
-#endif
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
@@ -555,5 +529,55 @@ static inline swp_entry_t get_swap_page(void)
 }
 
 #endif /* CONFIG_SWAP */
+
+#ifdef CONFIG_MEMCG
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+       /* root ? */
+       if (mem_cgroup_disabled() || !memcg->css.parent)
+               return vm_swappiness;
+
+       return memcg->swappiness;
+}
+
+#else
+static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
+{
+       return vm_swappiness;
+}
+#endif
+
+#ifdef CONFIG_MEMCG_SWAP
+extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
+extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
+extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
+extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
+extern bool mem_cgroup_swap_full(struct page *page);
+#else
+static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+{
+}
+
+static inline int mem_cgroup_try_charge_swap(struct page *page,
+                                            swp_entry_t entry)
+{
+       return 0;
+}
+
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
+{
+}
+
+static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
+{
+       return get_nr_swap_pages();
+}
+
+static inline bool mem_cgroup_swap_full(struct page *page)
+{
+       return vm_swap_full();
+}
+#endif
+
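To tie the new hooks together (a sketch only; the real callers live in mm/ and are not part of this excerpt): the swap-out path charges the slot against the cgroup's swap limit, and later paths drop swap cache aggressively once that limit is hit.

static int example_add_to_swap(struct page *page, swp_entry_t entry)
{
	/* Charge the swap slot against the owning cgroup's swap.max. */
	return mem_cgroup_try_charge_swap(page, entry) ? -ENOMEM : 0;
}

static void example_after_swapin(struct page *page)
{
	/* Free the swap cache eagerly once the memcg swap limit is full. */
	if (mem_cgroup_swap_full(page))
		try_to_free_swap(page);
}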
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
deleted file mode 100644 (file)
index 01ff7c6..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _TCP_MEMCG_H
-#define _TCP_MEMCG_H
-
-struct cgroup_subsys;
-struct mem_cgroup;
-
-int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
-void tcp_destroy_cgroup(struct mem_cgroup *memcg);
-#endif /* _TCP_MEMCG_H */
index bc81fb2e1f0e19d363d3c8095c85b971c521d12a..1c3154913a391fb0f67ade68150fd8af939d791d 100644 (file)
@@ -26,6 +26,9 @@
 #define EPOLL_CTL_DEL 2
 #define EPOLL_CTL_MOD 3
 
+/* Set exclusive wakeup mode for the target file descriptor */
+#define EPOLLEXCLUSIVE (1 << 28)
+
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
  * from happening while those events are being processed.
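A user-space sketch of the new flag (assuming a libc whose <sys/epoll.h> already exposes EPOLLEXCLUSIVE, and open epfd/listen_fd descriptors): several epoll instances can add the same listening socket exclusively so that an incoming connection wakes only one of them.

#include <sys/epoll.h>

static int add_exclusive(int epfd, int listen_fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLEXCLUSIVE,
		.data.fd = listen_fd,
	};

	/* Exclusive wakeup: avoids the thundering herd on accept(). */
	return epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
}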
index 5b86082fa238a86462650deb6613772847b70c4e..22320804fbafdc57b7228b68392199657a7c2f49 100644 (file)
@@ -964,17 +964,6 @@ config MEMCG_SWAP_ENABLED
          For those who want to have the feature enabled by default should
          select this option (if, for some reason, they need to disable it
          then swapaccount=0 does the trick).
-config MEMCG_KMEM
-       bool "Memory Resource Controller Kernel Memory accounting"
-       depends on MEMCG
-       depends on SLUB || SLAB
-       help
-         The Kernel Memory extension for Memory Resource Controller can limit
-         the amount of memory used by kernel objects in the system. Those are
-         fundamentally different from the entities handled by the standard
-         Memory Controller, which are page-based, and can be swapped. Users of
-         the kmem extension can use it to guarantee that no group of processes
-         will ever exhaust kernel resources alone.
 
 config BLK_CGROUP
        bool "IO controller"
@@ -1071,6 +1060,11 @@ config CGROUP_FREEZER
          Provides a way to freeze and unfreeze all tasks in a
          cgroup.
 
+         This option affects the ORIGINAL cgroup interface. The cgroup2 memory
+         controller includes important in-kernel memory consumers by default.
+
+         If you're using cgroup2, say N.
+
 config CGROUP_HUGETLB
        bool "HugeTLB controller"
        depends on HUGETLB_PAGE
@@ -1182,10 +1176,9 @@ config USER_NS
          to provide different user info for different servers.
 
          When user namespaces are enabled in the kernel it is
-         recommended that the MEMCG and MEMCG_KMEM options also be
-         enabled and that user-space use the memory control groups to
-         limit the amount of memory a memory unprivileged users can
-         use.
+         recommended that the MEMCG option also be enabled and that
+         user-space use the memory control groups to limit the amount
+         of memory that unprivileged users can use.
 
          If unsure, say N.
 
index f5b978a9bb92892a5e876ae3ce1338ad8a896e04..067af1d9e8b620bfac146dae57d0a0f690b33475 100644 (file)
@@ -57,11 +57,11 @@ static inline int rd_load_image(char *from) { return 0; }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-int __init initrd_load(void);
+bool __init initrd_load(void);
 
 #else
 
-static inline int initrd_load(void) { return 0; }
+static inline bool initrd_load(void) { return false; }
 
 #endif
 
index 3e0878e8a80d65548ffac907a3285f9deea8d5a3..a1000ca29fc9971f82609c8cba8d137b01452603 100644 (file)
@@ -116,7 +116,7 @@ static void __init handle_initrd(void)
        }
 }
 
-int __init initrd_load(void)
+bool __init initrd_load(void)
 {
        if (mount_initrd) {
                create_dev("/dev/ram", Root_RAM0);
@@ -129,9 +129,9 @@ int __init initrd_load(void)
                if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
                        sys_unlink("/initrd.image");
                        handle_initrd();
-                       return 1;
+                       return true;
                }
        }
        sys_unlink("/initrd.image");
-       return 0;
+       return false;
 }
index c6ebefafa496106fbb398656245d75f4d59f1f8d..58c9e374704bb20cfff41fa92afcf7cafe498c32 100644 (file)
@@ -164,10 +164,10 @@ static const char *panic_later, *panic_param;
 
 extern const struct obs_kernel_param __setup_start[], __setup_end[];
 
-static int __init obsolete_checksetup(char *line)
+static bool __init obsolete_checksetup(char *line)
 {
        const struct obs_kernel_param *p;
-       int had_early_param = 0;
+       bool had_early_param = false;
 
        p = __setup_start;
        do {
@@ -179,13 +179,13 @@ static int __init obsolete_checksetup(char *line)
                                 * Keep iterating, as we can have early
                                 * params and __setups of same names 8( */
                                if (line[n] == '\0' || line[n] == '=')
-                                       had_early_param = 1;
+                                       had_early_param = true;
                        } else if (!p->setup_func) {
                                pr_warn("Parameter %s is obsolete, ignored\n",
                                        p->str);
-                               return 1;
+                               return true;
                        } else if (p->setup_func(line + n))
-                               return 1;
+                               return true;
                }
                p++;
        } while (p < __setup_end);
index 41787276e14170af7de8261181721991fde528bf..ed3027d0f277a53745b41ea731c4783e3c554bee 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -459,7 +459,7 @@ static const struct file_operations shm_file_operations_huge = {
        .fallocate      = shm_fallocate,
 };
 
-int is_file_shm_hugepages(struct file *file)
+bool is_file_shm_hugepages(struct file *file)
 {
        return file->f_op == &shm_file_operations_huge;
 }
index 85ff5e26e23b45b34201120c758082599f995b7e..5b9d39633ce9d9c01bac677f3575e9b301dcb394 100644 (file)
@@ -759,71 +759,33 @@ const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
 EXPORT_SYMBOL(cpu_all_bits);
 
 #ifdef CONFIG_INIT_ALL_POSSIBLE
-static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
-       = CPU_BITS_ALL;
+struct cpumask __cpu_possible_mask __read_mostly
+       = {CPU_BITS_ALL};
 #else
-static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+struct cpumask __cpu_possible_mask __read_mostly;
 #endif
-const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
-EXPORT_SYMBOL(cpu_possible_mask);
+EXPORT_SYMBOL(__cpu_possible_mask);
 
-static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
-const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
-EXPORT_SYMBOL(cpu_online_mask);
+struct cpumask __cpu_online_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_online_mask);
 
-static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
-const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
-EXPORT_SYMBOL(cpu_present_mask);
+struct cpumask __cpu_present_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_present_mask);
 
-static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
-const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
-EXPORT_SYMBOL(cpu_active_mask);
-
-void set_cpu_possible(unsigned int cpu, bool possible)
-{
-       if (possible)
-               cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
-       else
-               cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
-}
-
-void set_cpu_present(unsigned int cpu, bool present)
-{
-       if (present)
-               cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
-       else
-               cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
-}
-
-void set_cpu_online(unsigned int cpu, bool online)
-{
-       if (online) {
-               cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
-               cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
-       } else {
-               cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
-       }
-}
-
-void set_cpu_active(unsigned int cpu, bool active)
-{
-       if (active)
-               cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
-       else
-               cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
-}
+struct cpumask __cpu_active_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_active_mask);
 
 void init_cpu_present(const struct cpumask *src)
 {
-       cpumask_copy(to_cpumask(cpu_present_bits), src);
+       cpumask_copy(&__cpu_present_mask, src);
 }
 
 void init_cpu_possible(const struct cpumask *src)
 {
-       cpumask_copy(to_cpumask(cpu_possible_bits), src);
+       cpumask_copy(&__cpu_possible_mask, src);
 }
 
 void init_cpu_online(const struct cpumask *src)
 {
-       cpumask_copy(to_cpumask(cpu_online_bits), src);
+       cpumask_copy(&__cpu_online_mask, src);
 }
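For context, a sketch (not part of this hunk) of how arch boot code keeps populating these maps; set_cpu_possible()/set_cpu_present() are assumed to become inline wrappers over the exported masks elsewhere in this series:

static void __init example_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < ncpus && cpu < nr_cpu_ids; cpu++) {
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}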
index bf8244190d0faa696336c4d4ae9b609a95d5c50b..c0957416b32edd7f945201839c3058128bd718f5 100644 (file)
@@ -3376,7 +3376,7 @@ find_lively_task_by_vpid(pid_t vpid)
 
        /* Reuse ptrace permission checks for now. */
        err = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+       if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
                goto errout;
 
        return task;
index 07110c6020a04ea37c04bc18bd0b9287cd0466dc..10e088237fed91bf453a651ea7b3bccc89a003ec 100644 (file)
@@ -59,8 +59,6 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 
-static void exit_mm(struct task_struct *tsk);
-
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
        nr_threads--;
@@ -1120,8 +1118,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
 {
        if (ptrace) {
-               if (task_is_stopped_or_traced(p) &&
-                   !(p->jobctl & JOBCTL_LISTENING))
+               if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
index c6f514573b28a425f648569473bab692d0ee1940..0773f2b23b107dcc3be9e5caabdbaa0a25de01d0 100644 (file)
@@ -2884,7 +2884,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
        }
 
        ret = -EPERM;
-       if (!ptrace_may_access(p, PTRACE_MODE_READ))
+       if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
                goto err_unlock;
 
        head = p->robust_list;
index 55c8c9349cfe6db49b9443c7b4aefb378c6f0249..4ae3232e7a28a507d5ba316d9603c4275012a7ad 100644 (file)
@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
        }
 
        ret = -EPERM;
-       if (!ptrace_may_access(p, PTRACE_MODE_READ))
+       if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
                goto err_unlock;
 
        head = p->compat_robust_list;
index 0aa69ea1d8fdcfa68046aa75b03c4373783a02fa..3a47fa998fe07277c592f62de36f59dad0d09f8b 100644 (file)
@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
                        &task2->signal->cred_guard_mutex);
        if (ret)
                goto err;
-       if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
-           !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+       if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
+           !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
                ret = -EPERM;
                goto err_unlock;
        }
index d873b64fbddcdd9e1666738f2b6192c6c5dc0029..ee70aef5cd81dfb7ee5c4397b91810b0109a58b8 100644 (file)
@@ -63,16 +63,16 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
        if (ret)
                goto out_free_image;
 
-       ret = sanity_check_segment_list(image);
-       if (ret)
-               goto out_free_image;
-
-        /* Enable the special crash kernel control page allocation policy. */
        if (kexec_on_panic) {
+               /* Enable special crash kernel control page alloc policy. */
                image->control_page = crashk_res.start;
                image->type = KEXEC_TYPE_CRASH;
        }
 
+       ret = sanity_check_segment_list(image);
+       if (ret)
+               goto out_free_image;
+
        /*
         * Find a location for the control code buffer, and add it
         * the vector of segments so that it's pages will also be
index c823f3001e121d0c51352befe941271c803cb108..8dc65914486999f43caa9276dde914663919fbc6 100644 (file)
@@ -310,12 +310,9 @@ static void kimage_free_pages(struct page *page)
 
 void kimage_free_page_list(struct list_head *list)
 {
-       struct list_head *pos, *next;
+       struct page *page, *next;
 
-       list_for_each_safe(pos, next, list) {
-               struct page *page;
-
-               page = list_entry(pos, struct page, lru);
+       list_for_each_entry_safe(page, next, list, lru) {
                list_del(&page->lru);
                kimage_free_pages(page);
        }
index b70ada0028d251d7171f13b0de7e9e47e1e6086c..007b791f676d5605fb674b6db333d9dd1d0a23b4 100644 (file)
@@ -109,11 +109,13 @@ int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
        return -EINVAL;
 }
 
+#ifdef CONFIG_KEXEC_VERIFY_SIG
 int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
                                        unsigned long buf_len)
 {
        return -EKEYREJECTED;
 }
+#endif
 
 /* Apply relocations of type RELA */
 int __weak
index e4392a698ad4a3c269d997708ae6447190e85ea0..0a52315d9c626abf5e17db2b348291e09efb6e15 100644 (file)
@@ -15,6 +15,27 @@ int kimage_is_destination_range(struct kimage *image,
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
+struct kexec_sha_region {
+       unsigned long start;
+       unsigned long len;
+};
+
+/*
+ * Keeps track of buffer parameters as provided by caller for requesting
+ * memory placement of buffer.
+ */
+struct kexec_buf {
+       struct kimage *image;
+       char *buffer;
+       unsigned long bufsz;
+       unsigned long mem;
+       unsigned long memsz;
+       unsigned long buf_align;
+       unsigned long buf_min;
+       unsigned long buf_max;
+       bool top_down;          /* allocate from top of memory hole */
+};
+
 void kimage_file_post_load_cleanup(struct kimage *image);
 #else /* CONFIG_KEXEC_FILE */
 static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
index e79439134978cf5f572fa1e981a57c3a5c9a8f69..c963ba534a784f456a5a2f1375025eaf1f27ccdd 100644 (file)
@@ -233,7 +233,11 @@ struct printk_log {
        u8 facility;            /* syslog facility */
        u8 flags:5;             /* internal record flags */
        u8 level:3;             /* syslog level */
-};
+}
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+__packed __aligned(4)
+#endif
+;
 
 /*
  * The logbuf_lock protects kmsg buffer, indices, counters.  This can be taken
@@ -274,11 +278,7 @@ static u32 clear_idx;
 #define LOG_FACILITY(v)                ((v) >> 3 & 0xff)
 
 /* record buffer */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-#define LOG_ALIGN 4
-#else
 #define LOG_ALIGN __alignof__(struct printk_log)
-#endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
 static char *log_buf = __log_buf;
index b760bae64cf123b16863b6180a68a099abca51d1..2341efe7fe026f496095c4a9c1526a69cad3849a 100644 (file)
@@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
        const struct cred *cred = current_cred(), *tcred;
+       int dumpable = 0;
+       kuid_t caller_uid;
+       kgid_t caller_gid;
+
+       if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
+               WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
+               return -EPERM;
+       }
 
        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
@@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
-       int dumpable = 0;
+
        /* Don't let security modules deny introspection */
        if (same_thread_group(task, current))
                return 0;
        rcu_read_lock();
+       if (mode & PTRACE_MODE_FSCREDS) {
+               caller_uid = cred->fsuid;
+               caller_gid = cred->fsgid;
+       } else {
+               /*
+                * Using the euid would make more sense here, but something
+                * in userland might rely on the old behavior, and this
+                * shouldn't be a security problem since
+                * PTRACE_MODE_REALCREDS implies that the caller explicitly
+                * used a syscall that requests access to another process
+                * (and not a filesystem syscall to procfs).
+                */
+               caller_uid = cred->uid;
+               caller_gid = cred->gid;
+       }
        tcred = __task_cred(task);
-       if (uid_eq(cred->uid, tcred->euid) &&
-           uid_eq(cred->uid, tcred->suid) &&
-           uid_eq(cred->uid, tcred->uid)  &&
-           gid_eq(cred->gid, tcred->egid) &&
-           gid_eq(cred->gid, tcred->sgid) &&
-           gid_eq(cred->gid, tcred->gid))
+       if (uid_eq(caller_uid, tcred->euid) &&
+           uid_eq(caller_uid, tcred->suid) &&
+           uid_eq(caller_uid, tcred->uid)  &&
+           gid_eq(caller_gid, tcred->egid) &&
+           gid_eq(caller_gid, tcred->sgid) &&
+           gid_eq(caller_gid, tcred->gid))
                goto ok;
        if (ptrace_has_cap(tcred->user_ns, mode))
                goto ok;
@@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request,
                goto out;
 
        task_lock(task);
-       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        task_unlock(task);
        if (retval)
                goto unlock_creds;
@@ -364,8 +387,14 @@ unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
 out:
        if (!retval) {
-               wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
-                           TASK_UNINTERRUPTIBLE);
+               /*
+                * We do not bother to change retval or clear JOBCTL_TRAPPING
+                * if wait_on_bit() was interrupted by SIGKILL. The tracer will
+                * not return to user-mode, it will exit and clear this bit in
+                * __ptrace_unlink() if it wasn't already cleared by the tracee;
+                * and until then nobody can ptrace this task.
+                */
+               wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }
 
index 6af9212ab5aab7cef491eb4e40e7c9277c1f2ac0..78947de6f9691e898adf3803ec59fded4a0a28da 100644 (file)
@@ -1853,11 +1853,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
                user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
        }
 
-       if (prctl_map.exe_fd != (u32)-1)
+       if (prctl_map.exe_fd != (u32)-1) {
                error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
-       down_read(&mm->mmap_sem);
-       if (error)
-               goto out;
+               if (error)
+                       return error;
+       }
+
+       down_write(&mm->mmap_sem);
 
        /*
         * We don't validate if these members are pointing to
@@ -1894,10 +1896,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
        if (prctl_map.auxv_size)
                memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
 
-       error = 0;
-out:
-       up_read(&mm->mmap_sem);
-       return error;
+       up_write(&mm->mmap_sem);
+       return 0;
 }
 #endif /* CONFIG_CHECKPOINT_RESTORE */
 
@@ -1963,7 +1963,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
 
        error = -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       down_write(&mm->mmap_sem);
        vma = find_vma(mm, addr);
 
        prctl_map.start_code    = mm->start_code;
@@ -2056,7 +2056,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
 
        error = 0;
 out:
-       up_read(&mm->mmap_sem);
+       up_write(&mm->mmap_sem);
        return error;
 }
 
index c810f8afdb7f79439737ee3b2528f7e2b10a233a..91420362e0b339fcdfe930066dab863604357b13 100644 (file)
@@ -173,7 +173,7 @@ extern int no_unaligned_warning;
 #define SYSCTL_WRITES_WARN      0
 #define SYSCTL_WRITES_STRICT    1
 
-static int sysctl_writes_strict = SYSCTL_WRITES_WARN;
+static int sysctl_writes_strict = SYSCTL_WRITES_STRICT;
 
 static int proc_do_cad_pid(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos);
index 7d0b49c536c531624f8f8148803d91cb5be8898b..ecb9e75614bf87f1e368074d6ef84bfe3f1003b8 100644 (file)
@@ -1893,6 +1893,8 @@ source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
 
+source "lib/Kconfig.ubsan"
+
 config ARCH_HAS_DEVMEM_IS_ALLOWED
        bool
 
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
new file mode 100644 (file)
index 0000000..49518fb
--- /dev/null
@@ -0,0 +1,29 @@
+config ARCH_HAS_UBSAN_SANITIZE_ALL
+       bool
+
+config UBSAN
+       bool "Undefined behaviour sanity checker"
+       help
+         This option enables the undefined behaviour sanity checker.
+         Compile-time instrumentation is used to detect various undefined
+         behaviours at runtime. Various types of checks may be enabled
+         via the boot parameter ubsan_handle (see: Documentation/ubsan.txt).
+
+config UBSAN_SANITIZE_ALL
+       bool "Enable instrumentation for the entire kernel"
+       depends on UBSAN
+       depends on ARCH_HAS_UBSAN_SANITIZE_ALL
+       default y
+       help
+         This option activates instrumentation for the entire kernel.
+         If you don't enable this option, you have to explicitly specify
+         UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+
+       bool "Enable checking of pointer alignment"
+       bool "Enable checking of pointers alignment"
+       depends on UBSAN
+       default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
+       help
+         This option enables detection of unaligned memory accesses.
+         Enabling this option on architectures that support unaligned
+         accesses may produce a lot of false positives.
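For a feel of what the checker flags (illustrative only; nothing like this is added by the patch), a file built with UBSAN instrumentation reports constructs such as these at runtime:

/* Signed integer overflow and an out-of-bounds index are both
 * undefined behaviour that UBSAN instruments. */
int ubsan_demo(int x)
{
	int a[4] = { 0, 1, 2, 3 };
	int big = 0x7fffffff;

	return a[x & 7] + (big + x);	/* OOB for x & 7 > 3, overflow for x > 0 */
}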
index 180dd4d0dd412b6119f276e9c45a42386a7182bb..2d4bc33d09b42097085480d7ce93ec3a8882faba 100644 (file)
@@ -31,7 +31,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
-obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o
+obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
@@ -154,7 +154,7 @@ obj-$(CONFIG_GLOB) += glob.o
 obj-$(CONFIG_MPILIB) += mpi/
 obj-$(CONFIG_SIGNATURE) += digsig.o
 
-obj-$(CONFIG_CLZ_TAB) += clz_tab.o
+lib-$(CONFIG_CLZ_TAB) += clz_tab.o
 
 obj-$(CONFIG_DDR) += jedec_ddr_data.o
 
@@ -209,3 +209,6 @@ quiet_cmd_build_OID_registry = GEN     $@
 clean-files    += oid_registry_data.c
 
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+obj-$(CONFIG_UBSAN) += ubsan.o
+
+UBSAN_SANITIZE_ubsan.o := n
index 4527e751b5e0db75d02a5f513e730a0ac4a6d67b..b8f1d6cbb20057c6b1d7ba880ad20fbfe120c243 100644 (file)
@@ -41,6 +41,27 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
 }
 EXPORT_SYMBOL_GPL(__iowrite32_copy);
 
+/**
+ * __ioread32_copy - copy data from MMIO space, in 32-bit units
+ * @to: destination (must be 32-bit aligned)
+ * @from: source, in MMIO space (must be 32-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from MMIO space to kernel space, in units of 32 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __ioread32_copy(void *to, const void __iomem *from, size_t count)
+{
+       u32 *dst = to;
+       const u32 __iomem *src = from;
+       const u32 __iomem *end = src + count;
+
+       while (src < end)
+               *dst++ = __raw_readl(src++);
+}
+EXPORT_SYMBOL_GPL(__ioread32_copy);
+
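A hedged usage sketch for the new helper (the MY_MBOX_OFF offset and the ioremap()ed regs base are hypothetical): reading a small mailbox out of device memory that only tolerates 32-bit accesses.

#define MY_MBOX_OFF	0x40		/* hypothetical register offset */

static void read_mailbox(void __iomem *regs, u32 *msg)
{
	/* 16 words = 64 bytes, fetched with 32-bit MMIO reads only. */
	__ioread32_copy(msg, regs + MY_MBOX_OFF, 16);
}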
 /**
  * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
  * @to: destination, in MMIO space (must be 64-bit aligned)
index 6a08ce7d6adc054966658d9b6de0dfb36e2297e3..31ce853fbfb1be6a2b1905e593377520ab951eab 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/crc32c.h>
 
 static struct crypto_shash *tfm;
 
index 5939f63d90cde79fe1e09814765539a7e43a3c28..5c88204b6f1f152e1cecf70c4cb20d123d8bfa42 100644 (file)
@@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
                [STRING_UNITS_10] = 1000,
                [STRING_UNITS_2] = 1024,
        };
-       int i, j;
-       u32 remainder = 0, sf_cap, exp;
+       static const unsigned int rounding[] = { 500, 50, 5 };
+       int i = 0, j;
+       u32 remainder = 0, sf_cap;
        char tmp[8];
        const char *unit;
 
        tmp[0] = '\0';
-       i = 0;
-       if (!size)
+
+       if (blk_size == 0)
+               size = 0;
+       if (size == 0)
                goto out;
 
-       while (blk_size >= divisor[units]) {
-               remainder = do_div(blk_size, divisor[units]);
+       /* This is Napier's algorithm.  Reduce the original block size to
+        *
+        * coefficient * divisor[units]^i
+        *
+        * we do the reduction so both coefficients are just under 32 bits so
+        * that multiplying them together won't overflow 64 bits and we keep
+        * as much precision as possible in the numbers.
+        *
+        * Note: it's safe to throw away the remainders here because all the
+        * precision is in the coefficients.
+        */
+       while (blk_size >> 32) {
+               do_div(blk_size, divisor[units]);
                i++;
        }
 
-       exp = divisor[units] / (u32)blk_size;
-       /*
-        * size must be strictly greater than exp here to ensure that remainder
-        * is greater than divisor[units] coming out of the if below.
-        */
-       if (size > exp) {
-               remainder = do_div(size, divisor[units]);
-               remainder *= blk_size;
+       while (size >> 32) {
+               do_div(size, divisor[units]);
                i++;
-       } else {
-               remainder *= size;
        }
 
+       /* now perform the actual multiplication keeping i as the sum of the
+        * two logarithms */
        size *= blk_size;
-       size += remainder / divisor[units];
-       remainder %= divisor[units];
 
+       /* and logarithmically reduce it until it's just under the divisor */
        while (size >= divisor[units]) {
                remainder = do_div(size, divisor[units]);
                i++;
        }
 
+       /* work out in j how many digits of precision we need from the
+        * remainder */
        sf_cap = size;
        for (j = 0; sf_cap*10 < 1000; j++)
                sf_cap *= 10;
 
-       if (j) {
+       if (units == STRING_UNITS_2) {
+               /* express the remainder as a decimal.  It's currently the
+                * numerator of a fraction whose denominator is
+                * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
                remainder *= 1000;
-               remainder /= divisor[units];
+               remainder >>= 10;
+       }
+
+       /* add a 5 to the digit below what will be printed to ensure
+        * an arithmetical round up and carry it through to size */
+       remainder += rounding[j];
+       if (remainder >= 1000) {
+               remainder -= 1000;
+               size += 1;
+       }
+
+       if (j) {
                snprintf(tmp, sizeof(tmp), ".%03u", remainder);
                tmp[j+1] = '\0';
        }
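A quick sanity check of the reworked scaling and rounding (a sketch; the inputs are chosen so the reduction is exact): 8192 blocks of 4096 bytes is exactly 32 MiB, the decimal remainder is zero, and the rounding term cannot carry, so the result should read "32.0 MiB".

static void example_size_report(void)
{
	char buf[16];

	string_get_size(8192, 4096, STRING_UNITS_2, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);	/* expected: "capacity: 32.0 MiB" */
}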
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
deleted file mode 100644 (file)
index 5241df3..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Test cases for lib/hexdump.c module.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/string.h>
-
-static const unsigned char data_b[] = {
-       '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */
-       '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */
-       '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */
-       '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */
-};
-
-static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
-
-static const char * const test_data_1_le[] __initconst = {
-       "be", "32", "db", "7b", "0a", "18", "93", "b2",
-       "70", "ba", "c4", "24", "7d", "83", "34", "9b",
-       "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
-       "4c", "d1", "19", "99", "43", "b1", "af", "0c",
-};
-
-static const char * const test_data_2_le[] __initconst = {
-       "32be", "7bdb", "180a", "b293",
-       "ba70", "24c4", "837d", "9b34",
-       "9ca6", "ad31", "0f9c", "e9ac",
-       "d14c", "9919", "b143", "0caf",
-};
-
-static const char * const test_data_4_le[] __initconst = {
-       "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
-       "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
-};
-
-static const char * const test_data_8_le[] __initconst = {
-       "b293180a7bdb32be", "9b34837d24c4ba70",
-       "e9ac0f9cad319ca6", "0cafb1439919d14c",
-};
-
-static void __init test_hexdump(size_t len, int rowsize, int groupsize,
-                               bool ascii)
-{
-       char test[32 * 3 + 2 + 32 + 1];
-       char real[32 * 3 + 2 + 32 + 1];
-       char *p;
-       const char * const *result;
-       size_t l = len;
-       int gs = groupsize, rs = rowsize;
-       unsigned int i;
-
-       hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii);
-
-       if (rs != 16 && rs != 32)
-               rs = 16;
-
-       if (l > rs)
-               l = rs;
-
-       if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0))
-               gs = 1;
-
-       if (gs == 8)
-               result = test_data_8_le;
-       else if (gs == 4)
-               result = test_data_4_le;
-       else if (gs == 2)
-               result = test_data_2_le;
-       else
-               result = test_data_1_le;
-
-       memset(test, ' ', sizeof(test));
-
-       /* hex dump */
-       p = test;
-       for (i = 0; i < l / gs; i++) {
-               const char *q = *result++;
-               size_t amount = strlen(q);
-
-               strncpy(p, q, amount);
-               p += amount + 1;
-       }
-       if (i)
-               p--;
-
-       /* ASCII part */
-       if (ascii) {
-               p = test + rs * 2 + rs / gs + 1;
-               strncpy(p, data_a, l);
-               p += l;
-       }
-
-       *p = '\0';
-
-       if (strcmp(test, real)) {
-               pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize);
-               pr_err("Result: '%s'\n", real);
-               pr_err("Expect: '%s'\n", test);
-       }
-}
-
-static void __init test_hexdump_set(int rowsize, bool ascii)
-{
-       size_t d = min_t(size_t, sizeof(data_b), rowsize);
-       size_t len = get_random_int() % d + 1;
-
-       test_hexdump(len, rowsize, 4, ascii);
-       test_hexdump(len, rowsize, 2, ascii);
-       test_hexdump(len, rowsize, 8, ascii);
-       test_hexdump(len, rowsize, 1, ascii);
-}
-
-static void __init test_hexdump_overflow(bool ascii)
-{
-       char buf[56];
-       const char *t = test_data_1_le[0];
-       size_t l = get_random_int() % sizeof(buf);
-       bool a;
-       int e, r;
-
-       memset(buf, ' ', sizeof(buf));
-
-       r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii);
-
-       if (ascii)
-               e = 50;
-       else
-               e = 2;
-       buf[e + 2] = '\0';
-
-       if (!l) {
-               a = r == e && buf[0] == ' ';
-       } else if (l < 3) {
-               a = r == e && buf[0] == '\0';
-       } else if (l < 4) {
-               a = r == e && !strcmp(buf, t);
-       } else if (ascii) {
-               if (l < 51)
-                       a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' ';
-               else
-                       a = r == e && buf[50] == '\0' && buf[49] == '.';
-       } else {
-               a = r == e && buf[e] == '\0';
-       }
-
-       if (!a) {
-               pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf));
-               pr_err("Result: '%s'\n", buf);
-       }
-}
-
-static int __init test_hexdump_init(void)
-{
-       unsigned int i;
-       int rowsize;
-
-       pr_info("Running tests...\n");
-
-       rowsize = (get_random_int() % 2 + 1) * 16;
-       for (i = 0; i < 16; i++)
-               test_hexdump_set(rowsize, false);
-
-       rowsize = (get_random_int() % 2 + 1) * 16;
-       for (i = 0; i < 16; i++)
-               test_hexdump_set(rowsize, true);
-
-       for (i = 0; i < 16; i++)
-               test_hexdump_overflow(false);
-
-       for (i = 0; i < 16; i++)
-               test_hexdump_overflow(true);
-
-       return -EINVAL;
-}
-module_init(test_hexdump_init);
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
new file mode 100644 (file)
index 0000000..3f415d8
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Test cases for lib/hexdump.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const unsigned char data_b[] = {
+       '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */
+       '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */
+       '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */
+       '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */
+};
+
+static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
+
+static const char * const test_data_1_le[] __initconst = {
+       "be", "32", "db", "7b", "0a", "18", "93", "b2",
+       "70", "ba", "c4", "24", "7d", "83", "34", "9b",
+       "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
+       "4c", "d1", "19", "99", "43", "b1", "af", "0c",
+};
+
+static const char * const test_data_2_le[] __initconst = {
+       "32be", "7bdb", "180a", "b293",
+       "ba70", "24c4", "837d", "9b34",
+       "9ca6", "ad31", "0f9c", "e9ac",
+       "d14c", "9919", "b143", "0caf",
+};
+
+static const char * const test_data_4_le[] __initconst = {
+       "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
+       "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
+};
+
+static const char * const test_data_8_le[] __initconst = {
+       "b293180a7bdb32be", "9b34837d24c4ba70",
+       "e9ac0f9cad319ca6", "0cafb1439919d14c",
+};
+
+#define FILL_CHAR      '#'
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_hexdump_prepare_test(size_t len, int rowsize,
+                                            int groupsize, char *test,
+                                            size_t testlen, bool ascii)
+{
+       char *p;
+       const char * const *result;
+       size_t l = len;
+       int gs = groupsize, rs = rowsize;
+       unsigned int i;
+
+       if (rs != 16 && rs != 32)
+               rs = 16;
+
+       if (l > rs)
+               l = rs;
+
+       if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0))
+               gs = 1;
+
+       if (gs == 8)
+               result = test_data_8_le;
+       else if (gs == 4)
+               result = test_data_4_le;
+       else if (gs == 2)
+               result = test_data_2_le;
+       else
+               result = test_data_1_le;
+
+       /* hex dump */
+       p = test;
+       for (i = 0; i < l / gs; i++) {
+               const char *q = *result++;
+               size_t amount = strlen(q);
+
+               strncpy(p, q, amount);
+               p += amount;
+
+               *p++ = ' ';
+       }
+       if (i)
+               p--;
+
+       /* ASCII part */
+       if (ascii) {
+               do {
+                       *p++ = ' ';
+               } while (p < test + rs * 2 + rs / gs + 1);
+
+               strncpy(p, data_a, l);
+               p += l;
+       }
+
+       *p = '\0';
+}
+
+#define TEST_HEXDUMP_BUF_SIZE          (32 * 3 + 2 + 32 + 1)
+
+static void __init test_hexdump(size_t len, int rowsize, int groupsize,
+                               bool ascii)
+{
+       char test[TEST_HEXDUMP_BUF_SIZE];
+       char real[TEST_HEXDUMP_BUF_SIZE];
+
+       total_tests++;
+
+       memset(real, FILL_CHAR, sizeof(real));
+       hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real),
+                          ascii);
+
+       memset(test, FILL_CHAR, sizeof(test));
+       test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test),
+                                 ascii);
+
+       if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) {
+               pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize);
+               pr_err("Result: '%s'\n", real);
+               pr_err("Expect: '%s'\n", test);
+               failed_tests++;
+       }
+}
+
+static void __init test_hexdump_set(int rowsize, bool ascii)
+{
+       size_t d = min_t(size_t, sizeof(data_b), rowsize);
+       size_t len = get_random_int() % d + 1;
+
+       test_hexdump(len, rowsize, 4, ascii);
+       test_hexdump(len, rowsize, 2, ascii);
+       test_hexdump(len, rowsize, 8, ascii);
+       test_hexdump(len, rowsize, 1, ascii);
+}
+
+static void __init test_hexdump_overflow(size_t buflen, size_t len,
+                                        int rowsize, int groupsize,
+                                        bool ascii)
+{
+       char test[TEST_HEXDUMP_BUF_SIZE];
+       char buf[TEST_HEXDUMP_BUF_SIZE];
+       int rs = rowsize, gs = groupsize;
+       int ae, he, e, f, r;
+       bool a;
+
+       total_tests++;
+
+       memset(buf, FILL_CHAR, sizeof(buf));
+
+       r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii);
+
+       /*
+        * The caller must provide a data length that is a multiple of
+        * groupsize; the calculations below rely on that assumption.
+        */
+       ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */;
+       he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */;
+
+       if (ascii)
+               e = ae;
+       else
+               e = he;
+
+       f = min_t(int, e + 1, buflen);
+       if (buflen) {
+               test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii);
+               test[f - 1] = '\0';
+       }
+       memset(test + f, FILL_CHAR, sizeof(test) - f);
+
+       a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE);
+
+       buf[sizeof(buf) - 1] = '\0';
+
+       if (!a) {
+               pr_err("Len: %zu buflen: %zu strlen: %zu\n",
+                       len, buflen, strnlen(buf, sizeof(buf)));
+               pr_err("Result: %d '%s'\n", r, buf);
+               pr_err("Expect: %d '%s'\n", e, test);
+               failed_tests++;
+       }
+}
+
+static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
+{
+       unsigned int i = 0;
+       int rs = (get_random_int() % 2 + 1) * 16;
+
+       do {
+               int gs = 1 << i;
+               size_t len = get_random_int() % rs + gs;
+
+               test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
+       } while (i++ < 3);
+}
+
+static int __init test_hexdump_init(void)
+{
+       unsigned int i;
+       int rowsize;
+
+       rowsize = (get_random_int() % 2 + 1) * 16;
+       for (i = 0; i < 16; i++)
+               test_hexdump_set(rowsize, false);
+
+       rowsize = (get_random_int() % 2 + 1) * 16;
+       for (i = 0; i < 16; i++)
+               test_hexdump_set(rowsize, true);
+
+       for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
+               test_hexdump_overflow_set(i, false);
+
+       for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
+               test_hexdump_overflow_set(i, true);
+
+       if (failed_tests == 0)
+               pr_info("all %u tests passed\n", total_tests);
+       else
+               pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+       return failed_tests ? -EINVAL : 0;
+}
+module_init(test_hexdump_init);
+
+static void __exit test_hexdump_exit(void)
+{
+       /* do nothing */
+}
+module_exit(test_hexdump_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
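For context on the interface the new test exercises: hex_dump_to_buffer(buf, len, rowsize, groupsize, linebuf, linebuflen, ascii) formats one row of a hex dump and, like snprintf(), returns the number of characters it would emit. The sketch below is not part of the patch and the function name is hypothetical; it shows the single call predicted by the test_data_2_le strings above for the first eight bytes of data_b on a little-endian host.

/*
 * Editorial sketch, not part of the patch. Expected on little-endian:
 * line == "32be 7bdb 180a b293" and n == 19 (4 groups * 4 chars + 3
 * separating spaces), matching the "he" formula in test_hexdump_overflow().
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void hexdump_usage_sketch(const unsigned char *data_b)
{
	char line[64];
	int n;

	n = hex_dump_to_buffer(data_b, 8, 16, 2, line, sizeof(line), false);
	pr_info("%d: %s\n", n, line);
}

With ascii=true the same call would also pad out to the ASCII column before appending the printable representation, which is what test_hexdump_prepare_test() reconstructs by hand.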
diff --git a/lib/ubsan.c b/lib/ubsan.c
new file mode 100644 (file)
index 0000000..8799ae5
--- /dev/null
@@ -0,0 +1,456 @@
+/*
+ * UBSAN error reporting functions
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#include "ubsan.h"
+
+const char *type_check_kinds[] = {
+       "load of",
+       "store to",
+       "reference binding to",
+       "member access within",
+       "member call on",
+       "constructor call on",
+       "downcast of",
+       "downcast of"
+};
+
+#define REPORTED_BIT 31
+
+#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN)
+#define COLUMN_MASK (~(1U << REPORTED_BIT))
+#define LINE_MASK   (~0U)
+#else
+#define COLUMN_MASK   (~0U)
+#define LINE_MASK (~(1U << REPORTED_BIT))
+#endif
+
+#define VALUE_LENGTH 40
+
+static bool was_reported(struct source_location *location)
+{
+       return test_and_set_bit(REPORTED_BIT, &location->reported);
+}
+
+static void print_source_location(const char *prefix,
+                               struct source_location *loc)
+{
+       pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
+               loc->line & LINE_MASK, loc->column & COLUMN_MASK);
+}
+
+static bool suppress_report(struct source_location *loc)
+{
+       return current->in_ubsan || was_reported(loc);
+}
+
+static bool type_is_int(struct type_descriptor *type)
+{
+       return type->type_kind == type_kind_int;
+}
+
+static bool type_is_signed(struct type_descriptor *type)
+{
+       WARN_ON(!type_is_int(type));
+       return  type->type_info & 1;
+}
+
+static unsigned type_bit_width(struct type_descriptor *type)
+{
+       return 1 << (type->type_info >> 1);
+}
+
+static bool is_inline_int(struct type_descriptor *type)
+{
+       unsigned inline_bits = sizeof(unsigned long)*8;
+       unsigned bits = type_bit_width(type);
+
+       WARN_ON(!type_is_int(type));
+
+       return bits <= inline_bits;
+}
+
+static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+{
+       if (is_inline_int(type)) {
+               unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
+               return ((s_max)val) << extra_bits >> extra_bits;
+       }
+
+       if (type_bit_width(type) == 64)
+               return *(s64 *)val;
+
+       return *(s_max *)val;
+}
+
+static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+{
+       return type_is_signed(type) && get_signed_val(type, val) < 0;
+}
+
+static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+{
+       if (is_inline_int(type))
+               return val;
+
+       if (type_bit_width(type) == 64)
+               return *(u64 *)val;
+
+       return *(u_max *)val;
+}
+
+static void val_to_string(char *str, size_t size, struct type_descriptor *type,
+       unsigned long value)
+{
+       if (type_is_int(type)) {
+               if (type_bit_width(type) == 128) {
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+                       u_max val = get_unsigned_val(type, value);
+
+                       scnprintf(str, size, "0x%08x%08x%08x%08x",
+                               (u32)(val >> 96),
+                               (u32)(val >> 64),
+                               (u32)(val >> 32),
+                               (u32)(val));
+#else
+                       WARN_ON(1);
+#endif
+               } else if (type_is_signed(type)) {
+                       scnprintf(str, size, "%lld",
+                               (s64)get_signed_val(type, value));
+               } else {
+                       scnprintf(str, size, "%llu",
+                               (u64)get_unsigned_val(type, value));
+               }
+       }
+}
+
+static bool location_is_valid(struct source_location *loc)
+{
+       return loc->file_name != NULL;
+}
+
+static DEFINE_SPINLOCK(report_lock);
+
+static void ubsan_prologue(struct source_location *location,
+                       unsigned long *flags)
+{
+       current->in_ubsan++;
+       spin_lock_irqsave(&report_lock, *flags);
+
+       pr_err("========================================"
+               "========================================\n");
+       print_source_location("UBSAN: Undefined behaviour in", location);
+}
+
+static void ubsan_epilogue(unsigned long *flags)
+{
+       dump_stack();
+       pr_err("========================================"
+               "========================================\n");
+       spin_unlock_irqrestore(&report_lock, *flags);
+       current->in_ubsan--;
+}
+
+static void handle_overflow(struct overflow_data *data, unsigned long lhs,
+                       unsigned long rhs, char op)
+{
+
+       struct type_descriptor *type = data->type;
+       unsigned long flags;
+       char lhs_val_str[VALUE_LENGTH];
+       char rhs_val_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
+       val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
+       pr_err("%s integer overflow:\n",
+               type_is_signed(type) ? "signed" : "unsigned");
+       pr_err("%s %c %s cannot be represented in type %s\n",
+               lhs_val_str,
+               op,
+               rhs_val_str,
+               type->type_name);
+
+       ubsan_epilogue(&flags);
+}
+
+void __ubsan_handle_add_overflow(struct overflow_data *data,
+                               unsigned long lhs,
+                               unsigned long rhs)
+{
+
+       handle_overflow(data, lhs, rhs, '+');
+}
+EXPORT_SYMBOL(__ubsan_handle_add_overflow);
+
+void __ubsan_handle_sub_overflow(struct overflow_data *data,
+                               unsigned long lhs,
+                               unsigned long rhs)
+{
+       handle_overflow(data, lhs, rhs, '-');
+}
+EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
+
+void __ubsan_handle_mul_overflow(struct overflow_data *data,
+                               unsigned long lhs,
+                               unsigned long rhs)
+{
+       handle_overflow(data, lhs, rhs, '*');
+}
+EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
+
+void __ubsan_handle_negate_overflow(struct overflow_data *data,
+                               unsigned long old_val)
+{
+       unsigned long flags;
+       char old_val_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
+
+       pr_err("negation of %s cannot be represented in type %s:\n",
+               old_val_str, data->type->type_name);
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+
+
+void __ubsan_handle_divrem_overflow(struct overflow_data *data,
+                               unsigned long lhs,
+                               unsigned long rhs)
+{
+       unsigned long flags;
+       char rhs_val_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
+
+       if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1)
+               pr_err("division of %s by -1 cannot be represented in type %s\n",
+                       rhs_val_str, data->type->type_name);
+       else
+               pr_err("division by zero\n");
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
+
+static void handle_null_ptr_deref(struct type_mismatch_data *data)
+{
+       unsigned long flags;
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       pr_err("%s null pointer of type %s\n",
+               type_check_kinds[data->type_check_kind],
+               data->type->type_name);
+
+       ubsan_epilogue(&flags);
+}
+
+static void handle_missaligned_access(struct type_mismatch_data *data,
+                               unsigned long ptr)
+{
+       unsigned long flags;
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       pr_err("%s misaligned address %p for type %s\n",
+               type_check_kinds[data->type_check_kind],
+               (void *)ptr, data->type->type_name);
+       pr_err("which requires %ld byte alignment\n", data->alignment);
+
+       ubsan_epilogue(&flags);
+}
+
+static void handle_object_size_mismatch(struct type_mismatch_data *data,
+                                       unsigned long ptr)
+{
+       unsigned long flags;
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+       pr_err("%s address %pK with insufficient space\n",
+               type_check_kinds[data->type_check_kind],
+               (void *) ptr);
+       pr_err("for an object of type %s\n", data->type->type_name);
+       ubsan_epilogue(&flags);
+}
+
+void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
+                               unsigned long ptr)
+{
+
+       if (!ptr)
+               handle_null_ptr_deref(data);
+       else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
+               handle_missaligned_access(data, ptr);
+       else
+               handle_object_size_mismatch(data, ptr);
+}
+EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
+
+void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
+{
+       unsigned long flags;
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       pr_err("null pointer returned from function declared to never return null\n");
+
+       if (location_is_valid(&data->attr_location))
+               print_source_location("returns_nonnull attribute specified in",
+                               &data->attr_location);
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
+
+void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
+                                       unsigned long bound)
+{
+       unsigned long flags;
+       char bound_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(bound_str, sizeof(bound_str), data->type, bound);
+       pr_err("variable length array bound value %s <= 0\n", bound_str);
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
+
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
+                               unsigned long index)
+{
+       unsigned long flags;
+       char index_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(index_str, sizeof(index_str), data->index_type, index);
+       pr_err("index %s is out of range for type %s\n", index_str,
+               data->array_type->type_name);
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
+
+void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
+                                       unsigned long lhs, unsigned long rhs)
+{
+       unsigned long flags;
+       struct type_descriptor *rhs_type = data->rhs_type;
+       struct type_descriptor *lhs_type = data->lhs_type;
+       char rhs_str[VALUE_LENGTH];
+       char lhs_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
+       val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
+
+       if (val_is_negative(rhs_type, rhs))
+               pr_err("shift exponent %s is negative\n", rhs_str);
+
+       else if (get_unsigned_val(rhs_type, rhs) >=
+               type_bit_width(lhs_type))
+               pr_err("shift exponent %s is too large for %u-bit type %s\n",
+                       rhs_str,
+                       type_bit_width(lhs_type),
+                       lhs_type->type_name);
+       else if (val_is_negative(lhs_type, lhs))
+               pr_err("left shift of negative value %s\n",
+                       lhs_str);
+       else
+               pr_err("left shift of %s by %s places cannot be"
+                       " represented in type %s\n",
+                       lhs_str, rhs_str,
+                       lhs_type->type_name);
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
+
+
+void __noreturn
+__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+{
+       unsigned long flags;
+
+       ubsan_prologue(&data->location, &flags);
+       pr_err("calling __builtin_unreachable()\n");
+       ubsan_epilogue(&flags);
+       panic("can't return from __builtin_unreachable()");
+}
+EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
+
+void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
+                               unsigned long val)
+{
+       unsigned long flags;
+       char val_str[VALUE_LENGTH];
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, &flags);
+
+       val_to_string(val_str, sizeof(val_str), data->type, val);
+
+       pr_err("load of value %s is not a valid value for type %s\n",
+               val_str, data->type->type_name);
+
+       ubsan_epilogue(&flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
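One detail in ubsan.c worth spelling out: for types narrow enough to be passed inline in an unsigned long, get_signed_val() recovers the signed value by shifting it to the top of s_max and arithmetically shifting it back down. The userspace fragment below is a sketch, not part of the patch, with s_max narrowed to 64 bits purely for the demonstration.

/*
 * Editorial sketch, not part of the patch: shift-up/shift-down sign
 * extension as used by get_signed_val(). The 8-bit pattern 0xf0 is -16;
 * the result assumes the usual two's-complement, arithmetic-shift ABI.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 0xf0;			/* value handed over "inline" */
	unsigned int bits = 8;			/* type_bit_width() of an s8 */
	unsigned int extra = 64 - bits;
	int64_t widened = (int64_t)(raw << extra) >> extra;

	printf("%lld\n", (long long)widened);	/* prints -16 */
	return 0;
}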
diff --git a/lib/ubsan.h b/lib/ubsan.h
new file mode 100644 (file)
index 0000000..b2d18d4
--- /dev/null
@@ -0,0 +1,84 @@
+#ifndef _LIB_UBSAN_H
+#define _LIB_UBSAN_H
+
+enum {
+       type_kind_int = 0,
+       type_kind_float = 1,
+       type_unknown = 0xffff
+};
+
+struct type_descriptor {
+       u16 type_kind;
+       u16 type_info;
+       char type_name[1];
+};
+
+struct source_location {
+       const char *file_name;
+       union {
+               unsigned long reported;
+               struct {
+                       u32 line;
+                       u32 column;
+               };
+       };
+};
+
+struct overflow_data {
+       struct source_location location;
+       struct type_descriptor *type;
+};
+
+struct type_mismatch_data {
+       struct source_location location;
+       struct type_descriptor *type;
+       unsigned long alignment;
+       unsigned char type_check_kind;
+};
+
+struct nonnull_arg_data {
+       struct source_location location;
+       struct source_location attr_location;
+       int arg_index;
+};
+
+struct nonnull_return_data {
+       struct source_location location;
+       struct source_location attr_location;
+};
+
+struct vla_bound_data {
+       struct source_location location;
+       struct type_descriptor *type;
+};
+
+struct out_of_bounds_data {
+       struct source_location location;
+       struct type_descriptor *array_type;
+       struct type_descriptor *index_type;
+};
+
+struct shift_out_of_bounds_data {
+       struct source_location location;
+       struct type_descriptor *lhs_type;
+       struct type_descriptor *rhs_type;
+};
+
+struct unreachable_data {
+       struct source_location location;
+};
+
+struct invalid_value_data {
+       struct source_location location;
+       struct type_descriptor *type;
+};
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+typedef __int128 s_max;
+typedef unsigned __int128 u_max;
+#else
+typedef s64 s_max;
+typedef u64 u_max;
+#endif
+
+#endif
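To connect the handlers in ubsan.c with the compiler-emitted data described by these structures, the sketch below (not part of the patch; the function name is hypothetical) shows the kind of code that, built with CONFIG_UBSAN and -fsanitize=signed-integer-overflow instrumentation, makes the compiler call __ubsan_handle_add_overflow() and print the "signed integer overflow" report implemented above.

/*
 * Editorial sketch, not part of the patch: a deliberate signed overflow.
 * The volatile qualifier keeps the compiler from folding the check away.
 */
#include <linux/kernel.h>

static int ubsan_overflow_demo(void)
{
	volatile int val = INT_MAX;

	val += 1;	/* undefined behaviour: instrumented call site */

	return val;
}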
index b1cf73bc3b12969f00740031ac60f28840ac212d..8ad580273521046904ec13478e7b897839d955d7 100644 (file)
@@ -3357,6 +3357,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct anon_vma *anon_vma;
        int count, mapcount, ret;
        bool mlocked;
+       unsigned long flags;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageAnon(page), page);
@@ -3396,7 +3397,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                lru_add_drain();
 
        /* Prevent deferred_split_scan() touching ->_count */
-       spin_lock(&split_queue_lock);
+       spin_lock_irqsave(&split_queue_lock, flags);
        count = page_count(head);
        mapcount = total_mapcount(head);
        if (!mapcount && count == 1) {
@@ -3404,11 +3405,11 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        split_queue_len--;
                        list_del(page_deferred_list(head));
                }
-               spin_unlock(&split_queue_lock);
+               spin_unlock_irqrestore(&split_queue_lock, flags);
                __split_huge_page(page, list);
                ret = 0;
        } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-               spin_unlock(&split_queue_lock);
+               spin_unlock_irqrestore(&split_queue_lock, flags);
                pr_alert("total_mapcount: %u, page_count(): %u\n",
                                mapcount, count);
                if (PageTail(page))
@@ -3416,7 +3417,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                dump_page(page, "total_mapcount(head) > 0");
                BUG();
        } else {
-               spin_unlock(&split_queue_lock);
+               spin_unlock_irqrestore(&split_queue_lock, flags);
                unfreeze_page(anon_vma, head);
                ret = -EBUSY;
        }
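The hunks above switch split_queue_lock to the interrupt-saving lock variants: flags records the caller's interrupt state, so the lock can safely be taken from contexts that may also reach it with interrupts disabled or from interrupt context. A minimal sketch of the idiom, with illustrative names and not part of the patch:

/*
 * Editorial sketch, not part of the patch: the spin_lock_irqsave() idiom
 * adopted above. 'flags' saves the previous interrupt state so that the
 * unlock restores exactly what was there before.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);		/* disable irqs, save state */
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);	/* restore saved state */
}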
index 64710148941ed33d7467192797711c5c98e9e4a3..a61460d9f5b0e76a8ce5214de597b285dbca74eb 100644 (file)
@@ -1,4 +1,5 @@
 KASAN_SANITIZE := n
+UBSAN_SANITIZE_kasan.o := n
 
 CFLAGS_REMOVE_kasan.o = -pg
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
index afc71ea9a381f853faf65a05edc7f411a378d7e5..1d05cb9d363d0bfadd6a9c58efccd0551d0bd7f2 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 static LIST_HEAD(list_lrus);
 static DEFINE_MUTEX(list_lrus_mutex);
 
@@ -37,9 +37,9 @@ static void list_lru_register(struct list_lru *lru)
 static void list_lru_unregister(struct list_lru *lru)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
        /*
@@ -104,7 +104,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
 {
        return &nlru->lru;
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -292,7 +292,7 @@ static void init_one_lru(struct list_lru_one *l)
        l->nr_items = 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
 {
@@ -529,7 +529,7 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 static void memcg_destroy_list_lru(struct list_lru *lru)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
index 0eda67376df4323a62f8268975468a8dd4d91e04..ca052f2a4a0b374eb9a2a6498b407be7da28ac48 100644 (file)
@@ -66,7 +66,6 @@
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
-#include <net/tcp_memcontrol.h>
 #include "slab.h"
 
 #include <asm/uaccess.h>
@@ -83,6 +82,9 @@ struct mem_cgroup *root_mem_cgroup __read_mostly;
 /* Socket memory accounting disabled? */
 static bool cgroup_memory_nosocket;
 
+/* Kernel memory accounting disabled? */
+static bool cgroup_memory_nokmem;
+
 /* Whether the swap controller is active */
 #ifdef CONFIG_MEMCG_SWAP
 int do_swap_account __read_mostly;
@@ -239,6 +241,7 @@ enum res_type {
        _MEMSWAP,
        _OOM_TYPE,
        _KMEM,
+       _TCP,
 };
 
 #define MEMFILE_PRIVATE(x, val)        ((x) << 16 | (val))
@@ -247,13 +250,6 @@ enum res_type {
 /* Used for OOM notifier */
 #define OOM_CONTROL            (0)
 
-/*
- * The memcg_create_mutex will be held whenever a new cgroup is created.
- * As a consequence, any change that needs to protect against new child cgroups
- * appearing has to hold it as well.
- */
-static DEFINE_MUTEX(memcg_create_mutex);
-
 /* Some nice accessors for the vmpressure. */
 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 {
@@ -297,7 +293,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
        return mem_cgroup_from_css(css);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_SLOB
 /*
  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
  * The main reason for not using cgroup id for this:
@@ -349,7 +345,7 @@ void memcg_put_cache_ids(void)
 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* !CONFIG_SLOB */
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
@@ -370,13 +366,6 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
  *
  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
  * is returned.
- *
- * XXX: The above description of behavior on the default hierarchy isn't
- * strictly true yet as replace_page_cache_page() can modify the
- * association before @page is released even on the default hierarchy;
- * however, the current and planned usages don't mix the the two functions
- * and replace_page_cache_page() will soon be updated to make the invariant
- * actually true.
  */
 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 {
@@ -896,17 +885,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                if (css == &root->css)
                        break;
 
-               if (css_tryget(css)) {
-                       /*
-                        * Make sure the memcg is initialized:
-                        * mem_cgroup_css_online() orders the the
-                        * initialization against setting the flag.
-                        */
-                       if (smp_load_acquire(&memcg->initialized))
-                               break;
-
-                       css_put(css);
-               }
+               if (css_tryget(css))
+                       break;
 
                memcg = NULL;
        }
@@ -1233,7 +1213,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                pr_cont(":");
 
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-                       if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+                       if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                                continue;
                        pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
@@ -1272,9 +1252,12 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
        limit = memcg->memory.limit;
        if (mem_cgroup_swappiness(memcg)) {
                unsigned long memsw_limit;
+               unsigned long swap_limit;
 
                memsw_limit = memcg->memsw.limit;
-               limit = min(limit + total_swap_pages, memsw_limit);
+               swap_limit = memcg->swap.limit;
+               swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
+               limit = min(limit + swap_limit, memsw_limit);
        }
        return limit;
 }
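A worked example of the hunk above, with hypothetical numbers: if memory.limit is 1000 pages, the memcg's swap.limit is 200 pages, the system has 10000 pages of swap and memsw.limit is unrestricted, the old code reported min(1000 + 10000, max) = 11000 pages, while the new code caps the swap contribution at the memcg's own limit and reports min(1000 + min(200, 10000), max) = 1200 pages.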
@@ -2203,7 +2186,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
                unlock_page_lru(page, isolated);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_SLOB
 static int memcg_alloc_cache_id(void)
 {
        int id, size;
@@ -2378,16 +2361,17 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
        struct page_counter *counter;
        int ret;
 
-       if (!memcg_kmem_is_active(memcg))
+       if (!memcg_kmem_online(memcg))
                return 0;
 
-       if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter))
-               return -ENOMEM;
-
        ret = try_charge(memcg, gfp, nr_pages);
-       if (ret) {
-               page_counter_uncharge(&memcg->kmem, nr_pages);
+       if (ret)
                return ret;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+           !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+               cancel_charge(memcg, nr_pages);
+               return -ENOMEM;
        }
 
        page->mem_cgroup = memcg;
@@ -2416,7 +2400,9 @@ void __memcg_kmem_uncharge(struct page *page, int order)
 
        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 
-       page_counter_uncharge(&memcg->kmem, nr_pages);
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               page_counter_uncharge(&memcg->kmem, nr_pages);
+
        page_counter_uncharge(&memcg->memory, nr_pages);
        if (do_memsw_account())
                page_counter_uncharge(&memcg->memsw, nr_pages);
@@ -2424,7 +2410,7 @@ void __memcg_kmem_uncharge(struct page *page, int order)
        page->mem_cgroup = NULL;
        css_put_many(&memcg->css, nr_pages);
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* !CONFIG_SLOB */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
@@ -2684,14 +2670,6 @@ static inline bool memcg_has_children(struct mem_cgroup *memcg)
 {
        bool ret;
 
-       /*
-        * The lock does not prevent addition or deletion of children, but
-        * it prevents a new child from being initialized based on this
-        * parent in css_online(), so it's enough to decide whether
-        * hierarchically inherited attributes can still be changed or not.
-        */
-       lockdep_assert_held(&memcg_create_mutex);
-
        rcu_read_lock();
        ret = css_next_child(NULL, &memcg->css);
        rcu_read_unlock();
@@ -2754,10 +2732,8 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
 
-       mutex_lock(&memcg_create_mutex);
-
        if (memcg->use_hierarchy == val)
-               goto out;
+               return 0;
 
        /*
         * If parent's use_hierarchy is set, we can't make any modifications
@@ -2776,9 +2752,6 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
        } else
                retval = -EINVAL;
 
-out:
-       mutex_unlock(&memcg_create_mutex);
-
        return retval;
 }
 
@@ -2794,6 +2767,18 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
        return val;
 }
 
+static unsigned long tree_events(struct mem_cgroup *memcg,
+                                enum mem_cgroup_events_index idx)
+{
+       struct mem_cgroup *iter;
+       unsigned long val = 0;
+
+       for_each_mem_cgroup_tree(iter, memcg)
+               val += mem_cgroup_read_events(iter, idx);
+
+       return val;
+}
+
 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 {
        unsigned long val;
@@ -2836,6 +2821,9 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
        case _KMEM:
                counter = &memcg->kmem;
                break;
+       case _TCP:
+               counter = &memcg->tcpmem;
+               break;
        default:
                BUG();
        }
@@ -2860,103 +2848,180 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
        }
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-static int memcg_activate_kmem(struct mem_cgroup *memcg,
-                              unsigned long nr_pages)
+#ifndef CONFIG_SLOB
+static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
-       int err = 0;
        int memcg_id;
 
        BUG_ON(memcg->kmemcg_id >= 0);
-       BUG_ON(memcg->kmem_acct_activated);
-       BUG_ON(memcg->kmem_acct_active);
-
-       /*
-        * For simplicity, we won't allow this to be disabled.  It also can't
-        * be changed if the cgroup has children already, or if tasks had
-        * already joined.
-        *
-        * If tasks join before we set the limit, a person looking at
-        * kmem.usage_in_bytes will have no way to determine when it took
-        * place, which makes the value quite meaningless.
-        *
-        * After it first became limited, changes in the value of the limit are
-        * of course permitted.
-        */
-       mutex_lock(&memcg_create_mutex);
-       if (cgroup_is_populated(memcg->css.cgroup) ||
-           (memcg->use_hierarchy && memcg_has_children(memcg)))
-               err = -EBUSY;
-       mutex_unlock(&memcg_create_mutex);
-       if (err)
-               goto out;
+       BUG_ON(memcg->kmem_state);
 
        memcg_id = memcg_alloc_cache_id();
-       if (memcg_id < 0) {
-               err = memcg_id;
-               goto out;
-       }
-
-       /*
-        * We couldn't have accounted to this cgroup, because it hasn't got
-        * activated yet, so this should succeed.
-        */
-       err = page_counter_limit(&memcg->kmem, nr_pages);
-       VM_BUG_ON(err);
+       if (memcg_id < 0)
+               return memcg_id;
 
        static_branch_inc(&memcg_kmem_enabled_key);
        /*
-        * A memory cgroup is considered kmem-active as soon as it gets
+        * A memory cgroup is considered kmem-online as soon as it gets
         * kmemcg_id. Setting the id after enabling static branching will
         * guarantee no one starts accounting before all call sites are
         * patched.
         */
        memcg->kmemcg_id = memcg_id;
-       memcg->kmem_acct_activated = true;
-       memcg->kmem_acct_active = true;
-out:
-       return err;
+       memcg->kmem_state = KMEM_ONLINE;
+
+       return 0;
 }
 
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
-                                  unsigned long limit)
+static int memcg_propagate_kmem(struct mem_cgroup *parent,
+                               struct mem_cgroup *memcg)
 {
-       int ret;
+       int ret = 0;
 
        mutex_lock(&memcg_limit_mutex);
-       if (!memcg_kmem_is_active(memcg))
-               ret = memcg_activate_kmem(memcg, limit);
-       else
-               ret = page_counter_limit(&memcg->kmem, limit);
+       /*
+        * If the parent cgroup is not kmem-online now, it cannot be
+        * onlined after this point, because it has at least one child
+        * already.
+        */
+       if (memcg_kmem_online(parent) ||
+           (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nokmem))
+               ret = memcg_online_kmem(memcg);
        mutex_unlock(&memcg_limit_mutex);
        return ret;
 }
 
-static int memcg_propagate_kmem(struct mem_cgroup *memcg)
+static void memcg_offline_kmem(struct mem_cgroup *memcg)
 {
-       int ret = 0;
-       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+       struct cgroup_subsys_state *css;
+       struct mem_cgroup *parent, *child;
+       int kmemcg_id;
+
+       if (memcg->kmem_state != KMEM_ONLINE)
+               return;
+       /*
+        * Clear the online state before clearing memcg_caches array
+        * entries. The slab_mutex in memcg_deactivate_kmem_caches()
+        * guarantees that no cache will be created for this cgroup
+        * after we are done (see memcg_create_kmem_cache()).
+        */
+       memcg->kmem_state = KMEM_ALLOCATED;
 
+       memcg_deactivate_kmem_caches(memcg);
+
+       kmemcg_id = memcg->kmemcg_id;
+       BUG_ON(kmemcg_id < 0);
+
+       parent = parent_mem_cgroup(memcg);
        if (!parent)
-               return 0;
+               parent = root_mem_cgroup;
 
-       mutex_lock(&memcg_limit_mutex);
        /*
-        * If the parent cgroup is not kmem-active now, it cannot be activated
-        * after this point, because it has at least one child already.
+        * Change kmemcg_id of this cgroup and all its descendants to the
+        * parent's id, and then move all entries from this cgroup's list_lrus
+        * to ones of the parent. After we have finished, all list_lrus
+        * corresponding to this cgroup are guaranteed to remain empty. The
+        * ordering is imposed by list_lru_node->lock taken by
+        * memcg_drain_all_list_lrus().
         */
-       if (memcg_kmem_is_active(parent))
-               ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
-       mutex_unlock(&memcg_limit_mutex);
-       return ret;
+       css_for_each_descendant_pre(css, &memcg->css) {
+               child = mem_cgroup_from_css(css);
+               BUG_ON(child->kmemcg_id != kmemcg_id);
+               child->kmemcg_id = parent->kmemcg_id;
+               if (!memcg->use_hierarchy)
+                       break;
+       }
+       memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
+
+       memcg_free_cache_id(kmemcg_id);
+}
+
+static void memcg_free_kmem(struct mem_cgroup *memcg)
+{
+       /* css_alloc() failed, offlining didn't happen */
+       if (unlikely(memcg->kmem_state == KMEM_ONLINE))
+               memcg_offline_kmem(memcg);
+
+       if (memcg->kmem_state == KMEM_ALLOCATED) {
+               memcg_destroy_kmem_caches(memcg);
+               static_branch_dec(&memcg_kmem_enabled_key);
+               WARN_ON(page_counter_read(&memcg->kmem));
+       }
 }
 #else
+static int memcg_propagate_kmem(struct mem_cgroup *parent, struct mem_cgroup *memcg)
+{
+       return 0;
+}
+static int memcg_online_kmem(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+static void memcg_offline_kmem(struct mem_cgroup *memcg)
+{
+}
+static void memcg_free_kmem(struct mem_cgroup *memcg)
+{
+}
+#endif /* !CONFIG_SLOB */
+
 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
                                   unsigned long limit)
 {
-       return -EINVAL;
+       int ret = 0;
+
+       mutex_lock(&memcg_limit_mutex);
+       /* Top-level cgroup doesn't propagate from root */
+       if (!memcg_kmem_online(memcg)) {
+               if (cgroup_is_populated(memcg->css.cgroup) ||
+                   (memcg->use_hierarchy && memcg_has_children(memcg)))
+                       ret = -EBUSY;
+               if (ret)
+                       goto out;
+               ret = memcg_online_kmem(memcg);
+               if (ret)
+                       goto out;
+       }
+       ret = page_counter_limit(&memcg->kmem, limit);
+out:
+       mutex_unlock(&memcg_limit_mutex);
+       return ret;
+}
+
+static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
+{
+       int ret;
+
+       mutex_lock(&memcg_limit_mutex);
+
+       ret = page_counter_limit(&memcg->tcpmem, limit);
+       if (ret)
+               goto out;
+
+       if (!memcg->tcpmem_active) {
+               /*
+                * The active flag needs to be written after the static_key
+                * update. This is what guarantees that the socket activation
+                * function is the last one to run. See sock_update_memcg() for
+                * details, and note that we don't mark any socket as belonging
+                * to this memcg until that flag is up.
+                *
+                * We need to do this, because static_keys will span multiple
+                * sites, but we can't control their order. If we mark a socket
+                * as accounted, but the accounting functions are not patched in
+                * yet, we'll lose accounting.
+                *
+                * We never race with the readers in sock_update_memcg(),
+                * because when this value changes, the code to process it is not
+                * patched in yet.
+                */
+               static_branch_inc(&memcg_sockets_enabled_key);
+               memcg->tcpmem_active = true;
+       }
+out:
+       mutex_unlock(&memcg_limit_mutex);
+       return ret;
 }
-#endif /* CONFIG_MEMCG_KMEM */
 
 /*
  * The user of this function is...
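The ordering described in the comment above is the usual static-key publication pattern; the sketch below (not part of the patch, names illustrative) spells it out: the key is bumped before the flag is published, so any reader that observes the flag already runs with the accounting branches patched in.

/*
 * Editorial sketch, not part of the patch: publish the static key first,
 * then the flag, so readers that see the flag never hit an unpatched
 * (unaccounted) branch. Names are illustrative only.
 */
#include <linux/jump_label.h>
#include <linux/compiler.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(demo_enabled_key);
static bool demo_active;	/* analogous to memcg->tcpmem_active */

static void demo_reader(void)
{
	if (static_branch_unlikely(&demo_enabled_key) && READ_ONCE(demo_active))
		pr_info("accounted path\n");
}

static void demo_enable(void)
{
	static_branch_inc(&demo_enabled_key);	/* patch call sites first */
	WRITE_ONCE(demo_active, true);		/* then publish the flag */
}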
@@ -2990,6 +3055,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
                case _KMEM:
                        ret = memcg_update_kmem_limit(memcg, nr_pages);
                        break;
+               case _TCP:
+                       ret = memcg_update_tcp_limit(memcg, nr_pages);
+                       break;
                }
                break;
        case RES_SOFT_LIMIT:
@@ -3016,6 +3084,9 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
        case _KMEM:
                counter = &memcg->kmem;
                break;
+       case _TCP:
+               counter = &memcg->tcpmem;
+               break;
        default:
                BUG();
        }
@@ -3582,88 +3653,6 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
        return 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-       int ret;
-
-       ret = memcg_propagate_kmem(memcg);
-       if (ret)
-               return ret;
-
-       return tcp_init_cgroup(memcg, ss);
-}
-
-static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
-{
-       struct cgroup_subsys_state *css;
-       struct mem_cgroup *parent, *child;
-       int kmemcg_id;
-
-       if (!memcg->kmem_acct_active)
-               return;
-
-       /*
-        * Clear the 'active' flag before clearing memcg_caches arrays entries.
-        * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
-        * guarantees no cache will be created for this cgroup after we are
-        * done (see memcg_create_kmem_cache()).
-        */
-       memcg->kmem_acct_active = false;
-
-       memcg_deactivate_kmem_caches(memcg);
-
-       kmemcg_id = memcg->kmemcg_id;
-       BUG_ON(kmemcg_id < 0);
-
-       parent = parent_mem_cgroup(memcg);
-       if (!parent)
-               parent = root_mem_cgroup;
-
-       /*
-        * Change kmemcg_id of this cgroup and all its descendants to the
-        * parent's id, and then move all entries from this cgroup's list_lrus
-        * to ones of the parent. After we have finished, all list_lrus
-        * corresponding to this cgroup are guaranteed to remain empty. The
-        * ordering is imposed by list_lru_node->lock taken by
-        * memcg_drain_all_list_lrus().
-        */
-       css_for_each_descendant_pre(css, &memcg->css) {
-               child = mem_cgroup_from_css(css);
-               BUG_ON(child->kmemcg_id != kmemcg_id);
-               child->kmemcg_id = parent->kmemcg_id;
-               if (!memcg->use_hierarchy)
-                       break;
-       }
-       memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
-
-       memcg_free_cache_id(kmemcg_id);
-}
-
-static void memcg_destroy_kmem(struct mem_cgroup *memcg)
-{
-       if (memcg->kmem_acct_activated) {
-               memcg_destroy_kmem_caches(memcg);
-               static_branch_dec(&memcg_kmem_enabled_key);
-               WARN_ON(page_counter_read(&memcg->kmem));
-       }
-       tcp_destroy_cgroup(memcg);
-}
-#else
-static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-       return 0;
-}
-
-static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
-{
-}
-
-static void memcg_destroy_kmem(struct mem_cgroup *memcg)
-{
-}
-#endif
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
@@ -4051,7 +4040,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
                .seq_show = memcg_numa_stat_show,
        },
 #endif
-#ifdef CONFIG_MEMCG_KMEM
        {
                .name = "kmem.limit_in_bytes",
                .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
@@ -4084,7 +4072,29 @@ static struct cftype mem_cgroup_legacy_files[] = {
                .seq_show = memcg_slab_show,
        },
 #endif
-#endif
+       {
+               .name = "kmem.tcp.limit_in_bytes",
+               .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
+               .write = mem_cgroup_write,
+               .read_u64 = mem_cgroup_read_u64,
+       },
+       {
+               .name = "kmem.tcp.usage_in_bytes",
+               .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
+               .read_u64 = mem_cgroup_read_u64,
+       },
+       {
+               .name = "kmem.tcp.failcnt",
+               .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
+               .write = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read_u64,
+       },
+       {
+               .name = "kmem.tcp.max_usage_in_bytes",
+               .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
+               .write = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read_u64,
+       },
        { },    /* terminate */
 };
 
@@ -4123,147 +4133,92 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
        kfree(memcg->nodeinfo[node]);
 }
 
-static struct mem_cgroup *mem_cgroup_alloc(void)
-{
-       struct mem_cgroup *memcg;
-       size_t size;
-
-       size = sizeof(struct mem_cgroup);
-       size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-
-       memcg = kzalloc(size, GFP_KERNEL);
-       if (!memcg)
-               return NULL;
-
-       memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-       if (!memcg->stat)
-               goto out_free;
-
-       if (memcg_wb_domain_init(memcg, GFP_KERNEL))
-               goto out_free_stat;
-
-       return memcg;
-
-out_free_stat:
-       free_percpu(memcg->stat);
-out_free:
-       kfree(memcg);
-       return NULL;
-}
-
-/*
- * At destroying mem_cgroup, references from swap_cgroup can remain.
- * (scanning all at force_empty is too costly...)
- *
- * Instead of clearing all references at force_empty, we remember
- * the number of reference from swap_cgroup and free mem_cgroup when
- * it goes down to 0.
- *
- * Removal of cgroup itself succeeds regardless of refs from swap.
- */
-
-static void __mem_cgroup_free(struct mem_cgroup *memcg)
+static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       cancel_work_sync(&memcg->high_work);
-
-       mem_cgroup_remove_from_trees(memcg);
-
+       memcg_wb_domain_exit(memcg);
        for_each_node(node)
                free_mem_cgroup_per_zone_info(memcg, node);
-
        free_percpu(memcg->stat);
-       memcg_wb_domain_exit(memcg);
        kfree(memcg);
 }
 
-static struct cgroup_subsys_state * __ref
-mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+static struct mem_cgroup *mem_cgroup_alloc(void)
 {
        struct mem_cgroup *memcg;
-       long error = -ENOMEM;
+       size_t size;
        int node;
 
-       memcg = mem_cgroup_alloc();
+       size = sizeof(struct mem_cgroup);
+       size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
+
+       memcg = kzalloc(size, GFP_KERNEL);
        if (!memcg)
-               return ERR_PTR(error);
+               return NULL;
+
+       memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+       if (!memcg->stat)
+               goto fail;
 
        for_each_node(node)
                if (alloc_mem_cgroup_per_zone_info(memcg, node))
-                       goto free_out;
+                       goto fail;
 
-       /* root ? */
-       if (parent_css == NULL) {
-               root_mem_cgroup = memcg;
-               page_counter_init(&memcg->memory, NULL);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
-               page_counter_init(&memcg->memsw, NULL);
-               page_counter_init(&memcg->kmem, NULL);
-       }
+       if (memcg_wb_domain_init(memcg, GFP_KERNEL))
+               goto fail;
 
        INIT_WORK(&memcg->high_work, high_work_func);
        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);
-       memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
        INIT_LIST_HEAD(&memcg->event_list);
        spin_lock_init(&memcg->event_list_lock);
-#ifdef CONFIG_MEMCG_KMEM
+       memcg->socket_pressure = jiffies;
+#ifndef CONFIG_SLOB
        memcg->kmemcg_id = -1;
 #endif
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
-#ifdef CONFIG_INET
-       memcg->socket_pressure = jiffies;
-#endif
-       return &memcg->css;
-
-free_out:
-       __mem_cgroup_free(memcg);
-       return ERR_PTR(error);
+       return memcg;
+fail:
+       mem_cgroup_free(memcg);
+       return NULL;
 }
 
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static struct cgroup_subsys_state * __ref
+mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-       struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
-       int ret;
-
-       if (css->id > MEM_CGROUP_ID_MAX)
-               return -ENOSPC;
-
-       if (!parent)
-               return 0;
-
-       mutex_lock(&memcg_create_mutex);
+       struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
+       struct mem_cgroup *memcg;
+       long error = -ENOMEM;
 
-       memcg->use_hierarchy = parent->use_hierarchy;
-       memcg->oom_kill_disable = parent->oom_kill_disable;
-       memcg->swappiness = mem_cgroup_swappiness(parent);
+       memcg = mem_cgroup_alloc();
+       if (!memcg)
+               return ERR_PTR(error);
 
-       if (parent->use_hierarchy) {
+       memcg->high = PAGE_COUNTER_MAX;
+       memcg->soft_limit = PAGE_COUNTER_MAX;
+       if (parent) {
+               memcg->swappiness = mem_cgroup_swappiness(parent);
+               memcg->oom_kill_disable = parent->oom_kill_disable;
+       }
+       if (parent && parent->use_hierarchy) {
+               memcg->use_hierarchy = true;
                page_counter_init(&memcg->memory, &parent->memory);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
+               page_counter_init(&memcg->swap, &parent->swap);
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
-
-               /*
-                * No need to take a reference to the parent because cgroup
-                * core guarantees its existence.
-                */
+               page_counter_init(&memcg->tcpmem, &parent->tcpmem);
        } else {
                page_counter_init(&memcg->memory, NULL);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
+               page_counter_init(&memcg->swap, NULL);
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
+               page_counter_init(&memcg->tcpmem, NULL);
                /*
                 * Deeper hierarchy with use_hierarchy == false doesn't make
                 * much sense so let cgroup subsystem know about this
@@ -4272,23 +4227,31 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                if (parent != root_mem_cgroup)
                        memory_cgrp_subsys.broken_hierarchy = true;
        }
-       mutex_unlock(&memcg_create_mutex);
 
-       ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
-       if (ret)
-               return ret;
+       /* The following stuff does not apply to the root */
+       if (!parent) {
+               root_mem_cgroup = memcg;
+               return &memcg->css;
+       }
+
+       error = memcg_propagate_kmem(parent, memcg);
+       if (error)
+               goto fail;
 
-#ifdef CONFIG_INET
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_inc(&memcg_sockets_enabled_key);
-#endif
 
-       /*
-        * Make sure the memcg is initialized: mem_cgroup_iter()
-        * orders reading memcg->initialized against its callers
-        * reading the memcg members.
-        */
-       smp_store_release(&memcg->initialized, 1);
+       return &memcg->css;
+fail:
+       mem_cgroup_free(memcg);
+       return NULL;
+}
+
+static int
+mem_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+       if (css->id > MEM_CGROUP_ID_MAX)
+               return -ENOSPC;
 
        return 0;
 }
@@ -4310,10 +4273,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        }
        spin_unlock(&memcg->event_list_lock);
 
-       vmpressure_cleanup(&memcg->vmpressure);
-
-       memcg_deactivate_kmem(memcg);
-
+       memcg_offline_kmem(memcg);
        wb_memcg_offline(memcg);
 }
 
@@ -4328,12 +4288,17 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-       memcg_destroy_kmem(memcg);
-#ifdef CONFIG_INET
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_dec(&memcg_sockets_enabled_key);
-#endif
-       __mem_cgroup_free(memcg);
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
+               static_branch_dec(&memcg_sockets_enabled_key);
+
+       vmpressure_cleanup(&memcg->vmpressure);
+       cancel_work_sync(&memcg->high_work);
+       mem_cgroup_remove_from_trees(memcg);
+       memcg_free_kmem(memcg);
+       mem_cgroup_free(memcg);
 }
 
 /**
@@ -5143,6 +5108,59 @@ static int memory_events_show(struct seq_file *m, void *v)
        return 0;
 }
 
+static int memory_stat_show(struct seq_file *m, void *v)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+       int i;
+
+       /*
+        * Provide statistics on the state of the memory subsystem as
+        * well as cumulative event counters that show past behavior.
+        *
+        * This list is ordered following a combination of these gradients:
+        * 1) generic big picture -> specifics and details
+        * 2) reflecting userspace activity -> reflecting kernel heuristics
+        *
+        * Current memory state:
+        */
+
+       seq_printf(m, "anon %llu\n",
+                  (u64)tree_stat(memcg, MEM_CGROUP_STAT_RSS) * PAGE_SIZE);
+       seq_printf(m, "file %llu\n",
+                  (u64)tree_stat(memcg, MEM_CGROUP_STAT_CACHE) * PAGE_SIZE);
+       seq_printf(m, "sock %llu\n",
+                  (u64)tree_stat(memcg, MEMCG_SOCK) * PAGE_SIZE);
+
+       seq_printf(m, "file_mapped %llu\n",
+                  (u64)tree_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED) *
+                  PAGE_SIZE);
+       seq_printf(m, "file_dirty %llu\n",
+                  (u64)tree_stat(memcg, MEM_CGROUP_STAT_DIRTY) *
+                  PAGE_SIZE);
+       seq_printf(m, "file_writeback %llu\n",
+                  (u64)tree_stat(memcg, MEM_CGROUP_STAT_WRITEBACK) *
+                  PAGE_SIZE);
+
+       for (i = 0; i < NR_LRU_LISTS; i++) {
+               struct mem_cgroup *mi;
+               unsigned long val = 0;
+
+               for_each_mem_cgroup_tree(mi, memcg)
+                       val += mem_cgroup_nr_lru_pages(mi, BIT(i));
+               seq_printf(m, "%s %llu\n",
+                          mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
+       }
+
+       /* Accumulated memory events */
+
+       seq_printf(m, "pgfault %lu\n",
+                  tree_events(memcg, MEM_CGROUP_EVENTS_PGFAULT));
+       seq_printf(m, "pgmajfault %lu\n",
+                  tree_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT));
+
+       return 0;
+}
+
 static struct cftype memory_files[] = {
        {
                .name = "current",
@@ -5173,6 +5191,11 @@ static struct cftype memory_files[] = {
                .file_offset = offsetof(struct mem_cgroup, events_file),
                .seq_show = memory_events_show,
        },
+       {
+               .name = "stat",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .seq_show = memory_stat_show,
+       },
        { }     /* terminate */
 };
 
@@ -5269,7 +5292,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                if (page->mem_cgroup)
                        goto out;
 
-               if (do_memsw_account()) {
+               if (do_swap_account) {
                        swp_entry_t ent = { .val = page_private(page), };
                        unsigned short id = lookup_swap_cgroup_id(ent);
 
@@ -5504,7 +5527,8 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 {
        struct mem_cgroup *memcg;
-       int isolated;
+       unsigned int nr_pages;
+       bool compound;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5524,14 +5548,22 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
        if (!memcg)
                return;
 
-       lock_page_lru(oldpage, &isolated);
-       oldpage->mem_cgroup = NULL;
-       unlock_page_lru(oldpage, isolated);
+       /* Force-charge the new page. The old one will be freed soon */
+       compound = PageTransHuge(newpage);
+       nr_pages = compound ? hpage_nr_pages(newpage) : 1;
+
+       page_counter_charge(&memcg->memory, nr_pages);
+       if (do_memsw_account())
+               page_counter_charge(&memcg->memsw, nr_pages);
+       css_get_many(&memcg->css, nr_pages);
 
        commit_charge(newpage, memcg, true);
-}
 
-#ifdef CONFIG_INET
+       local_irq_disable();
+       mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+       memcg_check_events(memcg, newpage);
+       local_irq_enable();
+}
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
 EXPORT_SYMBOL(memcg_sockets_enabled_key);
@@ -5558,10 +5590,8 @@ void sock_update_memcg(struct sock *sk)
        memcg = mem_cgroup_from_task(current);
        if (memcg == root_mem_cgroup)
                goto out;
-#ifdef CONFIG_MEMCG_KMEM
-       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
                goto out;
-#endif
        if (css_tryget_online(&memcg->css))
                sk->sk_memcg = memcg;
 out:
@@ -5587,24 +5617,24 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        gfp_t gfp_mask = GFP_KERNEL;
 
-#ifdef CONFIG_MEMCG_KMEM
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               struct page_counter *counter;
+               struct page_counter *fail;
 
-               if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
-                                           nr_pages, &counter)) {
-                       memcg->tcp_mem.memory_pressure = 0;
+               if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+                       memcg->tcpmem_pressure = 0;
                        return true;
                }
-               page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
-               memcg->tcp_mem.memory_pressure = 1;
+               page_counter_charge(&memcg->tcpmem, nr_pages);
+               memcg->tcpmem_pressure = 1;
                return false;
        }
-#endif
+
        /* Don't block in the packet receive path */
        if (in_softirq())
                gfp_mask = GFP_NOWAIT;
 
+       this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
+
        if (try_charge(memcg, gfp_mask, nr_pages) == 0)
                return true;
 
@@ -5619,19 +5649,17 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
  */
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-#ifdef CONFIG_MEMCG_KMEM
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
-                                     nr_pages);
+               page_counter_uncharge(&memcg->tcpmem, nr_pages);
                return;
        }
-#endif
+
+       this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
+
        page_counter_uncharge(&memcg->memory, nr_pages);
        css_put_many(&memcg->css, nr_pages);
 }
 
-#endif /* CONFIG_INET */
-
 static int __init cgroup_memory(char *s)
 {
        char *token;
@@ -5641,6 +5669,8 @@ static int __init cgroup_memory(char *s)
                        continue;
                if (!strcmp(token, "nosocket"))
                        cgroup_memory_nosocket = true;
+               if (!strcmp(token, "nokmem"))
+                       cgroup_memory_nokmem = true;
        }
        return 0;
 }
@@ -5730,32 +5760,107 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 }
 
+/*
+ * mem_cgroup_try_charge_swap - try charging a swap entry
+ * @page: page being added to swap
+ * @entry: swap entry to charge
+ *
+ * Try to charge @entry to the memcg that @page belongs to.
+ *
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+{
+       struct mem_cgroup *memcg;
+       struct page_counter *counter;
+       unsigned short oldid;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
+               return 0;
+
+       memcg = page->mem_cgroup;
+
+       /* Readahead page, never charged */
+       if (!memcg)
+               return 0;
+
+       if (!mem_cgroup_is_root(memcg) &&
+           !page_counter_try_charge(&memcg->swap, 1, &counter))
+               return -ENOMEM;
+
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+       mem_cgroup_swap_statistics(memcg, true);
+
+       css_get(&memcg->css);
+       return 0;
+}
+
 /**
  * mem_cgroup_uncharge_swap - uncharge a swap entry
  * @entry: swap entry to uncharge
  *
- * Drop the memsw charge associated with @entry.
+ * Drop the swap charge associated with @entry.
  */
 void mem_cgroup_uncharge_swap(swp_entry_t entry)
 {
        struct mem_cgroup *memcg;
        unsigned short id;
 
-       if (!do_memsw_account())
+       if (!do_swap_account)
                return;
 
        id = swap_cgroup_record(entry, 0);
        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        if (memcg) {
-               if (!mem_cgroup_is_root(memcg))
-                       page_counter_uncharge(&memcg->memsw, 1);
+               if (!mem_cgroup_is_root(memcg)) {
+                       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+                               page_counter_uncharge(&memcg->swap, 1);
+                       else
+                               page_counter_uncharge(&memcg->memsw, 1);
+               }
                mem_cgroup_swap_statistics(memcg, false);
                css_put(&memcg->css);
        }
        rcu_read_unlock();
 }
 
+long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
+{
+       long nr_swap_pages = get_nr_swap_pages();
+
+       if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return nr_swap_pages;
+       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
+               nr_swap_pages = min_t(long, nr_swap_pages,
+                                     READ_ONCE(memcg->swap.limit) -
+                                     page_counter_read(&memcg->swap));
+       return nr_swap_pages;
+}
+
+bool mem_cgroup_swap_full(struct page *page)
+{
+       struct mem_cgroup *memcg;
+
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+       if (vm_swap_full())
+               return true;
+       if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return false;
+
+       memcg = page->mem_cgroup;
+       if (!memcg)
+               return false;
+
+       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
+               if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
+                       return true;
+
+       return false;
+}
+
 /* for remembering the boot option */
 #ifdef CONFIG_MEMCG_SWAP_ENABLED
 static int really_do_swap_account __initdata = 1;
@@ -5773,6 +5878,63 @@ static int __init enable_swap_account(char *s)
 }
 __setup("swapaccount=", enable_swap_account);
 
+static u64 swap_current_read(struct cgroup_subsys_state *css,
+                            struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+       return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
+}
+
+static int swap_max_show(struct seq_file *m, void *v)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+       unsigned long max = READ_ONCE(memcg->swap.limit);
+
+       if (max == PAGE_COUNTER_MAX)
+               seq_puts(m, "max\n");
+       else
+               seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
+
+       return 0;
+}
+
+static ssize_t swap_max_write(struct kernfs_open_file *of,
+                             char *buf, size_t nbytes, loff_t off)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+       unsigned long max;
+       int err;
+
+       buf = strstrip(buf);
+       err = page_counter_memparse(buf, "max", &max);
+       if (err)
+               return err;
+
+       mutex_lock(&memcg_limit_mutex);
+       err = page_counter_limit(&memcg->swap, max);
+       mutex_unlock(&memcg_limit_mutex);
+       if (err)
+               return err;
+
+       return nbytes;
+}
+
+static struct cftype swap_files[] = {
+       {
+               .name = "swap.current",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .read_u64 = swap_current_read,
+       },
+       {
+               .name = "swap.max",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .seq_show = swap_max_show,
+               .write = swap_max_write,
+       },
+       { }     /* terminate */
+};
+
 static struct cftype memsw_cgroup_files[] = {
        {
                .name = "memsw.usage_in_bytes",
@@ -5804,6 +5966,8 @@ static int __init mem_cgroup_swap_init(void)
 {
        if (!mem_cgroup_disabled() && really_do_swap_account) {
                do_swap_account = 1;
+               WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
+                                              swap_files));
                WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
                                                  memsw_cgroup_files));
        }
index ff17850a52d92c37817dd3dd45aba37b6bb4da7f..30991f83d0bf54dc537f1927a10883aa5787dd97 100644 (file)
@@ -2582,7 +2582,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        swap_free(entry);
-       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+       if (mem_cgroup_swap_full(page) ||
+           (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
        if (page != swapcache) {
index e88d071648c2dece38b25d3fc8e57091d1fcd1d1..5d453e58ddbf7504e78869b4de406aa360fc2a01 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                goto free_proc_pages;
        }
 
-       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
                /*
index b98e1011858cdefc67108291a3717a48d0324096..fa2ceb2d2655dbd8e06ecd4e02384df5ad5b5891 100644 (file)
@@ -912,6 +912,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        if (!swap.val)
                goto redirty;
 
+       if (mem_cgroup_try_charge_swap(page, swap))
+               goto free_swap;
+
        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
@@ -940,6 +943,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        }
 
        mutex_unlock(&shmem_swaplist_mutex);
+free_swap:
        swapcache_free(swap);
 redirty:
        set_page_dirty(page);
index c63b8699cfa3d853c63de16b9162df09f3cc72db..834ad240c0bb13980fbe10fb29aba166dcff3f28 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -173,7 +173,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 /*
  * Iterate over all memcg caches of the given root cache. The caller must hold
  * slab_mutex.
@@ -251,7 +251,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 
 extern void slab_init_memcg_params(struct kmem_cache *);
 
-#else /* !CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 #define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )
@@ -292,7 +292,7 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
index e016178063e19e86c21a46ea4a39ced03b0fe001..b50aef01ccf7ea97aa621dc63cfd66e111ee3dae 100644 (file)
@@ -128,7 +128,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
        return i;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 void slab_init_memcg_params(struct kmem_cache *s)
 {
        s->memcg_params.is_root_cache = true;
@@ -221,7 +221,7 @@ static inline int init_memcg_params(struct kmem_cache *s,
 static inline void destroy_memcg_params(struct kmem_cache *s)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 /*
  * Find a mergeable slab cache
@@ -477,7 +477,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
        }
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 /*
  * memcg_create_kmem_cache - Create a cache for a memory cgroup.
  * @memcg: The memory cgroup the new cache is for.
@@ -503,10 +503,10 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
        mutex_lock(&slab_mutex);
 
        /*
-        * The memory cgroup could have been deactivated while the cache
+        * The memory cgroup could have been offlined while the cache
         * creation work was pending.
         */
-       if (!memcg_kmem_is_active(memcg))
+       if (!memcg_kmem_online(memcg))
                goto out_unlock;
 
        idx = memcg_cache_id(memcg);
@@ -689,7 +689,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
 {
        return 0;
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
@@ -1123,7 +1123,7 @@ static int slab_show(struct seq_file *m, void *p)
        return 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 int memcg_slab_show(struct seq_file *m, void *p)
 {
        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
index b21fd24b08b1fc886a4d43ca8930e84a810bb5ea..2e1355ac056b02a51778b2b1eef770b276626309 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5207,7 +5207,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
                return -EIO;
 
        err = attribute->store(s, buf, len);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
                struct kmem_cache *c;
 
@@ -5242,7 +5242,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 
 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        int i;
        char *buffer = NULL;
        struct kmem_cache *root_cache;
@@ -5328,7 +5328,7 @@ static struct kset *slab_kset;
 
 static inline struct kset *cache_kset(struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        if (!is_root_cache(s))
                return s->memcg_params.root_cache->memcg_kset;
 #endif
@@ -5405,7 +5405,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
        if (err)
                goto out_del_kobj;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        if (is_root_cache(s)) {
                s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
                if (!s->memcg_kset) {
@@ -5438,7 +5438,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
                 */
                return;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
index 676ff2991380120275ba5d81d9660592bdc15b75..69cb2464e7dcd598dcf18e8601f97c1c4f8bb627 100644 (file)
@@ -170,6 +170,11 @@ int add_to_swap(struct page *page, struct list_head *list)
        if (!entry.val)
                return 0;
 
+       if (mem_cgroup_try_charge_swap(page, entry)) {
+               swapcache_free(entry);
+               return 0;
+       }
+
        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
index 2bb30aa3a4123a547bd29e1585a1234644a92152..c43f654a7b645474592de879f0664e4698d9dfbd 100644 (file)
@@ -785,14 +785,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
                        count--;
        }
 
-       if (!count)
-               mem_cgroup_uncharge_swap(entry);
-
        usage = count | has_cache;
        p->swap_map[offset] = usage;
 
        /* free if no reference */
        if (!usage) {
+               mem_cgroup_uncharge_swap(entry);
                dec_cluster_info_page(p, p->cluster_info, offset);
                if (offset < p->lowest_bit)
                        p->lowest_bit = offset;
@@ -1008,7 +1006,7 @@ int free_swap_and_cache(swp_entry_t entry)
                 * Also recheck PageSwapCache now page is locked (above).
                 */
                if (PageSwapCache(page) && !PageWriteback(page) &&
-                               (!page_mapped(page) || vm_swap_full())) {
+                   (!page_mapped(page) || mem_cgroup_swap_full(page))) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
index 6d1f9200f74e794c403ec687bae513e4f6e40700..c108a6542d05d3e1c880c1f6ab540ced62156e54 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -476,17 +476,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
+       unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */
 
-       len = mm->arg_end - mm->arg_start;
+       down_read(&mm->mmap_sem);
+       arg_start = mm->arg_start;
+       arg_end = mm->arg_end;
+       env_start = mm->env_start;
+       env_end = mm->env_end;
+       up_read(&mm->mmap_sem);
+
+       len = arg_end - arg_start;
 
        if (len > buflen)
                len = buflen;
 
-       res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+       res = access_process_vm(task, arg_start, buffer, len, 0);
 
        /*
         * If the nul at the end of args has been overwritten, then
@@ -497,10 +505,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
                if (len < res) {
                        res = len;
                } else {
-                       len = mm->env_end - mm->env_start;
+                       len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
-                       res += access_process_vm(task, mm->env_start,
+                       res += access_process_vm(task, env_start,
                                                 buffer+res, len, 0);
                        res = strnlen(buffer, res);
                }
index 5ac86956ff9dc09d7dd2f16f02486756b7453320..bd620b65db52680fc8a6e221fe90189c22dca2f1 100644 (file)
@@ -411,7 +411,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
        struct shrinker *shrinker;
        unsigned long freed = 0;
 
-       if (memcg && !memcg_kmem_is_active(memcg))
+       if (memcg && !memcg_kmem_online(memcg))
                return 0;
 
        if (nr_scanned == 0)
@@ -1214,7 +1214,7 @@ cull_mlocked:
 
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
-               if (PageSwapCache(page) && vm_swap_full())
+               if (PageSwapCache(page) && mem_cgroup_swap_full(page))
                        try_to_free_swap(page);
                VM_BUG_ON_PAGE(PageActive(page), page);
                SetPageActive(page);
@@ -1966,10 +1966,11 @@ enum scan_balance {
  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  */
-static void get_scan_count(struct lruvec *lruvec, int swappiness,
+static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                           struct scan_control *sc, unsigned long *nr,
                           unsigned long *lru_pages)
 {
+       int swappiness = mem_cgroup_swappiness(memcg);
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        u64 fraction[2];
        u64 denominator = 0;    /* gcc */
@@ -1996,14 +1997,14 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
        if (current_is_kswapd()) {
                if (!zone_reclaimable(zone))
                        force_scan = true;
-               if (!mem_cgroup_lruvec_online(lruvec))
+               if (!mem_cgroup_online(memcg))
                        force_scan = true;
        }
        if (!global_reclaim(sc))
                force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
+       if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -2193,9 +2194,10 @@ static inline void init_tlb_ubc(void)
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
-                         struct scan_control *sc, unsigned long *lru_pages)
+static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
+                             struct scan_control *sc, unsigned long *lru_pages)
 {
+       struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
        unsigned long nr[NR_LRU_LISTS];
        unsigned long targets[NR_LRU_LISTS];
        unsigned long nr_to_scan;
@@ -2205,7 +2207,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
        struct blk_plug plug;
        bool scan_adjusted;
 
-       get_scan_count(lruvec, swappiness, sc, nr, lru_pages);
+       get_scan_count(lruvec, memcg, sc, nr, lru_pages);
 
        /* Record the original scan target for proportional adjustments later */
        memcpy(targets, nr, sizeof(nr));
@@ -2409,8 +2411,6 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
                        unsigned long lru_pages;
                        unsigned long reclaimed;
                        unsigned long scanned;
-                       struct lruvec *lruvec;
-                       int swappiness;
 
                        if (mem_cgroup_low(root, memcg)) {
                                if (!sc->may_thrash)
@@ -2418,12 +2418,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
                                mem_cgroup_events(memcg, MEMCG_LOW, 1);
                        }
 
-                       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
-                       swappiness = mem_cgroup_swappiness(memcg);
                        reclaimed = sc->nr_reclaimed;
                        scanned = sc->nr_scanned;
 
-                       shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
+                       shrink_zone_memcg(zone, memcg, sc, &lru_pages);
                        zone_lru_pages += lru_pages;
 
                        if (memcg && is_classzone)
@@ -2893,8 +2891,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
                .may_unmap = 1,
                .may_swap = !noswap,
        };
-       struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
-       int swappiness = mem_cgroup_swappiness(memcg);
        unsigned long lru_pages;
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2911,7 +2907,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
         * will pick up pages from other mem cgroup's as well. We hack
         * the priority and make it zero.
         */
-       shrink_lruvec(lruvec, swappiness, &sc, &lru_pages);
+       shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
index e7414cec220b3bce6cd5abbdd6a769f1f2c334a5..2d7c4c11fc635d3822d64e8b2d796414945305f7 100644 (file)
@@ -309,7 +309,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
 
 static void record_obj(unsigned long handle, unsigned long obj)
 {
-       *(unsigned long *)handle = obj;
+       /*
+        * The lsb of @obj represents the handle lock while the other bits
+        * hold the object value the handle points to, so the update must
+        * not be subject to store tearing.
+        */
+       WRITE_ONCE(*(unsigned long *)handle, obj);
 }
 
 /* zpool driver */
@@ -1635,6 +1640,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                free_obj = obj_malloc(d_page, class, handle);
                zs_object_copy(free_obj, used_obj, class);
                index++;
+               /*
+                * record_obj updates the handle's value to free_obj and would
+                * invalidate the lock bit (i.e., HANDLE_PIN_BIT) of the handle,
+                * which breaks synchronization via pin_tag() (e.g., in zs_free),
+                * so keep the lock bit set.
+                */
+               free_obj |= BIT(HANDLE_PIN_BIT);
                record_obj(handle, free_obj);
                unpin_tag(handle);
                obj_free(pool, class, used_obj);
index c29809f765dc5d4d95edd5d6ac3cc321fcb97c88..62c049b647e93424219ad61fbeaec15429cfa9ab 100644 (file)
@@ -56,7 +56,6 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
-obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
index 46ce410703b14f7f93b70daa03bb7c0fe94dd243..4d367b4139a34fe04cc17dfdebab5dc42a53921a 100644 (file)
@@ -24,7 +24,6 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
-#include <net/tcp_memcontrol.h>
 
 static int zero;
 static int one = 1;
index c7d1fb50f3818824fda315d0eb695be86bf0a360..5ced3e4013e3c2119f43b1674e69ff18e281e664 100644 (file)
@@ -73,7 +73,6 @@
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
 #include <net/secure_seq.h>
-#include <net/tcp_memcontrol.h>
 #include <net/busy_poll.h>
 
 #include <linux/inet.h>
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
deleted file mode 100644 (file)
index 18bc7f7..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-#include <net/tcp.h>
-#include <net/tcp_memcontrol.h>
-#include <net/sock.h>
-#include <net/ip.h>
-#include <linux/nsproxy.h>
-#include <linux/memcontrol.h>
-#include <linux/module.h>
-
-int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-       struct page_counter *counter_parent = NULL;
-       /*
-        * The root cgroup does not use page_counters, but rather,
-        * rely on the data already collected by the network
-        * subsystem
-        */
-       if (memcg == root_mem_cgroup)
-               return 0;
-
-       memcg->tcp_mem.memory_pressure = 0;
-
-       if (parent)
-               counter_parent = &parent->tcp_mem.memory_allocated;
-
-       page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
-
-       return 0;
-}
-
-void tcp_destroy_cgroup(struct mem_cgroup *memcg)
-{
-       if (memcg == root_mem_cgroup)
-               return;
-
-       if (memcg->tcp_mem.active)
-               static_branch_dec(&memcg_sockets_enabled_key);
-}
-
-static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
-{
-       int ret;
-
-       if (memcg == root_mem_cgroup)
-               return -EINVAL;
-
-       ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
-       if (ret)
-               return ret;
-
-       if (!memcg->tcp_mem.active) {
-               /*
-                * The active flag needs to be written after the static_key
-                * update. This is what guarantees that the socket activation
-                * function is the last one to run. See sock_update_memcg() for
-                * details, and note that we don't mark any socket as belonging
-                * to this memcg until that flag is up.
-                *
-                * We need to do this, because static_keys will span multiple
-                * sites, but we can't control their order. If we mark a socket
-                * as accounted, but the accounting functions are not patched in
-                * yet, we'll lose accounting.
-                *
-                * We never race with the readers in sock_update_memcg(),
-                * because when this value change, the code to process it is not
-                * patched in yet.
-                */
-               static_branch_inc(&memcg_sockets_enabled_key);
-               memcg->tcp_mem.active = true;
-       }
-
-       return 0;
-}
-
-enum {
-       RES_USAGE,
-       RES_LIMIT,
-       RES_MAX_USAGE,
-       RES_FAILCNT,
-};
-
-static DEFINE_MUTEX(tcp_limit_mutex);
-
-static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
-                               char *buf, size_t nbytes, loff_t off)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
-       unsigned long nr_pages;
-       int ret = 0;
-
-       buf = strstrip(buf);
-
-       switch (of_cft(of)->private) {
-       case RES_LIMIT:
-               /* see memcontrol.c */
-               ret = page_counter_memparse(buf, "-1", &nr_pages);
-               if (ret)
-                       break;
-               mutex_lock(&tcp_limit_mutex);
-               ret = tcp_update_limit(memcg, nr_pages);
-               mutex_unlock(&tcp_limit_mutex);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-       return ret ?: nbytes;
-}
-
-static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-       u64 val;
-
-       switch (cft->private) {
-       case RES_LIMIT:
-               if (memcg == root_mem_cgroup)
-                       val = PAGE_COUNTER_MAX;
-               else
-                       val = memcg->tcp_mem.memory_allocated.limit;
-               val *= PAGE_SIZE;
-               break;
-       case RES_USAGE:
-               if (memcg == root_mem_cgroup)
-                       val = atomic_long_read(&tcp_memory_allocated);
-               else
-                       val = page_counter_read(&memcg->tcp_mem.memory_allocated);
-               val *= PAGE_SIZE;
-               break;
-       case RES_FAILCNT:
-               if (memcg == root_mem_cgroup)
-                       return 0;
-               val = memcg->tcp_mem.memory_allocated.failcnt;
-               break;
-       case RES_MAX_USAGE:
-               if (memcg == root_mem_cgroup)
-                       return 0;
-               val = memcg->tcp_mem.memory_allocated.watermark;
-               val *= PAGE_SIZE;
-               break;
-       default:
-               BUG();
-       }
-       return val;
-}
-
-static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
-                               char *buf, size_t nbytes, loff_t off)
-{
-       struct mem_cgroup *memcg;
-
-       memcg = mem_cgroup_from_css(of_css(of));
-       if (memcg == root_mem_cgroup)
-               return nbytes;
-
-       switch (of_cft(of)->private) {
-       case RES_MAX_USAGE:
-               page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
-               break;
-       case RES_FAILCNT:
-               memcg->tcp_mem.memory_allocated.failcnt = 0;
-               break;
-       }
-
-       return nbytes;
-}
-
-static struct cftype tcp_files[] = {
-       {
-               .name = "kmem.tcp.limit_in_bytes",
-               .write = tcp_cgroup_write,
-               .read_u64 = tcp_cgroup_read,
-               .private = RES_LIMIT,
-       },
-       {
-               .name = "kmem.tcp.usage_in_bytes",
-               .read_u64 = tcp_cgroup_read,
-               .private = RES_USAGE,
-       },
-       {
-               .name = "kmem.tcp.failcnt",
-               .private = RES_FAILCNT,
-               .write = tcp_cgroup_reset,
-               .read_u64 = tcp_cgroup_read,
-       },
-       {
-               .name = "kmem.tcp.max_usage_in_bytes",
-               .private = RES_MAX_USAGE,
-               .write = tcp_cgroup_reset,
-               .read_u64 = tcp_cgroup_read,
-       },
-       { }     /* terminate */
-};
-
-static int __init tcp_memcontrol_init(void)
-{
-       WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, tcp_files));
-       return 0;
-}
-__initcall(tcp_memcontrol_init);
index 4ad8edb46f7c523276e6e837e365202d7ca1e7a7..006396e31cb0dcedf359c5aaa068ab3d28f7501c 100644 (file)
@@ -61,7 +61,6 @@
 #include <net/timewait_sock.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
-#include <net/tcp_memcontrol.h>
 #include <net/busy_poll.h>
 
 #include <linux/proc_fs.h>
index abbdff03ce92307551eb52b780e9a76e1873c203..3e24d0ddb51bfaca18d39669e41dba2bb6a91bb5 100644 (file)
@@ -91,7 +91,7 @@ static const struct file_operations reset_ops = {
 };
 #endif
 
-static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
+static const char *hw_flag_names[] = {
 #define FLAG(F)        [IEEE80211_HW_##F] = #F
        FLAG(HAS_RATE_CONTROL),
        FLAG(RX_INCLUDES_FCS),
@@ -126,9 +126,6 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
        FLAG(SUPPORTS_AMSDU_IN_AMPDU),
        FLAG(BEACON_TX_STATUS),
        FLAG(NEEDS_UNIQUE_STA_ADDR),
-
-       /* keep last for the build bug below */
-       (void *)0x1
 #undef FLAG
 };
 
@@ -148,7 +145,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
        /* fail compilation if somebody adds or removes
         * a flag without updating the name array above
         */
-       BUILD_BUG_ON(hw_flag_names[NUM_IEEE80211_HW_FLAGS] != (void *)0x1);
+       BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS);
 
        for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
                if (test_bit(i, local->hw.flags))
index 39d6bb18ce7669fc8b0ea79978cddee1b44b8fc5..2edbcadb3d7f6daa047c68f78c32e6acb5508bfa 100644 (file)
@@ -130,6 +130,12 @@ _c_flags += $(if $(patsubst n%,, \
                $(CFLAGS_KASAN))
 endif
 
+ifeq ($(CONFIG_UBSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+               $(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \
+               $(CFLAGS_UBSAN))
+endif
+
 # If building the kernel in a separate objtree expand all occurrences
 # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
 
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
new file mode 100644 (file)
index 0000000..8ab6867
--- /dev/null
@@ -0,0 +1,17 @@
+ifdef CONFIG_UBSAN
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=returns-nonnull-attribute)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool)
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum)
+
+ifdef CONFIG_UBSAN_ALIGNMENT
+      CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
+endif
+endif
index c7bf1aa2eeb3cb046b75100d68b23947562b409c..0147c91fa549e6ab46b68c47b46425b272c37571 100755 (executable)
@@ -433,6 +433,28 @@ our @typeList = (
        qr{${Ident}_handler_fn},
        @typeListMisordered,
 );
+
+our $C90_int_types = qr{(?x:
+       long\s+long\s+int\s+(?:un)?signed|
+       long\s+long\s+(?:un)?signed\s+int|
+       long\s+long\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?long\s+long\s+int|
+       (?:(?:un)?signed\s+)?long\s+long|
+       int\s+long\s+long\s+(?:un)?signed|
+       int\s+(?:(?:un)?signed\s+)?long\s+long|
+
+       long\s+int\s+(?:un)?signed|
+       long\s+(?:un)?signed\s+int|
+       long\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?long\s+int|
+       (?:(?:un)?signed\s+)?long|
+       int\s+long\s+(?:un)?signed|
+       int\s+(?:(?:un)?signed\s+)?long|
+
+       int\s+(?:un)?signed|
+       (?:(?:un)?signed\s+)?int
+)};
+
 our @typeListFile = ();
 our @typeListWithAttr = (
        @typeList,
@@ -4517,7 +4539,7 @@ sub process {
                        #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
 
                        $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
-                       $has_arg_concat = 1 if ($ctx =~ /\#\#/);
+                       $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
 
                        $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
                        $dstat =~ s/$;//g;
@@ -4528,7 +4550,7 @@ sub process {
                        # Flatten any parentheses and braces
                        while ($dstat =~ s/\([^\(\)]*\)/1/ ||
                               $dstat =~ s/\{[^\{\}]*\}/1/ ||
-                              $dstat =~ s/\[[^\[\]]*\]/1/)
+                              $dstat =~ s/.\[[^\[\]]*\]/1/)
                        {
                        }
 
@@ -4548,7 +4570,8 @@ sub process {
                                union|
                                struct|
                                \.$Ident\s*=\s*|
-                               ^\"|\"$
+                               ^\"|\"$|
+                               ^\[
                        }x;
                        #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
                        if ($dstat ne '' &&
@@ -5272,6 +5295,26 @@ sub process {
                        }
                }
 
+# check for casts of C90 native int or longer type constants
+               if ($line =~ /(\(\s*$C90_int_types\s*\)\s*)($Constant)\b/) {
+                       my $cast = $1;
+                       my $const = $2;
+                       if (WARN("TYPECAST_INT_CONSTANT",
+                                "Unnecessary typecast of c90 int constant\n" . $herecurr) &&
+                           $fix) {
+                               my $suffix = "";
+                               my $newconst = $const;
+                               $newconst =~ s/${Int_type}$//;
+                               $suffix .= 'U' if ($cast =~ /\bunsigned\b/);
+                               if ($cast =~ /\blong\s+long\b/) {
+                                       $suffix .= 'LL';
+                               } elsif ($cast =~ /\blong\b/) {
+                                       $suffix .= 'L';
+                               }
+                               $fixed[$fixlinenr] =~ s/\Q$cast\E$const\b/$newconst$suffix/;
+                       }
+               }
+
 # check for sizeof(&)
                if ($line =~ /\bsizeof\s*\(\s*\&/) {
                        WARN("SIZEOF_ADDRESS",
index cab641a12dd587b39ab7a745d5c9623ee852c66c..1873421f2305521a6e078f1817c44c77ee9612e3 100755 (executable)
@@ -16,7 +16,9 @@ my $P = $0;
 my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
+use Cwd;
 
+my $cur_path = fastgetcwd() . '/';
 my $lk_path = "./";
 my $email = 1;
 my $email_usename = 1;
@@ -429,6 +431,8 @@ foreach my $file (@ARGV) {
        }
     }
     if ($from_filename) {
+       $file =~ s/^\Q${cur_path}\E//;  #strip any absolute path
+       $file =~ s/^\Q${lk_path}\E//;   #or the path to the lk tree
        push(@files, $file);
        if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
            open(my $f, '<', $file)
index 1832cf701c3d6d44d90adeb278bd04bca489d274..48071ed7c445d025fa4ae57c12f032bfa916521f 100644 (file)
@@ -137,12 +137,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {
        int ret = 0;
        const struct cred *cred, *child_cred;
+       const kernel_cap_t *caller_caps;
 
        rcu_read_lock();
        cred = current_cred();
        child_cred = __task_cred(child);
+       if (mode & PTRACE_MODE_FSCREDS)
+               caller_caps = &cred->cap_effective;
+       else
+               caller_caps = &cred->cap_permitted;
        if (cred->user_ns == child_cred->user_ns &&
-           cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+           cap_issubset(child_cred->cap_permitted, *caller_caps))
                goto out;
        if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
                goto out;
index 8d85435a45d7f6f5ee3a81fb4e134cb0023cc6a4..2d6e9bdea398c69fd7f55bb7615e486078557231 100644 (file)
@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
  */
 static inline unsigned int smk_ptrace_mode(unsigned int mode)
 {
-       switch (mode) {
-       case PTRACE_MODE_READ:
-               return MAY_READ;
-       case PTRACE_MODE_ATTACH:
+       if (mode & PTRACE_MODE_ATTACH)
                return MAY_READWRITE;
-       }
+       if (mode & PTRACE_MODE_READ)
+               return MAY_READ;
 
        return 0;
 }
index d3c19c970a06bf35789cd8444971e7421a00c8ca..cb6ed10816d49ac963a65816a92ca738a7fb42d0 100644 (file)
@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
        int rc = 0;
 
        /* require ptrace target be a child of ptracer on attach */
-       if (mode == PTRACE_MODE_ATTACH) {
+       if (mode & PTRACE_MODE_ATTACH) {
                switch (ptrace_scope) {
                case YAMA_SCOPE_DISABLED:
                        /* No additional restrictions. */
@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
                }
        }
 
-       if (rc) {
+       if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
                printk_ratelimited(KERN_NOTICE
                        "ptrace of pid %d was attempted by: %s (pid %d)\n",
                        child->pid, current->comm, current->pid);