This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "IPFire 3.x development tree".
The branch, master has been updated
       via  8db58f993c25531d74767e969a65d765bf8c8259 (commit)
       via  4c928ab7b40674cd7d8badcbf24aa9e3b8da6d4f (commit)
      from  2b17bc813a00623d4befdf19397a587c507d26ac (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit 8db58f993c25531d74767e969a65d765bf8c8259
Merge: 2b17bc8 4c928ab
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Mon Apr 2 02:40:45 2012 +0200
Merge remote-tracking branch 'ms/kernel-3.2'
commit 4c928ab7b40674cd7d8badcbf24aa9e3b8da6d4f
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Thu Mar 1 19:10:03 2012 +0100
kernel: Update to Linux 3.2.
Introduce a new way of creating configurations:
From now on, there is no longer a full configuration file, as produced by "make oldconfig", for every single architecture and kernel version.
Instead, there is a system that generates those configurations, which makes it easier to track changes, see the differences between architectures, and much more.
There are configuration files for armv5tel kernels, but they do not build because of problems with the grsecurity patchset.
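
The scripts that implement this (kernel/scripts/configure, configcommon.py, configdiff.py, merge.pl) are listed in the summary below. As a rough illustration of the layering idea only, here is a minimal, hypothetical Python sketch, not the actual scripts: it assembles a configuration by applying per-architecture fragments on top of the generic base, with later fragments overriding earlier ones. The fragment file names are taken from this commit's diffstat.

    import re

    # Matches both "CONFIG_FOO=y" style lines and "# CONFIG_FOO is not set" lines.
    OPTION_RE = re.compile(r"^(?:(CONFIG_\w+)=|# (CONFIG_\w+) is not set)")

    def parse_fragment(path):
        """Collect the CONFIG_* options set (or explicitly unset) in one fragment."""
        options = {}
        with open(path) as f:
            for line in f:
                m = OPTION_RE.match(line)
                if m:
                    name = m.group(1) or m.group(2)
                    options[name] = line.rstrip("\n")
        return options

    def merge_fragments(*paths):
        """Later fragments override earlier ones: generic -> arch -> flavour."""
        merged = {}
        for path in paths:
            merged.update(parse_fragment(path))
        return merged

    if __name__ == "__main__":
        # Hypothetical layering, mirroring the new file names from the diffstat below.
        merged = merge_fragments("config-generic",
                                 "config-x86-generic",
                                 "config-i686-default")
        for line in merged.values():
            print(line)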
-----------------------------------------------------------------------
Summary of changes:
 kernel/config-arm-generic                          |  459 +
 kernel/config-armv5tel-kirkwood                    |  130 +
 kernel/config-armv7hl-omap                         |  589 +
 kernel/{config.i686-legacy => config-generic}      | 1286 +-
 kernel/config-i686-default                         |  298 +
 kernel/config-i686-legacy                          |  118 +
 kernel/config-x86-generic                          |  833 +
 kernel/config-x86_64-default                       |  171 +
 kernel/config.armv5tel-versatile                   | 1449 --
 kernel/config.i686                                 | 4870 -----
 kernel/config.x86_64                               | 2932 ---
 kernel/kernel.nm                                   |   91 +-
 ...port-reading-mac-address-from-device-tree.patch |   92 +
 ...ch => grsecurity-2.9-3.2.12-201203221944.patch} |21040 ++++++++++++--------
 ...rrier-state-forever-when-in-user-stp-mode.patch |   98 -
 kernel/scripts/configcommon.py                     |   73 +
 kernel/scripts/configdiff.py                       |   84 +
 kernel/scripts/configure                           |  315 +
 kernel/scripts/merge.pl                            |   66 +
 19 files changed, 16477 insertions(+), 18517 deletions(-)
 create mode 100644 kernel/config-arm-generic
 create mode 100644 kernel/config-armv5tel-kirkwood
 create mode 100644 kernel/config-armv7hl-omap
 rename kernel/{config.i686-legacy => config-generic} (80%)
 create mode 100644 kernel/config-i686-default
 create mode 100644 kernel/config-i686-legacy
 create mode 100644 kernel/config-x86-generic
 create mode 100644 kernel/config-x86_64-default
 delete mode 100644 kernel/config.armv5tel-versatile
 delete mode 100644 kernel/config.i686
 delete mode 100644 kernel/config.x86_64
 create mode 100644 kernel/patches/arm-smsc-support-reading-mac-address-from-device-tree.patch
 rename kernel/patches/{grsecurity-2.2.2-3.1.5-201112101853.patch => grsecurity-2.9-3.2.12-201203221944.patch} (84%)
 delete mode 100644 kernel/patches/linux-3.1-bridge-master-device-stuck-in-no-carrier-state-forever-when-in-user-stp-mode.patch
 create mode 100755 kernel/scripts/configcommon.py
 create mode 100755 kernel/scripts/configdiff.py
 create mode 100755 kernel/scripts/configure
 create mode 100644 kernel/scripts/merge.pl
Difference in files: diff --git a/kernel/config-arm-generic b/kernel/config-arm-generic new file mode 100644 index 0000000..1e2f0af --- /dev/null +++ b/kernel/config-arm-generic @@ -0,0 +1,459 @@ +CONFIG_ARM=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_KTIME_SCALAR=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_ARM_PATCH_PHYS_VIRT=y + +# +# General setup +# +CONFIG_BROKEN_ON_SMP=y +CONFIG_KERNEL_GZIP=y + +# +# IRQ subsystem +# +CONFIG_IRQ_DOMAIN=y + +# +# RCU Subsystem +# +CONFIG_TINY_RCU=y +CONFIG_PERF_USE_VMALLOC=y + +# +# GCOV-based kernel profiling +# +CONFIG_LBDAF=y + +# +# IO Schedulers +# +# CONFIG_MUTEX_SPIN_ON_OWNER is not set + +# +# System Type +# +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +CONFIG_ARCH_VERSATILE=y +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_HIGHBANK is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_PRIMA2 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PICOXCELL is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_MSM is not set +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_ARCH_VT8500 is not set +# CONFIG_ARCH_ZYNQ is not set + +# +# Versatile platform type +# +CONFIG_ARCH_VERSATILE_PB=y +CONFIG_MACH_VERSATILE_AB=y +CONFIG_MACH_VERSATILE_DT=y +CONFIG_PLAT_VERSATILE_CLCD=y +CONFIG_PLAT_VERSATILE_FPGA_IRQ=y +CONFIG_PLAT_VERSATILE_LEDS=y +CONFIG_PLAT_VERSATILE_SCHED_CLOCK=y +CONFIG_PLAT_VERSATILE=y +CONFIG_ARM_TIMER_SP804=y + +# +# Processor Type +# +CONFIG_CPU_ARM926T=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5TJ=y +CONFIG_CPU_PABRT_LEGACY=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_COPY_V4WB=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_CPU_USE_DOMAINS=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_WRITETHROUGH is not set +# CONFIG_CPU_CACHE_ROUND_ROBIN is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_VIC=y +CONFIG_ARM_VIC_NR=2 +CONFIG_ICST=y +CONFIG_PL330=y + +# +# Bus support +# 
+CONFIG_ARM_AMBA=y +CONFIG_PCI_SYSCALL=y +# CONFIG_ARCH_SUPPORTS_MSI is not set + +# +# Kernel Features +# +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_HZ=100 +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +CONFIG_HIGHMEM=y +# CONFIG_HIGHPTE is not set +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPLIT_PTLOCK_CPUS=999999 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_LEDS=y +CONFIG_LEDS_CPU=y +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_USE_OF=y +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_ARM_APPENDED_DTB=y +# CONFIG_ARM_ATAG_DTB_COMPAT is not set +CONFIG_CMDLINE="console=ttyAM0,115200 root=/dev/sda1 rootdelay=20" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +CONFIG_ATAGS_PROC=y +CONFIG_AUTO_ZRELADDR=y + +# +# CPU Power Management +# +# CONFIG_CPU_IDLE is not set + +# +# At least one emulation must be selected +# +CONFIG_VFP=y + +# +# Power management options +# +CONFIG_APM_EMULATION=y +CONFIG_PM_CLK=y +CONFIG_CPU_PM=y +CONFIG_ARM_CPU_SUSPEND=y + +# +# Bluetooth device drivers +# +# CONFIG_RFKILL_GPIO is not set + +# +# Generic Driver Options +# +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_DTC=y +CONFIG_OF=y + +# +# Device Tree and Open Firmware support +# +CONFIG_PROC_DEVICETREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_DEVICE=y +CONFIG_OF_GPIO=y +CONFIG_OF_I2C=m +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +# CONFIG_MG_DISK is not set +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_ATMEL_PWM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_ARM_CHARLCD is not set + +# +# SCSI Transports +# +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_NSP32=m + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_ATM_HE is not set + +# +# CAIF transport drivers +# +CONFIG_DM9000=m +# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set +CONFIG_NET_VENDOR_FARADAY=y +# CONFIG_FTMAC100 is not set +# CONFIG_FTGMAC100 is not set +# CONFIG_MLX4_EN is not set +# CONFIG_MLX4_CORE is not set +CONFIG_AX88796=m +CONFIG_AX88796_93CX6=y +CONFIG_SMC91X=m +CONFIG_SMC911X=m +CONFIG_SMSC911X=m +# CONFIG_SMSC911X_ARCH_HOOKS is not set +# CONFIG_SUNGEM is not set + +# +# ISDN feature submodules +# +# CONFIG_ISDN_DRV_LOOP is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO_AMBAKMI=m + +# +# Serial drivers +# +# CONFIG_SERIAL_8250_DW is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=m +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +CONFIG_I2C_VERSATILE=m + +# +# Memory mapped GPIO drivers: +# +# CONFIG_GPIO_PL061 is not set + +# +# 1-wire Slaves +# +# CONFIG_APM_POWER is not set + +# +# Watchdog Device Drivers +# +# CONFIG_ARM_SP805_WATCHDOG is not set +# CONFIG_DW_WATCHDOG 
is not set +# CONFIG_MAX63XX_WATCHDOG is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set + +# +# Customize TV tuners +# +CONFIG_VIDEOBUF2_DMA_CONTIG=m + +# +# Miscelaneous helper chips +# +CONFIG_VIDEO_CAFE_CCIC=m +# CONFIG_VIDEO_SH_MOBILE_CSI2 is not set +# CONFIG_VIDEO_SH_MOBILE_CEU is not set + +# +# Graphics support +# +# CONFIG_DRM_RADEON is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=m +CONFIG_FB_CFB_COPYAREA=m +CONFIG_FB_CFB_IMAGEBLIT=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_SYS_FOPS=m + +# +# Frame buffer hardware drivers +# +CONFIG_FB_ARMCLCD=m +# CONFIG_FB_RADEON is not set +# CONFIG_FB_SAVAGE is not set + +# +# Console display driver support +# +# CONFIG_SND_ALI5451 is not set +CONFIG_SND_ARM=y +CONFIG_SND_ARMAACI=m +CONFIG_SND_SOC=m +# CONFIG_SND_SOC_CACHE_LZO is not set +CONFIG_SND_SOC_I2C_AND_SPI=m +# CONFIG_SND_SOC_ALL_CODECS is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_ULPI is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI_PXAV3=m +CONFIG_MMC_SDHCI_PXAV2=m +CONFIG_MMC_DW=m +# CONFIG_MMC_DW_IDMAC is not set + +# +# LED drivers +# +# CONFIG_LEDS_RENESAS_TPU is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_PL030=m +CONFIG_RTC_DRV_PL031=m + +# +# DMA Devices +# +# CONFIG_AMBA_PL08X is not set +# CONFIG_DW_DMAC is not set +CONFIG_PL330_DMA=y + +# +# Virtio drivers +# +# CONFIG_DRM_NOUVEAU is not set + +# +# Speakup console speech +# +CONFIG_CLKDEV_LOOKUP=y + +# +# Hardware Spinlock drivers +# +CONFIG_CLKSRC_MMIO=y + +# +# Pseudo filesystems +# +# CONFIG_HUGETLB_PAGE is not set + +# +# Kernel hacking +# +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DEBUG_HIGHMEM is not set +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_ARM_UNWIND=y +CONFIG_OLD_MCOUNT=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Miscellaneous hardening features +# +CONFIG_PAX_MEMORY_SANITIZE=y +CONFIG_LSM_MMAP_MIN_ADDR=32768 + +# +# Random Number Generation +# +CONFIG_CRYPTO_DEV_HIFN_795X=m +CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y + +# +# Library routines +# +CONFIG_AUDIT_GENERIC=y +CONFIG_GENERIC_ATOMIC64=y diff --git a/kernel/config-armv5tel-kirkwood b/kernel/config-armv5tel-kirkwood new file mode 100644 index 0000000..38a8f21 --- /dev/null +++ b/kernel/config-armv5tel-kirkwood @@ -0,0 +1,130 @@ + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_CHIP=y + +# +# System Type +# +# CONFIG_ARCH_VERSATILE is not set +CONFIG_ARCH_KIRKWOOD=y + +# +# Marvell Kirkwood Implementations +# +CONFIG_MACH_DB88F6281_BP=y +CONFIG_MACH_RD88F6192_NAS=y +CONFIG_MACH_RD88F6281=y +CONFIG_MACH_MV88F6281GTW_GE=y +CONFIG_MACH_SHEEVAPLUG=y +CONFIG_MACH_ESATA_SHEEVAPLUG=y +CONFIG_MACH_GURUPLUG=y +CONFIG_MACH_TS219=y +CONFIG_MACH_TS41X=y +CONFIG_MACH_DOCKSTAR=y +CONFIG_MACH_OPENRD=y +CONFIG_MACH_OPENRD_BASE=y +CONFIG_MACH_OPENRD_CLIENT=y +CONFIG_MACH_OPENRD_ULTIMATE=y +CONFIG_MACH_NETSPACE_V2=y +CONFIG_MACH_INETSPACE_V2=y +CONFIG_MACH_NETSPACE_MAX_V2=y +CONFIG_MACH_D2NET_V2=y +CONFIG_MACH_NET2BIG_V2=y +CONFIG_MACH_NET5BIG_V2=y +CONFIG_MACH_T5325=y + +# +# System MMU +# +CONFIG_PLAT_ORION=y + +# +# Processor Type +# +CONFIG_CPU_FEROCEON=y +# CONFIG_CPU_FEROCEON_OLD_ID is not set +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_COPY_FEROCEON=y +CONFIG_CPU_TLB_FEROCEON=y + +# +# Processor Features +# 
+CONFIG_OUTER_CACHE=y +CONFIG_CACHE_FEROCEON_L2=y +CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH=y + +# +# Power management options +# +# CONFIG_ARM_CPU_SUSPEND is not set + +# +# CAIF transport drivers +# +CONFIG_MV643XX_ETH=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_MV64XXX=m + +# +# Enable Device Drivers -> PPS to see the PTP clock options. +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers: +# +CONFIG_GPIO_GENERIC_PLATFORM=y + +# +# SPI GPIO expanders: +# +CONFIG_GPIO_MCP23S08=m + +# +# Watchdog Device Drivers +# +CONFIG_ORION_WATCHDOG=m + +# +# Console display driver support +# +CONFIG_SND_KIRKWOOD_SOC=m +CONFIG_SND_KIRKWOOD_SOC_I2S=m +CONFIG_SND_KIRKWOOD_SOC_OPENRD=m +CONFIG_SND_KIRKWOOD_SOC_T5325=m +CONFIG_SND_SOC_ALC5623=m +CONFIG_SND_SOC_CS42L51=m + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_MVSDIO=m + +# +# LED drivers +# +CONFIG_LEDS_NS2=m +CONFIG_LEDS_NETXBIG=m + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MV=m + +# +# DMA Devices +# +CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_MV_XOR=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_DEV_MV_CESA=m diff --git a/kernel/config-armv7hl-omap b/kernel/config-armv7hl-omap new file mode 100644 index 0000000..920644c --- /dev/null +++ b/kernel/config-armv7hl-omap @@ -0,0 +1,589 @@ +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_ARCH_HAS_CPUFREQ=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_CHIP=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +CONFIG_RCU_FAST_NO_HZ=y + +# +# Kernel Performance Events And Counters +# +CONFIG_USE_GENERIC_SMP_HELPERS=y + +# +# GCOV-based kernel profiling +# +CONFIG_STOP_MACHINE=y + +# +# IO Schedulers +# +CONFIG_PADATA=y +CONFIG_MUTEX_SPIN_ON_OWNER=y + +# +# System Type +# +# CONFIG_ARCH_VERSATILE is not set +CONFIG_ARCH_OMAP=y + +# +# TI OMAP Common Features +# +CONFIG_ARCH_OMAP_OTG=y +# CONFIG_ARCH_OMAP1 is not set +CONFIG_ARCH_OMAP2PLUS=y + +# +# OMAP Feature Selections +# +CONFIG_OMAP_SMARTREFLEX=y +CONFIG_OMAP_SMARTREFLEX_CLASS3=y +CONFIG_OMAP_RESET_CLOCKS=y +CONFIG_OMAP_MUX=y +# CONFIG_OMAP_MUX_DEBUG is not set +CONFIG_OMAP_MUX_WARNINGS=y +CONFIG_OMAP_MCBSP=y +CONFIG_OMAP_MBOX_FWK=m +CONFIG_OMAP_MBOX_KFIFO_SIZE=256 +CONFIG_OMAP_32K_TIMER=y +# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set +CONFIG_OMAP_32K_TIMER_HZ=128 +CONFIG_OMAP_DM_TIMER=y +CONFIG_OMAP_PM_NOOP=y +CONFIG_MACH_OMAP_GENERIC=y + +# +# TI OMAP2/3/4 Specific Features +# +CONFIG_ARCH_OMAP2PLUS_TYPICAL=y +# CONFIG_ARCH_OMAP2 is not set +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +CONFIG_SOC_OMAP3430=y +# CONFIG_SOC_OMAPTI816X is not set +CONFIG_OMAP_PACKAGE_CBB=y +CONFIG_OMAP_PACKAGE_CUS=y +CONFIG_OMAP_PACKAGE_CBP=y +CONFIG_OMAP_PACKAGE_CBL=y +CONFIG_OMAP_PACKAGE_CBS=y + +# +# OMAP Board Type +# +CONFIG_MACH_OMAP3_BEAGLE=y +CONFIG_MACH_DEVKIT8000=y +CONFIG_MACH_OMAP_LDP=y +CONFIG_MACH_OMAP3530_LV_SOM=y +CONFIG_MACH_OMAP3_TORPEDO=y +CONFIG_MACH_OVERO=y +CONFIG_MACH_OMAP3EVM=y +CONFIG_MACH_OMAP3517EVM=y +CONFIG_MACH_CRANEBOARD=y +CONFIG_MACH_OMAP3_PANDORA=y +CONFIG_MACH_OMAP3_TOUCHBOOK=y +CONFIG_MACH_OMAP_3430SDP=y +CONFIG_MACH_NOKIA_RM680=y +CONFIG_MACH_NOKIA_RX51=y +CONFIG_MACH_OMAP_ZOOM2=y +CONFIG_MACH_OMAP_ZOOM3=y +CONFIG_MACH_CM_T35=y +CONFIG_MACH_CM_T3517=y +CONFIG_MACH_CM_T3730=y +CONFIG_MACH_IGEP0020=y +CONFIG_MACH_IGEP0030=y +CONFIG_MACH_SBC3530=y +CONFIG_MACH_OMAP_3630SDP=y +CONFIG_MACH_OMAP_4430SDP=y +CONFIG_MACH_OMAP4_PANDA=y +CONFIG_OMAP3_EMU=y +# CONFIG_OMAP3_SDRC_AC_TIMING is 
not set + +# +# Processor Type +# +CONFIG_CPU_V7=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_HAS_ASID=y + +# +# Processor Features +# +CONFIG_ARM_THUMBEE=y +CONFIG_SWP_EMULATE=y +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y +CONFIG_CACHE_L2X0=y +CONFIG_CACHE_PL310=y +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_742230 is not set +# CONFIG_ARM_ERRATA_742231 is not set +CONFIG_PL310_ERRATA_588369=y +CONFIG_ARM_ERRATA_720789=y +CONFIG_PL310_ERRATA_727915=y +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_ARM_ERRATA_751472 is not set +# CONFIG_PL310_ERRATA_753970 is not set +# CONFIG_ARM_ERRATA_754322 is not set +# CONFIG_ARM_ERRATA_754327 is not set +# CONFIG_ARM_ERRATA_764369 is not set +CONFIG_PL310_ERRATA_769419=y +CONFIG_ARM_GIC=y + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +CONFIG_ARM_CPU_TOPOLOGY=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=2 +CONFIG_HOTPLUG_CPU=y +CONFIG_LOCAL_TIMERS=y +CONFIG_HZ=128 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SPLIT_PTLOCK_CPUS=4 + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# At least one emulation must be selected +# +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Power management options +# +CONFIG_PM_SLEEP_SMP=y +CONFIG_ARCH_HAS_OPP=y +CONFIG_PM_OPP=y + +# +# Classification +# +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y + +# +# Bluetooth device drivers +# +# CONFIG_RFKILL_REGULATOR is not set + +# +# Generic Driver Options +# +CONFIG_MTD=y +CONFIG_MTD_TESTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +CONFIG_SM_FTL=m +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_RAM=m +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PLATRAM=m + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_SLRAM=m +CONFIG_MTD_PHRAM=m +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_DOCG3 is not set +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_BCH is not set +# CONFIG_MTD_SM_COMMON is not 
set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +# CONFIG_MTD_NAND_GPIO is not set +CONFIG_MTD_NAND_OMAP2=y +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_NANDSIM is not set +CONFIG_MTD_NAND_PLATFORM=y +# CONFIG_MTD_ALAUDA is not set +CONFIG_MTD_ONENAND=y +# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set +# CONFIG_MTD_ONENAND_GENERIC is not set +CONFIG_MTD_ONENAND_OMAP2=y +# CONFIG_MTD_ONENAND_OTP is not set +CONFIG_MTD_ONENAND_2X_PROGRAM=y +CONFIG_MTD_ONENAND_SIM=m + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set + +# +# Device Tree and Open Firmware support +# +CONFIG_OF_I2C=y +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_MG_DISK=m +CONFIG_MG_DISK_RES=0 + +# +# Generic fallback / legacy drivers +# +# CONFIG_MULTICORE_RAID456 is not set + +# +# CAIF transport drivers +# +# CONFIG_TI_DAVINCI_EMAC is not set +# CONFIG_TI_DAVINCI_MDIO is not set +# CONFIG_TI_DAVINCI_CPDMA is not set + +# +# Input device support +# +CONFIG_INPUT_FF_MEMLESS=y + +# +# Input Device Drivers +# +# CONFIG_KEYBOARD_OMAP4 is not set +CONFIG_KEYBOARD_TWL4030=m +CONFIG_MOUSE_GPIO=m +CONFIG_INPUT_TWL4030_PWRBUTTON=y +CONFIG_INPUT_TWL4030_VIBRA=y +CONFIG_INPUT_TWL6040_VIBRA=y + +# +# Serial drivers +# +CONFIG_SERIAL_8250_DETECT_IRQ=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y +CONFIG_I2C=y + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_OMAP=y + +# +# Enable Device Drivers -> PPS to see the PTP clock options. +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_SX150X is not set +CONFIG_GPIO_TWL4030=y + +# +# 1-wire Bus Masters +# +# CONFIG_HDQ_MASTER_OMAP is not set + +# +# 1-wire Slaves +# +# CONFIG_CHARGER_TWL4030 is not set + +# +# Watchdog Device Drivers +# +# CONFIG_MPCORE_WATCHDOG is not set +CONFIG_OMAP_WATCHDOG=y +# CONFIG_TWL4030_WATCHDOG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_88PM860X is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +CONFIG_TWL4030_CORE=y +# CONFIG_TWL4030_MADC is not set +CONFIG_TWL4030_POWER=y +CONFIG_MFD_TWL4030_AUDIO=y +# CONFIG_TWL6030_PWM is not set +CONFIG_TWL6040_CORE=y +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +CONFIG_MFD_OMAP_USB_HOST=y +# CONFIG_MFD_AAT2870_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +CONFIG_REGULATOR_GPIO=y +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +CONFIG_REGULATOR_TWL4030=y +# CONFIG_REGULATOR_WM8400 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set + +# 
+# Miscelaneous helper chips +# +# CONFIG_VIDEO_VPFE_CAPTURE is not set +CONFIG_VIDEO_OMAP2_VOUT_VRFB=y +CONFIG_VIDEO_OMAP2_VOUT=m + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_TTPCI_EEPROM is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_FB_DDC is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_BACKLIGHT is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set +CONFIG_OMAP2_VRAM=y +CONFIG_OMAP2_VRFB=y +CONFIG_OMAP2_DSS=y +CONFIG_OMAP2_VRAM_SIZE=12 +CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y +# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set +CONFIG_OMAP2_DSS_DPI=y +# CONFIG_OMAP2_DSS_RFBI is not set +CONFIG_OMAP2_DSS_VENC=y +CONFIG_OMAP4_DSS_HDMI=y +# CONFIG_OMAP2_DSS_SDI is not set +# CONFIG_OMAP2_DSS_DSI is not set +# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set +CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=1 +CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET=y +CONFIG_FB_OMAP2=y +CONFIG_FB_OMAP2_DEBUG_SUPPORT=y +CONFIG_FB_OMAP2_NUM_FBS=3 + +# +# OMAP2/3 Display Device Drivers +# +CONFIG_PANEL_GENERIC_DPI=y +# CONFIG_PANEL_DVI is not set +CONFIG_PANEL_SHARP_LS037V7DW01=y +# CONFIG_PANEL_PICODLP is not set +# CONFIG_LCD_PLATFORM is not set + +# +# Console display driver support +# +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +# CONFIG_SND_OMAP_SOC is not set + +# +# Special HID drivers +# +# CONFIG_USB_ARCH_HAS_XHCI is not set + +# +# USB Host Controller Drivers +# +CONFIG_USB_EHCI_HCD_OMAP=y +CONFIG_USB_OHCI_HCD_OMAP3=y + +# +# OTG and related infrastructure +# +CONFIG_USB_GPIO_VBUS=y +# CONFIG_ISP1301_OMAP is not set +CONFIG_TWL4030_USB=y +CONFIG_TWL6030_USB=y +CONFIG_MMC=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_SDHCI=y +CONFIG_MMC_OMAP=y +CONFIG_MMC_OMAP_HS=y + +# +# LED drivers +# +CONFIG_LEDS_GPIO=y +# CONFIG_LEDS_REGULATOR is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_HEARTBEAT=y + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_TWL4030 is not set + +# +# Virtio drivers +# +# CONFIG_TIDSPBRIDGE is not set + +# +# Speakup console speech +# +CONFIG_HWSPINLOCK=m + +# +# Hardware Spinlock drivers +# +CONFIG_HWSPINLOCK_OMAP=m +# CONFIG_OMAP_IOMMU is not set + +# +# Pseudo filesystems +# +# CONFIG_JFFS2_FS is not set + +# +# Kernel hacking +# +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set +CONFIG_OC_ETM=y + +# +# Miscellaneous hardening features +# +CONFIG_PAX_REFCOUNT=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_PCRYPT=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_DEV_OMAP_SHAM=m +CONFIG_CRYPTO_DEV_OMAP_AES=m + +# +# Library routines +# +CONFIG_CPU_RMAP=y diff --git a/kernel/config-generic b/kernel/config-generic new file mode 100644 index 0000000..b918e5c --- /dev/null +++ b/kernel/config-generic @@ -0,0 +1,4166 @@ +# +# +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_GPIO=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_HAVE_IRQ_WORK=y +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" 
+CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_FHANDLE=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_AUDIT=y +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +CONFIG_HAVE_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_SPARSE_IRQ=y + +# +# RCU Subsystem +# +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y +# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_MM_OWNER=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_PERF_COUNTERS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_BLOCK=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# 
CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_INLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +CONFIG_INLINE_READ_UNLOCK=y +# CONFIG_INLINE_READ_UNLOCK_BH is not set +CONFIG_INLINE_READ_UNLOCK_IRQ=y +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +CONFIG_INLINE_WRITE_UNLOCK=y +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_CLEANCACHE=y +CONFIG_SECCOMP=y +CONFIG_CC_STACKPROTECTOR=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y + +# +# Power management and ACPI options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_PM_SLEEP=y +CONFIG_PM_RUNTIME=y +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set + +# +# CPU Frequency scaling +# + +# +# x86 CPU frequency scaling drivers +# + +# +# shared options +# + +# +# Memory power savings +# + +# +# Bus options (PCI etc.) 
+# +CONFIG_PCI=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y + +# +# Executable file formats / Emulations +# +CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_MISC=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_LRO=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_NETLABEL is not set +CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m 
+CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +# CONFIG_NETFILTER_XT_MATCH_IPVS is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m +# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set +# 
CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_DCCP=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PROTO_UDPLITE=m +CONFIG_NF_NAT_PROTO_SCTP=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_NF_NAT_SIP=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +# CONFIG_BRIDGE_EBT_ULOG is not set +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_MSG is not set +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_HMAC_NONE is not set +# CONFIG_SCTP_HMAC_SHA1 is not set +CONFIG_SCTP_HMAC_MD5=y +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +# CONFIG_ATM_LANE is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_NET_DSA=y +CONFIG_NET_DSA_TAG_DSA=y +CONFIG_NET_DSA_TAG_EDSA=y +CONFIG_NET_DSA_TAG_TRAILER=y +CONFIG_NET_DSA_MV88E6XXX=y +CONFIG_NET_DSA_MV88E6060=y +CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y +CONFIG_NET_DSA_MV88E6131=y +CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m 
+CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_INGRESS=m + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m + +# +# Bluetooth device drivers +# +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_BT_WILINK=m +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_CFG80211_DEBUGFS=y +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_DEBUG_MENU is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +# CONFIG_PARPORT_GSC is not set +# 
CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y + +# +# Protocols +# +CONFIG_BLK_DEV=y +# CONFIG_PARIDE is not set +CONFIG_BLK_CPQ_DA=m +CONFIG_BLK_CPQ_CISS_DA=m +# CONFIG_CISS_SCSI_TAPE is not set +CONFIG_BLK_DEV_DAC960=m +CONFIG_BLK_DEV_UMEM=m +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=m +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_OSD is not set +CONFIG_BLK_DEV_SX8=m +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +# CONFIG_PHANTOM is not set +# CONFIG_INTEL_MID_PTI is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +CONFIG_ICS932S401=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_HP_ILO=m +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +CONFIG_DS1682=m +# CONFIG_BMP085 is not set +CONFIG_PCH_PHUB=m +CONFIG_USB_SWITCH_FSA9480=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_TGT=m +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_FC_TGT_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +# CONFIG_SCSI_AIC7XXX_OLD is not set +CONFIG_SCSI_AIC79XX=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +CONFIG_AIC79XX_RESET_DELAY_MS=4000 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC94XX=m +# CONFIG_AIC94XX_DEBUG is not set 
+CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +# CONFIG_SCSI_MVUMI is not set +CONFIG_SCSI_DPT_I2O=m +CONFIG_SCSI_ADVANSYS=m +CONFIG_SCSI_ARCMSR=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS_LOGGING is not set +CONFIG_SCSI_HPTIOP=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_DMX3191D=m +CONFIG_SCSI_FUTURE_DOMAIN=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_PPA=m +CONFIG_SCSI_IMM=m +# CONFIG_SCSI_IZIP_EPP16 is not set +# CONFIG_SCSI_IZIP_SLOW_CTR is not set +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_DC395x=m +CONFIG_SCSI_DC390T=m +# CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +# CONFIG_SCSI_SRP is not set +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_INIC162X=m +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PROMISE=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=m +CONFIG_PATA_AMD=m +CONFIG_PATA_ARASAN_CF=m +CONFIG_PATA_ARTOP=m +CONFIG_PATA_ATIIXP=m +CONFIG_PATA_ATP867X=m +CONFIG_PATA_CMD64X=m +CONFIG_PATA_CS5520=m +CONFIG_PATA_CS5530=m +CONFIG_PATA_CS5536=m +CONFIG_PATA_CYPRESS=m +CONFIG_PATA_EFAR=m +CONFIG_PATA_HPT366=m +CONFIG_PATA_HPT37X=m +CONFIG_PATA_HPT3X2N=m +CONFIG_PATA_HPT3X3=m +# CONFIG_PATA_HPT3X3_DMA is not set +CONFIG_PATA_IT8213=m +CONFIG_PATA_IT821X=m +CONFIG_PATA_JMICRON=m +CONFIG_PATA_MARVELL=m +CONFIG_PATA_NETCELL=m +CONFIG_PATA_NINJA32=m +CONFIG_PATA_NS87415=m +CONFIG_PATA_OLDPIIX=m +CONFIG_PATA_OPTIDMA=m +CONFIG_PATA_PDC2027X=m +CONFIG_PATA_PDC_OLD=m +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=m +# CONFIG_PATA_SC1200 is not set +CONFIG_PATA_SCH=m +CONFIG_PATA_SERVERWORKS=m +CONFIG_PATA_SIL680=m +CONFIG_PATA_SIS=m +CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m +CONFIG_PATA_VIA=m +CONFIG_PATA_WINBOND=m + +# +# PIO-only SFF controllers +# +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_RZ1000=m + +# +# Generic fallback / legacy drivers +# +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m 
+CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=y +# CONFIG_DM_THIN_PROVISIONING is not set +CONFIG_DM_MIRROR=y +CONFIG_DM_RAID=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_ZERO=y +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +# CONFIG_DM_FLAKEY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=40 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_OHCI_DEBUG=y +CONFIG_FIREWIRE_SBP2=m +# CONFIG_FIREWIRE_NET is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_I2O=m +# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set +CONFIG_I2O_EXT_ADAPTEC=y +CONFIG_I2O_CONFIG=m +CONFIG_I2O_CONFIG_OLD_IOCTL=y +CONFIG_I2O_BUS=m +CONFIG_I2O_BLOCK=m +CONFIG_I2O_SCSI=m +CONFIG_I2O_PROC=m +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +CONFIG_MII=m +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NETPOLL_TRAP=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +# CONFIG_ATM_ENI_DEBUG is not set +# CONFIG_ATM_ENI_TUNE_BURST is not set +CONFIG_ATM_FIRESTREAM=m +# CONFIG_ATM_ZATM is not set +CONFIG_ATM_NICSTAR=m +# CONFIG_ATM_NICSTAR_USE_SUNI is not set +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set +CONFIG_ATM_IDT77252=m +# CONFIG_ATM_IDT77252_DEBUG is not set +# CONFIG_ATM_IDT77252_RCV_ALL is not set +CONFIG_ATM_IDT77252_USE_SUNI=y +# CONFIG_ATM_AMBASSADOR is not set +# CONFIG_ATM_HORIZON is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +CONFIG_ATM_SOLOS=m + +# +# CAIF transport drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_VORTEX=m +CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_ADAPTEC=y +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_NET_VENDOR_ALTEON=y +CONFIG_ACENIC=m +# CONFIG_ACENIC_OMIT_TIGON_I is not set +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD8111_ETH=m +CONFIG_PCNET32=m +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +CONFIG_TULIP_NAPI=y +CONFIG_TULIP_NAPI_HW_MITIGATION=y +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +CONFIG_PCMCIA_XIRCOM=m +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DE600=m +CONFIG_DE620=m +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +# CONFIG_SUNDANCE_MMIO is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_NET_VENDOR_EXAR=y +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_NET_VENDOR_HP=y +CONFIG_HP100=m +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=m 
+CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_IP1000=m +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_NET_VENDOR_MICREL=y +# CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_KSZ884X_PCI=m +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_FEALNX=m +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_NET_VENDOR_8390=y +CONFIG_NE2K_PCI=m +CONFIG_NET_VENDOR_NVIDIA=y +CONFIG_FORCEDETH=m +CONFIG_NET_VENDOR_OKI=y +CONFIG_PCH_GBE=m +CONFIG_ETHOC=m +# CONFIG_NET_PACKET_ENGINE is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLGE=m +CONFIG_NETXEN_NIC=m +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +CONFIG_NET_VENDOR_RDC=y +CONFIG_R6040=m +CONFIG_NET_VENDOR_SEEQ=y +# CONFIG_SEEQ8005 is not set +CONFIG_NET_VENDOR_SILAN=y +CONFIG_SC92031=m +CONFIG_NET_VENDOR_SIS=y +CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_SFC=m +CONFIG_NET_VENDOR_SMSC=y +CONFIG_EPIC100=m +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_DEBUG_FS is not set +# CONFIG_STMMAC_DA is not set +CONFIG_STMMAC_RING=y +# CONFIG_STMMAC_CHAINED is not set +CONFIG_NET_VENDOR_SUN=y +CONFIG_HAPPYMEAL=m +CONFIG_CASSINI=m +CONFIG_NIU=m +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_TEHUTI=m +CONFIG_NET_VENDOR_TI=y +CONFIG_TLAN=m +CONFIG_NET_VENDOR_VIA=y +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_STE10XP=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MICREL_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_TR is not set + +# +# USB Network Adapters +# +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_WLAN=y +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m 
+CONFIG_AT76C50X_USB=m +# CONFIG_PRISM54 is not set +CONFIG_USB_ZD1201=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_ADM8211=m +CONFIG_MAC80211_HWSIM=m +CONFIG_MWL8K=m +CONFIG_ATH_COMMON=m +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +CONFIG_ATH5K_DEBUG=y +# CONFIG_ATH5K_TRACER is not set +CONFIG_ATH5K_PCI=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +CONFIG_ATH9K_RATE_CONTROL=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +# CONFIG_CARL9170_DEBUGFS is not set +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not set +# CONFIG_ATH6KL is not set +CONFIG_B43=m +CONFIG_B43_SSB=y +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_SDIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +# CONFIG_B43_PHY_HT is not set +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +# CONFIG_BRCMSMAC is not set +# CONFIG_BRCMFMAC is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +# CONFIG_IPW2100_DEBUG is not set +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG is not set +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +# CONFIG_IWLWIFI is not set +CONFIG_IWLWIFI_LEGACY=m + +# +# Debugging Options +# +# CONFIG_IWLWIFI_LEGACY_DEBUG is not set +CONFIG_IWLWIFI_LEGACY_DEBUGFS=y +# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set +CONFIG_IWL4965=m +CONFIG_IWL3945=m +# CONFIG_IWM is not set +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_SDIO=m +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_MESH=y +CONFIG_HERMES=m +# CONFIG_HERMES_PRISM is not set +CONFIG_HERMES_CACHE_FW_ON_INIT=y +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m +CONFIG_NORTEL_HERMES=m +CONFIG_ORINOCO_USB=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_P54_LEDS=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTL8192C_COMMON=m +CONFIG_WL1251=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX_MENU=m +CONFIG_WL12XX=m +CONFIG_WL12XX_SDIO=m +# CONFIG_WL12XX_SDIO_TEST is not set +CONFIG_WL12XX_PLATFORM_DATA=y +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +# CONFIG_MWIFIEX_PCIE is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# 
CONFIG_WAN is not set +CONFIG_VMXNET3=m +CONFIG_ISDN=y +CONFIG_ISDN_I4L=m +CONFIG_ISDN_PPP=y +CONFIG_ISDN_PPP_VJ=y +CONFIG_ISDN_MPP=y +CONFIG_IPPP_FILTER=y +CONFIG_ISDN_PPP_BSDCOMP=m +CONFIG_ISDN_AUDIO=y +CONFIG_ISDN_TTY_FAX=y + +# +# ISDN feature submodules +# +CONFIG_ISDN_DIVERSION=m + +# +# ISDN4Linux hardware drivers +# + +# +# Passive cards +# +CONFIG_ISDN_DRV_HISAX=m + +# +# D-channel protocol features +# +CONFIG_HISAX_EURO=y +CONFIG_DE_AOC=y +CONFIG_HISAX_NO_SENDCOMPLETE=y +CONFIG_HISAX_NO_LLC=y +CONFIG_HISAX_NO_KEYPAD=y +CONFIG_HISAX_1TR6=y +CONFIG_HISAX_NI1=y +CONFIG_HISAX_MAX_CARDS=8 + +# +# HiSax supported cards +# +CONFIG_HISAX_16_3=y +CONFIG_HISAX_TELESPCI=y +CONFIG_HISAX_S0BOX=y +CONFIG_HISAX_FRITZPCI=y +CONFIG_HISAX_AVM_A1_PCMCIA=y +CONFIG_HISAX_ELSA=y +CONFIG_HISAX_DIEHLDIVA=y +CONFIG_HISAX_SEDLBAUER=y +CONFIG_HISAX_NETJET=y +CONFIG_HISAX_NETJET_U=y +CONFIG_HISAX_NICCY=y +CONFIG_HISAX_BKM_A4T=y +CONFIG_HISAX_SCT_QUADRO=y +CONFIG_HISAX_GAZEL=y +CONFIG_HISAX_HFC_PCI=y +CONFIG_HISAX_W6692=y +CONFIG_HISAX_HFC_SX=y +CONFIG_HISAX_ENTERNOW_PCI=y +# CONFIG_HISAX_DEBUG is not set + +# +# HiSax PCMCIA card service modules +# + +# +# HiSax sub driver modules +# +CONFIG_HISAX_ST5481=m +CONFIG_HISAX_HFCUSB=m +CONFIG_HISAX_HFC4S8S=m +CONFIG_HISAX_FRITZ_PCIPNP=m + +# +# Active cards +# +CONFIG_ISDN_CAPI=m +CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_ISDN_CAPI_CAPI20=m +CONFIG_ISDN_CAPI_CAPIDRV=m + +# +# CAPI hardware drivers +# +CONFIG_CAPI_AVM=y +CONFIG_ISDN_DRV_AVMB1_B1PCI=m +CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y +CONFIG_ISDN_DRV_AVMB1_T1PCI=m +CONFIG_ISDN_DRV_AVMB1_C4=m +CONFIG_CAPI_EICON=y +CONFIG_ISDN_DIVAS=m +CONFIG_ISDN_DIVAS_BRIPCI=y +CONFIG_ISDN_DIVAS_PRIPCI=y +CONFIG_ISDN_DIVAS_DIVACAPI=m +CONFIG_ISDN_DIVAS_USERIDI=m +CONFIG_ISDN_DIVAS_MAINT=m +# CONFIG_ISDN_DRV_GIGASET is not set +CONFIG_HYSDN=m +CONFIG_HYSDN_CAPI=y +# CONFIG_MISDN is not set +CONFIG_ISDN_HDLC=m +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_PS2_TOUCHKIT=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +# CONFIG_INPUT_JOYSTICK is not set 
+# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_MPU3050 is not set +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_ROCKETPORT=m +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +# CONFIG_STALDRV is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MFD_HSU is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_PCH_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +CONFIG_PRINTER=m +CONFIG_LP_CONSOLE=y +CONFIG_PPDEV=m +CONFIG_HVC_DRIVER=y +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_NVRAM=y +CONFIG_R3964=m +# CONFIG_APPLICOM is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +# CONFIG_TCG_TPM is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_INTEL_MID is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_PXA_PCI is not set +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set +# CONFIG_I2C_EG20T is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m 
+CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_STUB=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable Device Drivers -> PPS to see the PTP clock options. +# +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers: +# +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_IT8761E is not set +# CONFIG_GPIO_VX855 is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MCP23S08 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=m +CONFIG_W1_CON=y + +# +# 1-wire Bus Masters +# +# CONFIG_W1_MASTER_MATROX is not set +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_DS1WM=m +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2408=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +CONFIG_W1_SLAVE_DS2433_CRC=y +CONFIG_W1_SLAVE_DS2760=m +CONFIG_W1_SLAVE_DS2780=m +CONFIG_W1_SLAVE_BQ27000=m +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2760 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_GPIO is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +# CONFIG_SENSORS_GPIO_FAN is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX6639=m 
+CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_NOWAYOUT=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_ALIM7101_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TMIO is not set +CONFIG_MFD_WM8400=m +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TIMBERDALE is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +CONFIG_MFD_VX855=m +CONFIG_MFD_WL1273_CORE=m +# CONFIG_REGULATOR is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2_COMMON=m +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_DVB_CORE=m +CONFIG_DVB_NET=y +CONFIG_VIDEO_MEDIA=m + +# +# Multimedia drivers +# +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_RC_CORE=m +CONFIG_LIRC=m +CONFIG_RC_MAP=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_RC5_SZ_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_LIRC_CODEC=m +# CONFIG_RC_ATI_REMOTE is not set +CONFIG_IR_IMON=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_STREAMZAP=m +CONFIG_RC_LOOPBACK=m +CONFIG_MEDIA_ATTACH=y +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_CUSTOMISE=y + +# +# 
Customize TV tuners +# +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_VIDEO_V4L2=m +CONFIG_VIDEOBUF_GEN=m +CONFIG_VIDEOBUF_DMA_SG=m +CONFIG_VIDEOBUF_VMALLOC=m +CONFIG_VIDEOBUF_DMA_CONTIG=m +CONFIG_VIDEOBUF_DVB=m +CONFIG_VIDEO_BTCX=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_VIDEO_TUNER=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEO_CAPTURE_DRIVERS=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y +CONFIG_VIDEO_IR_I2C=m + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m + +# +# RDS decoders +# +CONFIG_VIDEO_SAA6588=m + +# +# Video decoders +# +CONFIG_VIDEO_ADV7180=m +CONFIG_VIDEO_BT819=m +CONFIG_VIDEO_BT856=m +CONFIG_VIDEO_BT866=m +CONFIG_VIDEO_KS0127=m +CONFIG_VIDEO_SAA7110=m +CONFIG_VIDEO_SAA711X=m +CONFIG_VIDEO_TVP5150=m +CONFIG_VIDEO_VPX3220=m + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m + +# +# MPEG video encoders +# +CONFIG_VIDEO_CX2341X=m + +# +# Video encoders +# +CONFIG_VIDEO_SAA7127=m +CONFIG_VIDEO_SAA7185=m +CONFIG_VIDEO_ADV7170=m +CONFIG_VIDEO_ADV7175=m + +# +# Camera sensor devices +# +CONFIG_VIDEO_OV7670=m +CONFIG_VIDEO_MT9V011=m + +# +# Flash devices +# + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m + +# +# Miscelaneous helper chips +# +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_VIVI is not set +CONFIG_VIDEO_BT848=m +CONFIG_VIDEO_BT848_DVB=y +CONFIG_VIDEO_BWQCAM=m +CONFIG_VIDEO_CQCAM=m +CONFIG_VIDEO_W9966=m +CONFIG_VIDEO_CPIA2=m +CONFIG_VIDEO_ZORAN=m +CONFIG_VIDEO_ZORAN_DC30=m +CONFIG_VIDEO_ZORAN_ZR36060=m +CONFIG_VIDEO_ZORAN_BUZ=m +CONFIG_VIDEO_ZORAN_DC10=m +CONFIG_VIDEO_ZORAN_LML33=m +CONFIG_VIDEO_ZORAN_LML33R10=m +CONFIG_VIDEO_ZORAN_AVS6EYES=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_MXB=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_TIMBERDALE=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_CX23885=m +# CONFIG_MEDIA_ALTERA_CI is not set +# CONFIG_VIDEO_CX25821 is not set +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_SAA7164=m +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_IMX074=m +CONFIG_SOC_CAMERA_MT9M001=m +CONFIG_SOC_CAMERA_MT9M111=m +CONFIG_SOC_CAMERA_MT9T031=m +CONFIG_SOC_CAMERA_MT9T112=m +CONFIG_SOC_CAMERA_MT9V022=m +CONFIG_SOC_CAMERA_RJ54N1=m +CONFIG_SOC_CAMERA_TW9910=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_SOC_CAMERA_OV2640=m 
+CONFIG_SOC_CAMERA_OV5642=m +CONFIG_SOC_CAMERA_OV6650=m +CONFIG_SOC_CAMERA_OV772X=m +CONFIG_SOC_CAMERA_OV9640=m +CONFIG_SOC_CAMERA_OV9740=m +CONFIG_V4L_USB_DRIVERS=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +# CONFIG_USB_GSPCA_TOPRO is not set +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=y +CONFIG_VIDEO_TLG2300=m +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +# CONFIG_VIDEO_TM6000 is not set +CONFIG_VIDEO_USBVISION=m +# CONFIG_USB_ET61X251 is not set +# CONFIG_USB_SN9C102 is not set +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_USB_ZR364XX=m +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +# CONFIG_VIDEO_MEM2MEM_TESTDEV is not set +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_MAXIRADIO=m +CONFIG_I2C_SI4713=m +CONFIG_RADIO_SI4713=m +CONFIG_USB_DSBR=m +CONFIG_RADIO_SI470X=y +CONFIG_USB_SI470X=m +CONFIG_I2C_SI470X=m +CONFIG_USB_MR800=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +CONFIG_RADIO_WL1273=m + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_RADIO_WL128X is not set +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +CONFIG_DVB_CAPTURE_DRIVERS=y + +# +# Supported SAA7146 based PCI Adapters +# +CONFIG_TTPCI_EEPROM=m +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_PATCH=m + +# +# Supported USB Adapters +# +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m 
+# CONFIG_DVB_USB_PCTV452E is not set +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_FRIIO=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +# CONFIG_DVB_USB_IT913X is not set +# CONFIG_DVB_USB_MXL111SF is not set +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_SMS_SIANO_MDTV=m + +# +# Siano module components +# +CONFIG_SMS_USB_DRV=m +# CONFIG_SMS_SDIO_DRV is not set + +# +# Supported FlexCopII (B2C2) Adapters +# +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set + +# +# Supported BT878 Adapters +# +CONFIG_DVB_BT8XX=m + +# +# Supported Pluto2 Adapters +# +CONFIG_DVB_PLUTO2=m + +# +# Supported SDMC DM1105 Adapters +# +CONFIG_DVB_DM1105=m + +# +# Supported FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y + +# +# Supported Earthsoft PT1 Adapters +# +CONFIG_DVB_PT1=m + +# +# Supported Mantis Adapters +# +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m + +# +# Supported nGene Adapters +# +CONFIG_DVB_NGENE=m + +# +# Supported ddbridge ('Octopus') Adapters +# +# CONFIG_DVB_DDBRIDGE is not set + +# +# Supported DVB Frontends +# +CONFIG_DVB_FE_CUSTOMISE=y + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV6110x=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_TDA10071=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_L64781=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_EC100=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_CXD2820R=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_A8293=m 
+CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_IT913X_FE=m + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=m +CONFIG_DRM_TTM=m +CONFIG_DRM_TDFX=m +CONFIG_DRM_R128=m +CONFIG_DRM_MGA=m +CONFIG_DRM_VIA=m +CONFIG_DRM_SAVAGE=m +CONFIG_DRM_VMWGFX=m +CONFIG_STUB_POULSBO=m +CONFIG_VGASTATE=m +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DDC=m +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_WMT_GE_ROPS is not set +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_SVGALIB=m +# CONFIG_FB_MACMODES is not set +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +CONFIG_FB_CIRRUS=m +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_NVIDIA=m +CONFIG_FB_NVIDIA_I2C=y +# CONFIG_FB_NVIDIA_DEBUG is not set +CONFIG_FB_NVIDIA_BACKLIGHT=y +CONFIG_FB_RIVA=m +# CONFIG_FB_RIVA_I2C is not set +# CONFIG_FB_RIVA_DEBUG is not set +CONFIG_FB_RIVA_BACKLIGHT=y +CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB_MATROX_G=y +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_ATY128=m +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY=m +CONFIG_FB_ATY_CT=y +CONFIG_FB_ATY_GENERIC_LCD=y +CONFIG_FB_ATY_GX=y +CONFIG_FB_ATY_BACKLIGHT=y +CONFIG_FB_S3=m +CONFIG_FB_S3_DDC=y +# CONFIG_FB_SIS is not set +CONFIG_FB_NEOMAGIC=m +CONFIG_FB_KYRO=m +CONFIG_FB_3DFX=m +CONFIG_FB_3DFX_ACCEL=y +CONFIG_FB_3DFX_I2C=y +CONFIG_FB_VOODOO1=m +# CONFIG_FB_VT8623 is not set +CONFIG_FB_TRIDENT=m +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_TMIO is not set +CONFIG_FB_SM501=m +# CONFIG_FB_SMSCUFX is not set +CONFIG_FB_UDL=m +CONFIG_FB_VIRTUAL=m +CONFIG_FB_METRONOME=m +CONFIG_FB_MB862XX=m +CONFIG_FB_MB862XX_PCI_GDC=y +CONFIG_FB_MB862XX_I2C=y +# CONFIG_FB_BROADSHEET is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set + +# +# Display device support +# +CONFIG_DISPLAY_SUPPORT=m + +# +# Display hardware drivers +# + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_DYNAMIC_MINORS=y +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_DEBUG=y +# CONFIG_SND_DEBUG_VERBOSE is not set +CONFIG_SND_PCM_XRUN_DEBUG=y 
+CONFIG_SND_VMASTER=y +CONFIG_SND_RAWMIDI_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_MPU401=m +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 +CONFIG_SND_TEA575X=m +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +CONFIG_SND_ALS300=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CS5535AUDIO=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +CONFIG_SND_FM801=m +CONFIG_SND_FM801_TEA575X_BOOL=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=1 +CONFIG_SND_HDA_INPUT_JACK=y +# CONFIG_SND_HDA_PATCH_LOADER is not set +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y +CONFIG_SND_HDA_CODEC_ANALOG=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_VIA=y +CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_CIRRUS=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_CA0110=y +CONFIG_SND_HDA_CODEC_CA0132=y +CONFIG_SND_HDA_CODEC_CMEDIA=y +CONFIG_SND_HDA_CODEC_SI3054=y +CONFIG_SND_HDA_GENERIC=y +# CONFIG_SND_HDA_POWER_SAVE is not set +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_FIREWIRE_SPEAKERS=m +# CONFIG_SND_ISIGHT is not set +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HIDRAW=y + +# +# USB Input Devices +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_PRODIKEYS is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# 
CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_HOLTEK is not set +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=y +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTRIG is not set +CONFIG_HID_ORTEK=m +# CONFIG_HID_PANTHERLORD is not set +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_QUANTA is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_ROCCAT_COMMON=m +CONFIG_HID_ROCCAT_ARVO=m +CONFIG_HID_ROCCAT_KONE=m +CONFIG_HID_ROCCAT_KONEPLUS=m +CONFIG_HID_ROCCAT_KOVAPLUS=m +CONFIG_HID_ROCCAT_PYRA=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SONY is not set +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_SUNPLUS=m +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +CONFIG_HID_TOPSEED=m +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_ZEROPLUS is not set +CONFIG_HID_ZYDACRON=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y +CONFIG_USB_ARCH_HAS_EHCI=y +CONFIG_USB_ARCH_HAS_XHCI=y +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_DEVICE_CLASS is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_SUSPEND=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_DWC3 is not set +CONFIG_USB_MON=m +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_HCD_DEBUGGING is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_OHCI_HCD=y +# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set +# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +CONFIG_USB_SL811_HCD=m +CONFIG_USB_SL811_HCD_ISO=y +# CONFIG_USB_R8A66597_HCD is not set +CONFIG_USB_WHCI_HCD=m +CONFIG_USB_HWA_HCD=m + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m 
+CONFIG_USB_MICROTEK=m + +# +# USB port drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_SERIAL=m +CONFIG_USB_EZUSB=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +# CONFIG_USB_SERIAL_EMPEG is not set +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_FUNSOFT=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_GARMIN is not set +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +# CONFIG_USB_SERIAL_KEYSPAN is not set +CONFIG_USB_SERIAL_KLSI=m +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MOTOROLA=m +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +# CONFIG_USB_SERIAL_HP4X is not set +# CONFIG_USB_SERIAL_SAFE is not set +CONFIG_USB_SERIAL_SIEMENS_MPI=m +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set +# CONFIG_USB_SERIAL_ZIO is not set +CONFIG_USB_SERIAL_SSU100=m +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +CONFIG_USB_LCD=m +CONFIG_USB_LED=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +CONFIG_USB_FTDI_ELAN=m +# CONFIG_USB_APPLEDISPLAY is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_ISIGHTFW is not set +CONFIG_USB_YUREX=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +# CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_GPIO_VBUS is not set +CONFIG_NOP_USB_XCEIV=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set +# CONFIG_MMC_CLKGATE is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_MMC_BLOCK_BOUNCE=y +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y 
+CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set + +# +# Reporting subsystems +# +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV3029C2=m + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m + +# +# on-CPU RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_TIMB_DMA=m +CONFIG_DMA_ENGINE=y + +# +# DMA Clients +# +CONFIG_NET_DMA=y +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +CONFIG_VIRTIO=m +CONFIG_VIRTIO_RING=m + +# +# Virtio drivers +# +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_MMIO is not set + +# +# Xen driver support +# +CONFIG_STAGING=y +CONFIG_ET131X=m +# CONFIG_USBIP_CORE is not set +# CONFIG_W35UND is not set +# CONFIG_PRISM2_USB is not set +# CONFIG_ECHO is not set +# CONFIG_ASUS_OLED is not set +# CONFIG_PANEL is not set +# CONFIG_R8187SE is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTL8192E is not set +# CONFIG_R8712U is not set +# CONFIG_RTS_PSTOR is not set +# CONFIG_RTS5139 is not set +# CONFIG_TRANZPORT is not set +# CONFIG_POHMELFS is not set +# CONFIG_IDE_PHISON is not set +# CONFIG_LINE6_USB is not set + +# +# I2C encoder or helper chips +# +# CONFIG_USB_SERIAL_QUATECH2 is not set +# CONFIG_USB_SERIAL_QUATECH_USB2 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set +# CONFIG_VME_BUS is not set +# CONFIG_DX_SEP is not set +# CONFIG_IIO is not set +# CONFIG_XVMALLOC is not set +# CONFIG_ZRAM is not set +# CONFIG_ZCACHE is not set +# CONFIG_FB_SM7XX is not set +# CONFIG_CRYSTALHD is not set +# CONFIG_FB_XGI is not set +# CONFIG_USB_ENESTORAGE is not set +# 
CONFIG_BCM_WIMAX is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Hardware Spinlock drivers +# +CONFIG_IOMMU_SUPPORT=y +CONFIG_VIRT_DRIVERS=y +# CONFIG_PM_DEVFREQ is not set + +# +# Firmware Drivers +# + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_NILFS2_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_GENERIC_ACL=y + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_CONFIGFS_FS=m +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EXOFS_FS is not set +CONFIG_ORE=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_OBJLAYOUT=m +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFS_USE_NEW_IDMAPPER is not set +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m 
+CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=m +CONFIG_CIFS_STATS=y +# CONFIG_CIFS_STATS2 is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +CONFIG_CIFS_ACL=y +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf-8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_PRINTK_TIME=y +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_MAGIC_SYSRQ=y +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_LOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_KOBJECT is 
not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_LIST=y +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +CONFIG_BOOT_PRINTK_DELAY=y +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_SYSCTL_SYSCALL_CHECK=y +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set +# CONFIG_BUILD_DOCSRC is not set +CONFIG_DYNAMIC_DEBUG=y +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +CONFIG_STRICT_DEVMEM=y + +# +# Security options +# + +# +# Grsecurity +# +CONFIG_GRKERNSEC=y +# CONFIG_GRKERNSEC_LOW is not set +# CONFIG_GRKERNSEC_MEDIUM is not set +CONFIG_GRKERNSEC_HIGH=y +# CONFIG_GRKERNSEC_CUSTOM is not set + +# +# Memory Protections +# +CONFIG_GRKERNSEC_KMEM=y +CONFIG_GRKERNSEC_PROC_MEMMAP=y +CONFIG_GRKERNSEC_BRUTE=y +CONFIG_GRKERNSEC_MODHARDEN=y +CONFIG_GRKERNSEC_HIDESYM=y +CONFIG_GRKERNSEC_KERN_LOCKOUT=y + +# +# Role Based Access Control Options +# +CONFIG_GRKERNSEC_NO_RBAC=y +# CONFIG_GRKERNSEC_ACL_HIDEKERN is not set +CONFIG_GRKERNSEC_ACL_MAXTRIES=3 +CONFIG_GRKERNSEC_ACL_TIMEOUT=30 + +# +# Filesystem Protections +# +CONFIG_GRKERNSEC_PROC=y +# CONFIG_GRKERNSEC_PROC_USER is not set +CONFIG_GRKERNSEC_PROC_USERGROUP=y +CONFIG_GRKERNSEC_PROC_GID=10 +CONFIG_GRKERNSEC_PROC_ADD=y +CONFIG_GRKERNSEC_LINK=y +CONFIG_GRKERNSEC_FIFO=y +CONFIG_GRKERNSEC_SYSFS_RESTRICT=y +# CONFIG_GRKERNSEC_ROFS is not set +CONFIG_GRKERNSEC_CHROOT=y +CONFIG_GRKERNSEC_CHROOT_MOUNT=y +CONFIG_GRKERNSEC_CHROOT_DOUBLE=y +CONFIG_GRKERNSEC_CHROOT_PIVOT=y +CONFIG_GRKERNSEC_CHROOT_CHDIR=y +CONFIG_GRKERNSEC_CHROOT_CHMOD=y +CONFIG_GRKERNSEC_CHROOT_FCHDIR=y +CONFIG_GRKERNSEC_CHROOT_MKNOD=y +CONFIG_GRKERNSEC_CHROOT_SHMAT=y +CONFIG_GRKERNSEC_CHROOT_UNIX=y +CONFIG_GRKERNSEC_CHROOT_FINDTASK=y +CONFIG_GRKERNSEC_CHROOT_NICE=y +CONFIG_GRKERNSEC_CHROOT_SYSCTL=y +CONFIG_GRKERNSEC_CHROOT_CAPS=y + +# +# Kernel Auditing +# +# CONFIG_GRKERNSEC_AUDIT_GROUP is not set +# CONFIG_GRKERNSEC_EXECLOG is not set +CONFIG_GRKERNSEC_RESLOG=y +# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set +# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set +# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set +CONFIG_GRKERNSEC_AUDIT_MOUNT=y +CONFIG_GRKERNSEC_SIGNAL=y 
+CONFIG_GRKERNSEC_FORKFAIL=y +CONFIG_GRKERNSEC_TIME=y +CONFIG_GRKERNSEC_PROC_IPADDR=y +CONFIG_GRKERNSEC_RWXMAP_LOG=y +CONFIG_GRKERNSEC_AUDIT_TEXTREL=y + +# +# Executable Protections +# +CONFIG_GRKERNSEC_DMESG=y +CONFIG_GRKERNSEC_HARDEN_PTRACE=y +CONFIG_GRKERNSEC_PTRACE_READEXEC=y +CONFIG_GRKERNSEC_SETXID=y +# CONFIG_GRKERNSEC_TPE is not set + +# +# Network Protections +# +CONFIG_GRKERNSEC_RANDNET=y +CONFIG_GRKERNSEC_BLACKHOLE=y +# CONFIG_GRKERNSEC_SOCKET is not set + +# +# Sysctl support +# +CONFIG_GRKERNSEC_SYSCTL=y +CONFIG_GRKERNSEC_SYSCTL_ON=y + +# +# Logging Options +# +CONFIG_GRKERNSEC_FLOODTIME=10 +CONFIG_GRKERNSEC_FLOODBURST=6 + +# +# PaX +# +CONFIG_PAX=y + +# +# PaX Control +# +# CONFIG_PAX_SOFTMODE is not set +CONFIG_PAX_EI_PAX=y +CONFIG_PAX_PT_PAX_FLAGS=y +# CONFIG_PAX_XATTR_PAX_FLAGS is not set +# CONFIG_PAX_NO_ACL_FLAGS is not set +CONFIG_PAX_HAVE_ACL_FLAGS=y +# CONFIG_PAX_HOOK_ACL_FLAGS is not set + +# +# Non-executable pages +# +CONFIG_PAX_NOEXEC=y +CONFIG_PAX_PAGEEXEC=y +CONFIG_PAX_MPROTECT=y +# CONFIG_PAX_MPROTECT_COMPAT is not set +CONFIG_PAX_ELFRELOCS=y +CONFIG_PAX_KERNEXEC_PLUGIN_METHOD="" + +# +# Address Space Layout Randomization +# +CONFIG_PAX_ASLR=y +CONFIG_PAX_RANDUSTACK=y +CONFIG_PAX_RANDMMAP=y + +# +# Miscellaneous hardening features +# +CONFIG_PAX_USERCOPY=y +# CONFIG_PAX_SIZE_OVERFLOW is not set +CONFIG_KEYS=y +# CONFIG_ENCRYPTED_KEYS is not set +CONFIG_KEYS_DEBUG_PROC_KEYS=y +CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=m +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_SEQIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m 
+CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_NLATTR=y +CONFIG_AVERAGE=y +CONFIG_CORDIC=m diff --git a/kernel/config-i686-default b/kernel/config-i686-default new file mode 100644 index 0000000..5a4ba8f --- /dev/null +++ b/kernel/config-i686-default @@ -0,0 +1,298 @@ +# CONFIG_64BIT is not set +CONFIG_X86_32=y +# CONFIG_X86_64 is not set +CONFIG_OUTPUT_FORMAT="elf32-i386" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" +# CONFIG_GENERIC_TIME_VSYSCALL is not set +# CONFIG_ZONE_DMA32 is not set +# CONFIG_AUDIT_ARCH is not set +CONFIG_X86_32_SMP=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx" +CONFIG_KTIME_SCALAR=y + +# +# RCU Subsystem +# +CONFIG_RCU_FANOUT=32 + +# +# GCOV-based kernel profiling +# +CONFIG_LBDAF=y + +# +# Processor type and features +# +CONFIG_X86_BIGSMP=y +# CONFIG_X86_WANT_INTEL_MID is not set +# CONFIG_X86_RDC321X is not set +CONFIG_X86_32_NON_STANDARD=y +# CONFIG_X86_NUMAQ is not set +# CONFIG_X86_SUMMIT is not set +# CONFIG_X86_ES7000 is not set +CONFIG_X86_32_IRIS=m +# CONFIG_LGUEST_GUEST is not set +CONFIG_X86_CYCLONE_TIMER=y +# CONFIG_M386 is not set +# CONFIG_M486 is not set +# CONFIG_M586 is not set +# CONFIG_M586TSC is not set +# CONFIG_M586MMX is not set +CONFIG_M686=y +# CONFIG_MPENTIUMII is not set +# CONFIG_MPENTIUMIII is not set +# CONFIG_MPENTIUMM is not set +# CONFIG_MPENTIUM4 is not set +# CONFIG_MK6 is not set +# CONFIG_MK7 is not set +# CONFIG_MCRUSOE is not set +# CONFIG_MEFFICEON is not set +# CONFIG_MWINCHIPC6 is not set +# CONFIG_MWINCHIP3D is not set +# CONFIG_MELAN is not set +# CONFIG_MGEODEGX1 is not set +# CONFIG_MGEODE_LX is not set +# CONFIG_MCYRIXIII is not set +# CONFIG_MVIAC3_2 is not set +# CONFIG_MVIAC7 is not set +CONFIG_X86_GENERIC=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +# CONFIG_X86_PPRO_FENCE is not set +CONFIG_X86_INVLPG=y +CONFIG_X86_BSWAP=y +CONFIG_X86_POPAD_OK=y +CONFIG_X86_ALIGNMENT_16=y 
+CONFIG_X86_INTEL_USERCOPY=y +CONFIG_X86_USE_PPRO_CHECKSUM=y +CONFIG_X86_MINIMUM_CPU_FAMILY=5 +CONFIG_CPU_SUP_CYRIX_32=y +CONFIG_CPU_SUP_TRANSMETA_32=y +CONFIG_CPU_SUP_UMC_32=y +CONFIG_NR_CPUS=32 +# CONFIG_X86_ANCIENT_MCE is not set +CONFIG_VM86=y +CONFIG_TOSHIBA=m +# CONFIG_X86_REBOOTFIXUPS is not set +# CONFIG_NOHIGHMEM is not set +# CONFIG_HIGHMEM4G is not set +CONFIG_HIGHMEM64G=y +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_HIGHMEM=y +CONFIG_X86_PAE=y +# CONFIG_NUMA is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ILLEGAL_POINTER_VALUE=0 +CONFIG_FLATMEM_MANUAL=y +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_HIGHPTE=y +# CONFIG_MATH_EMULATION is not set +# CONFIG_KEXEC_JUMP is not set +CONFIG_PHYSICAL_START=0x400000 +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x400000 + +# +# Power management and ACPI options +# +CONFIG_ACPI_BLACKLIST_YEAR=1999 +CONFIG_X86_APM_BOOT=y +CONFIG_APM=y +# CONFIG_APM_IGNORE_USER_SUSPEND is not set +# CONFIG_APM_DO_ENABLE is not set +CONFIG_APM_CPU_IDLE=y +# CONFIG_APM_DISPLAY_BLANK is not set +# CONFIG_APM_ALLOW_INTS is not set + +# +# x86 CPU frequency scaling drivers +# +# CONFIG_X86_POWERNOW_K6 is not set +CONFIG_X86_POWERNOW_K7=y +CONFIG_X86_POWERNOW_K7_ACPI=y +# CONFIG_X86_GX_SUSPMOD is not set +CONFIG_X86_SPEEDSTEP_ICH=y +CONFIG_X86_SPEEDSTEP_SMI=y +# CONFIG_X86_CPUFREQ_NFORCE2 is not set +CONFIG_X86_LONGRUN=y +# CONFIG_X86_LONGHAUL is not set +# CONFIG_X86_E_POWERSAVER is not set + +# +# shared options +# +# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set + +# +# Bus options (PCI etc.) +# +# CONFIG_PCI_GOBIOS is not set +# CONFIG_PCI_GOMMCONFIG is not set +# CONFIG_PCI_GODIRECT is not set +CONFIG_PCI_GOANY=y +CONFIG_PCI_BIOS=y +# CONFIG_ISA is not set +# CONFIG_MCA is not set +# CONFIG_SCx200 is not set +# CONFIG_ALIX is not set + +# +# PC-card bridges +# +# CONFIG_HOTPLUG_PCI_COMPAQ is not set +# CONFIG_HOTPLUG_PCI_IBM is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# Protocols +# +CONFIG_IBM_ASM=m +# CONFIG_SGI_IOC4 is not set +CONFIG_CS5535_MFGPT=m +CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7 +CONFIG_CS5535_CLOCK_EVENT_SRC=m + +# +# SCSI Transports +# +# CONFIG_SCSI_FLASHPOINT is not set +# CONFIG_SCSI_NSP32 is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_CS5535 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_WISTRON_BTNS is not set + +# +# Non-8250 serial port support +# +CONFIG_HW_RANDOM_GEODE=m +CONFIG_SONYPI=m +CONFIG_PC8736x_GPIO=m +CONFIG_NSC_GPIO=m + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_SCx200_ACB=m + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_LANGWELL is not set + +# +# Watchdog Device Drivers +# +CONFIG_GEODE_WDT=m +# CONFIG_SBC7240_WDT is not set + +# +# Customize TV tuners +# +CONFIG_VIDEOBUF2_DMA_CONTIG=m + +# +# Miscelaneous helper chips +# +CONFIG_VIDEO_CAFE_CCIC=m + +# +# Graphics support +# +# CONFIG_AGP_ALI is not set +# CONFIG_AGP_ATI is not set +# CONFIG_AGP_AMD is not set +# CONFIG_AGP_NVIDIA is not set +# CONFIG_AGP_SWORKS is not set +# CONFIG_AGP_EFFICEON is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_I810 is not set + +# +# Console display driver support +# +# CONFIG_SND_SIS7019 is not set + +# +# Reporting subsystems +# +# CONFIG_EDAC_AMD76X is not set +# CONFIG_EDAC_E7XXX is not set +# CONFIG_EDAC_I82875P is not set +# CONFIG_EDAC_I82860 is not 
set +# CONFIG_EDAC_R82600 is not set + +# +# Speakup console speech +# +CONFIG_TC1100_WMI=m + +# +# Hardware Spinlock drivers +# +CONFIG_CLKSRC_I8253=y + +# +# Kernel hacking +# +# CONFIG_DEBUG_HIGHMEM is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +CONFIG_DOUBLEFAULT=y + +# +# Memory Protections +# +CONFIG_GRKERNSEC_VM86=y + +# +# PaX +# +CONFIG_ARCH_TRACK_EXEC_LIMIT=y + +# +# Non-executable pages +# +CONFIG_PAX_SEGMEXEC=y + +# +# Miscellaneous hardening features +# +# CONFIG_INTEL_TXT is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES_586=y +# CONFIG_CRYPTO_SALSA20_586 is not set +CONFIG_CRYPTO_TWOFISH_586=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_DEV_GEODE=m +# CONFIG_LGUEST is not set + +# +# Library routines +# +CONFIG_AUDIT_GENERIC=y diff --git a/kernel/config-i686-legacy b/kernel/config-i686-legacy new file mode 100644 index 0000000..6f03829 --- /dev/null +++ b/kernel/config-i686-legacy @@ -0,0 +1,118 @@ + +# +# Processor type and features +# +# CONFIG_XEN_PRIVILEGED_GUEST is not set +# CONFIG_IOMMU_HELPER is not set +CONFIG_HIGHMEM4G=y +# CONFIG_HIGHMEM64G is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set + +# +# Bus options (PCI etc.) +# +# CONFIG_PCI_GOOLPC is not set +CONFIG_PCI_OLPC=y +CONFIG_OLPC=y +CONFIG_OLPC_XO1_PM=y +CONFIG_OLPC_XO1_RTC=y +CONFIG_OLPC_XO1_SCI=y +CONFIG_OLPC_XO15_SCI=y + +# +# Generic Driver Options +# +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_OF=y + +# +# Device Tree and Open Firmware support +# +CONFIG_PROC_DEVICETREE=y +CONFIG_OF_PROMTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_DEVICE=y +CONFIG_OF_GPIO=y +CONFIG_OF_I2C=m +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y + +# +# Input Device Drivers +# +CONFIG_MOUSE_PS2_OLPC=y + +# +# Serial drivers +# +# CONFIG_SERIAL_8250_DW is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_OF_PLATFORM=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_PXA=m +CONFIG_I2C_PXA_PCI=y + +# +# PCI GPIO expanders: +# +CONFIG_GPIO_CS5535=y + +# +# 1-wire Slaves +# +CONFIG_BATTERY_OLPC=y + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y + +# +# Graphics support +# +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_SYS_FOPS=m + +# +# LED drivers +# +# CONFIG_LEDS_NET5501 is not set + +# +# Virtio drivers +# +# CONFIG_FB_OLPC_DCON is not set + +# +# Speakup console speech +# +CONFIG_XO1_RFKILL=m + +# +# Non-executable pages +# +CONFIG_PAX_KERNEXEC=y +CONFIG_PAX_KERNEXEC_MODULE_TEXT=4 + +# +# Miscellaneous hardening features +# +CONFIG_PAX_MEMORY_UDEREF=y + +# +# Random Number Generation +# +# CONFIG_CRYPTO_DEV_HIFN_795X is not set diff --git a/kernel/config-x86-generic b/kernel/config-x86-generic new file mode 100644 index 0000000..224fd52 --- /dev/null +++ b/kernel/config-x86-generic @@ -0,0 +1,833 @@ +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_ZONE_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_DEFAULT_IDLE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y 
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_X86_HT=y +CONFIG_ARCH_CPU_PROBE_RELEASE=y + +# +# General setup +# +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +CONFIG_KERNEL_XZ=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_FANOUT_EXACT is not set +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_PCSPKR_PLATFORM=y + +# +# Kernel Performance Events And Counters +# +CONFIG_JUMP_LABEL=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_USE_GENERIC_SMP_HELPERS=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y + +# +# GCOV-based kernel profiling +# +CONFIG_STOP_MACHINE=y + +# +# IO Schedulers +# +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_MUTEX_SPIN_ON_OWNER=y + +# +# Processor type and features +# +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_SMP=y +CONFIG_X86_MPPARSE=y +CONFIG_X86_EXTENDED_PLATFORM=y +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_PARAVIRT_GUEST=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_XEN=y +CONFIG_XEN_DOM0=y +CONFIG_XEN_PRIVILEGED_GUEST=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_MAX_DOMAIN_MEMORY=128 +CONFIG_XEN_SAVE_RESTORE=y +CONFIG_XEN_DEBUG_FS=y +CONFIG_KVM_CLOCK=y +CONFIG_KVM_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MEMTEST is not set +# CONFIG_MK8 is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_X86_CMPXCHG=y +CONFIG_CMPXCHG_LOCAL=y +CONFIG_CMPXCHG_DOUBLE=y +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_XADD=y +CONFIG_X86_WP_WORKS_OK=y +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_I8K=m +CONFIG_MICROCODE=m +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_MMU_NOTIFIER=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_EFI=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +CONFIG_HZ_300=y +# CONFIG_HZ_1000 is not set +CONFIG_HZ=300 +CONFIG_SCHED_HRTICK=y +CONFIG_RELOCATABLE=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_CMDLINE_BOOL is not set +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y + 
+# +# Power management and ACPI options +# +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP_SMP=y +CONFIG_ACPI=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_PROCFS=y +# CONFIG_ACPI_PROCFS_POWER is not set +CONFIG_ACPI_EC_DEBUGFS=m +# CONFIG_ACPI_PROC_EVENT is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_X86_PM_TIMER=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_SFI=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m + +# +# x86 CPU frequency scaling drivers +# +CONFIG_X86_PCC_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_X86_POWERNOW_K8=y +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=y + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) 
+# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIE_ECRC=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIE_PME=y +CONFIG_ARCH_SUPPORTS_MSI=y +CONFIG_PCI_MSI=y +CONFIG_XEN_PCIDEV_FRONTEND=m +CONFIG_HT_IRQ=y +CONFIG_PCI_IOAPIC=y +CONFIG_PCI_LABEL=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y + +# +# PC-card bridges +# +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_FAKE=m +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_RAPIDIO is not set + +# +# Classification +# +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y + +# +# Generic Driver Options +# +CONFIG_SYS_HYPERVISOR=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV_FD=m +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_BLKDEV_BACKEND=m +# CONFIG_BLK_DEV_HD is not set +CONFIG_SENSORS_LIS3LV02D=m +CONFIG_VMWARE_BALLOON=m + +# +# SCSI Transports +# +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_BUSLOGIC=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_EATA=m +CONFIG_SCSI_EATA_TAGGED_QUEUE=y +# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set +CONFIG_SCSI_EATA_MAX_TAGS=16 +CONFIG_SCSI_GDTH=m +CONFIG_SCSI_ISCI=m +CONFIG_ATA_ACPI=y + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=m +# CONFIG_MULTICORE_RAID456 is not set + +# +# IEEE 1394 (FireWire) support +# +CONFIG_I2O_EXT_ADAPTEC_DMA64=y +CONFIG_MACINTOSH_DRIVERS=y +# CONFIG_MAC_EMUMOUSEBTN is not set +CONFIG_SUNGEM_PHY=m +CONFIG_ATM_HE=m +# CONFIG_ATM_HE_USE_SUNI is not set + +# +# CAIF transport drivers +# +CONFIG_IGB_DCA=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBEVF=m +# CONFIG_ZNET is not set +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MYRI10GE_DCA=y +CONFIG_ATP=m +CONFIG_SUNGEM=m +# CONFIG_NET_SB1000 is not set + +# +# USB Network Adapters +# +CONFIG_AIRO=m + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_XEN_NETDEV_BACKEND=m + +# +# Input Device Drivers +# +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_INPUT_PCSPKR=m +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m + +# +# Hardware I/O ports +# +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Character devices +# +CONFIG_SYNCLINK=m + +# +# Serial drivers +# +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_SERIAL_8250_PNP=y + +# +# Non-8250 serial port support +# +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +# CONFIG_VIRTIO_CONSOLE is not set +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_MWAVE=m +CONFIG_HPET=y +# CONFIG_HPET_MMAP is not set +CONFIG_HANGCHECK_TIMER=m +# CONFIG_TELCLOCK is not set + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_NFORCE2_S4985=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# Memory mapped GPIO drivers: +# +CONFIG_GPIO_SCH=m + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_CS5535 is not set +# CONFIG_GPIO_PCH is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_FSCHMD=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_APPLESMC=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m + +# +# Watchdog Device Drivers +# +# CONFIG_ACQUIRE_WDT 
is not set +# CONFIG_ADVANTECH_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +# CONFIG_SC520_WDT is not set +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_ITCO_WDT=m +# CONFIG_ITCO_VENDOR_SUPPORT is not set +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_SBC8360_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +CONFIG_W83627HF_WDT=m +CONFIG_W83697HF_WDT=m +CONFIG_W83697UG_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_XEN_WDT=m + +# +# Multifunction device drivers +# +CONFIG_MFD_CS5535=m + +# +# Multimedia drivers +# +CONFIG_IR_ENE=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_FINTEK=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_WINBOND_CIR=m + +# +# Miscelaneous helper chips +# +CONFIG_VIDEO_MEYE=m +CONFIG_VIDEO_VIA_CAMERA=m + +# +# Graphics support +# +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_SIS=y +CONFIG_AGP_VIA=y +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_KMS=y +CONFIG_DRM_I810=m +CONFIG_DRM_I915=m +CONFIG_DRM_I915_KMS=y +CONFIG_DRM_SIS=m +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FB_SYS_FOPS=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_ARC is not set +CONFIG_FB_VGA16=m +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_LE80578 is not set +CONFIG_FB_RADEON=m +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_SAVAGE=m +CONFIG_FB_SAVAGE_I2C=y +CONFIG_FB_SAVAGE_ACCEL=y +CONFIG_FB_VIA=m +# CONFIG_FB_VIA_DIRECT_PROCFS is not set +CONFIG_FB_VIA_X_COMPATIBILITY=y +CONFIG_FB_GEODE=y +CONFIG_FB_GEODE_LX=y +CONFIG_FB_GEODE_GX=y +# CONFIG_FB_GEODE_GX1 is not set +CONFIG_XEN_FBDEV_FRONTEND=y +CONFIG_BACKLIGHT_PROGEAR=m +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_SAHARA is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_PCSP=m +CONFIG_SND_SB_COMMON=m +CONFIG_SND_SB16_DSP=m +CONFIG_SND_ALS4000=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_CS5530=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_US122L=m +# CONFIG_SND_SOC is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_WBSD=m + +# +# LED drivers +# +CONFIG_LEDS_CLEVO_MAIL=m +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_DELL_NETBOOKS=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_EDAC=y + +# +# Reporting subsystems +# +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_MCE_INJ=m +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m + +# +# DMA Devices +# +# CONFIG_INTEL_MID_DMAC is not set +CONFIG_INTEL_IOATDMA=m +CONFIG_PCH_DMA=m + +# +# DMA Clients +# +CONFIG_DCA=m + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y 
+CONFIG_XEN_SELFBALLOONING=y +CONFIG_XEN_SCRUB_PAGES=y +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_BACKEND=y +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=m +CONFIG_XEN_GRANT_DEV_ALLOC=m +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_TMEM=y +CONFIG_XEN_PCIDEV_BACKEND=m +# CONFIG_SLICOSS is not set +# CONFIG_COMEDI is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_NOUVEAU_DEBUG=y + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_ACPI_QUICKSTART is not set + +# +# Speakup console speech +# +# CONFIG_DRM_PSB is not set +# CONFIG_INTEL_MEI is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACER_WMI=m +CONFIG_ACERHDF=m +CONFIG_ASUS_LAPTOP=m +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_AIO=m +CONFIG_FUJITSU_LAPTOP=m +# CONFIG_FUJITSU_LAPTOP_DEBUG is not set +CONFIG_HP_ACCEL=m +CONFIG_HP_WMI=m +CONFIG_MSI_LAPTOP=m +CONFIG_PANASONIC_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +CONFIG_IDEAPAD_LAPTOP=m +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_SENSORS_HDAPS=m +# CONFIG_INTEL_MENLOW is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +CONFIG_EEEPC_WMI=m +CONFIG_ACPI_WMI=m +CONFIG_MSI_WMI=m +# CONFIG_ACPI_ASUS is not set +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_ACPI_TOSHIBA=m +CONFIG_TOSHIBA_BT_RFKILL=m +CONFIG_ACPI_CMPC=m +CONFIG_INTEL_IPS=m +# CONFIG_IBM_RTL is not set +# CONFIG_XO15_EBOOK is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_MXM_WMI=m +CONFIG_INTEL_OAKTRAIL=m +CONFIG_SAMSUNG_Q10=m + +# +# Hardware Spinlock drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_IOMMU_API=y +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +# CONFIG_HYPERV is not set + +# +# Firmware Drivers +# +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_EFI_VARS=y +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +# CONFIG_SIGMA is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# Pseudo filesystems +# +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y + +# +# Kernel hacking +# +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_FTRACE_NMI_ENTER=y +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_MMIOTRACE is not set +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +# CONFIG_EARLY_PRINTK_DBGP is not set +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_X86_PTDUMP is not set +CONFIG_DEBUG_NX_TEST=m +# CONFIG_IOMMU_STRESS is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set + +# +# Memory Protections +# +# CONFIG_GRKERNSEC_IO is 
not set + +# +# Non-executable pages +# +CONFIG_PAX_EMUTRAMP=y + +# +# Address Space Layout Randomization +# +CONFIG_PAX_RANDKSTACK=y + +# +# Miscellaneous hardening features +# +CONFIG_PAX_MEMORY_STACKLEAK=y +CONFIG_PAX_REFCOUNT=y +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y +CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_PCRYPT=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C_INTEL=y + +# +# Ciphers +# +CONFIG_CRYPTO_AES_NI_INTEL=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_KVM_APIC_ARCHITECTURE=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_MMU_AUDIT=y +CONFIG_VHOST_NET=m + +# +# Library routines +# +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_CPU_RMAP=y diff --git a/kernel/config-x86_64-default b/kernel/config-x86_64-default new file mode 100644 index 0000000..c6b9a49 --- /dev/null +++ b/kernel/config-x86_64-default @@ -0,0 +1,171 @@ +CONFIG_64BIT=y +# CONFIG_X86_32 is not set +CONFIG_X86_64=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" +# CONFIG_KTIME_SCALAR is not set + +# +# RCU Subsystem +# +CONFIG_RCU_FANOUT=64 + +# +# GCOV-based kernel profiling +# +CONFIG_BLOCK_COMPAT=y + +# +# Processor type and features +# +CONFIG_X86_X2APIC=y +# CONFIG_X86_VSMP is not set +# CONFIG_X86_UV is not set +# CONFIG_MPSC is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=7 +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS=64 +CONFIG_DIRECT_GBPAGES=y +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=9 +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y +CONFIG_SPARSEMEM_VMEMMAP=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_BLACKLIST_YEAR=0 + +# +# Memory power savings +# +CONFIG_I7300_IDLE_IOAT_CHANNEL=y +CONFIG_I7300_IDLE=m + +# +# PC-card bridges +# +CONFIG_HOTPLUG_PCI_SHPC=m + +# +# Executable file formats / Emulations +# +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_KEYS_COMPAT=y +CONFIG_COMPAT_NETLINK_MESSAGES=y + +# +# Classification +# +CONFIG_BPF_JIT=y + +# +# Protocols +# +# CONFIG_IBM_ASM is not set +CONFIG_SGI_IOC4=m +# CONFIG_CS5535_MFGPT is not set + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_SIS5595 is 
not set +# CONFIG_I2C_SIS630 is not set + +# +# PCI GPIO expanders: +# +CONFIG_GPIO_LANGWELL=y + +# +# Miscelaneous helper chips +# +# CONFIG_VIDEO_CAFE_CCIC is not set + +# +# Reporting subsystems +# +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_SBRIDGE=m + +# +# Hardware Spinlock drivers +# +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_STATS=y +CONFIG_IRQ_REMAP=y + +# +# File systems +# +CONFIG_QUOTACTL_COMPAT=y + +# +# Kernel hacking +# +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_IOMMU_DEBUG is not set + +# +# PaX +# +CONFIG_TASK_SIZE_MAX_SHIFT=47 + +# +# Miscellaneous hardening features +# +CONFIG_INTEL_TXT=y + +# +# Digest +# +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_SALSA20_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m diff --git a/kernel/config.armv5tel-versatile b/kernel/config.armv5tel-versatile deleted file mode 100644 index bee5a61..0000000 --- a/kernel/config.armv5tel-versatile +++ /dev/null @@ -1,1449 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/arm 3.1.1-2.ip3.armv5tel Kernel Configuration -# -CONFIG_ARM=y -CONFIG_MIGHT_HAVE_PCI=y -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -CONFIG_HAVE_SCHED_CLOCK=y -# CONFIG_ARCH_USES_GETTIMEOFFSET is not set -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_KTIME_SCALAR=y -CONFIG_HAVE_PROC_CPU=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_RWSEM_GENERIC_SPINLOCK=y -CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_VECTORS_BASE=0xffff0000 -# CONFIG_ARM_PATCH_PHYS_VIRT is not set -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_HAVE_IRQ_WORK=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_LZO is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -# CONFIG_POSIX_MQUEUE is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_FHANDLE is not set -# CONFIG_TASKSTATS is not set -# CONFIG_AUDIT is not set -CONFIG_HAVE_GENERIC_HARDIRQS=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_HARDIRQS=y -CONFIG_HAVE_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_SHOW=y -# CONFIG_SPARSE_IRQ is not set - -# -# RCU Subsystem -# -CONFIG_TINY_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -# CONFIG_SCHED_AUTOGROUP is not set -# CONFIG_SYSFS_DEPRECATED is not set -# CONFIG_RELAY is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_HOTPLUG=y 
-CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y - -# -# Kernel Performance Events And Counters -# -# CONFIG_PERF_EVENTS is not set -# CONFIG_PERF_COUNTERS is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_COMPAT_BRK=y -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_PROFILING is not set -CONFIG_HAVE_OPROFILE=y -# CONFIG_KPROBES is not set -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_DMA_API_DEBUG=y - -# -# GCOV-based kernel profiling -# -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_BLOCK=y -CONFIG_LBDAF=y -CONFIG_BLK_DEV_BSG=y -# CONFIG_BLK_DEV_BSGLIB is not set -# CONFIG_BLK_DEV_INTEGRITY is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK is not set -# CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -# CONFIG_MUTEX_SPIN_ON_OWNER is not set -CONFIG_FREEZER=y - -# -# System Type -# -CONFIG_MMU=y -# CONFIG_ARCH_INTEGRATOR is not set -# CONFIG_ARCH_REALVIEW is not set -CONFIG_ARCH_VERSATILE=y -# CONFIG_ARCH_VEXPRESS is not set -# CONFIG_ARCH_AT91 is not set -# CONFIG_ARCH_BCMRING is not set -# CONFIG_ARCH_CLPS711X is not set -# CONFIG_ARCH_CNS3XXX is not set -# CONFIG_ARCH_GEMINI is not set -# CONFIG_ARCH_PRIMA2 is not set -# CONFIG_ARCH_EBSA110 is not set -# CONFIG_ARCH_EP93XX is not set -# CONFIG_ARCH_FOOTBRIDGE is not set -# CONFIG_ARCH_MXC is not set -# CONFIG_ARCH_MXS is not set -# CONFIG_ARCH_NETX is not set -# CONFIG_ARCH_H720X is not set -# CONFIG_ARCH_IOP13XX is not set -# CONFIG_ARCH_IOP32X is not set -# CONFIG_ARCH_IOP33X is not set -# CONFIG_ARCH_IXP23XX is not set -# CONFIG_ARCH_IXP2000 is not set -# CONFIG_ARCH_IXP4XX is not set -# CONFIG_ARCH_DOVE is not set -# CONFIG_ARCH_KIRKWOOD is not set -# CONFIG_ARCH_LPC32XX is not set -# CONFIG_ARCH_MV78XX0 is not set -# CONFIG_ARCH_ORION5X is not set 
-# CONFIG_ARCH_MMP is not set -# CONFIG_ARCH_KS8695 is not set -# CONFIG_ARCH_W90X900 is not set -# CONFIG_ARCH_NUC93X is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_PNX4008 is not set -# CONFIG_ARCH_PXA is not set -# CONFIG_ARCH_MSM is not set -# CONFIG_ARCH_SHMOBILE is not set -# CONFIG_ARCH_RPC is not set -# CONFIG_ARCH_SA1100 is not set -# CONFIG_ARCH_S3C2410 is not set -# CONFIG_ARCH_S3C64XX is not set -# CONFIG_ARCH_S5P64X0 is not set -# CONFIG_ARCH_S5PC100 is not set -# CONFIG_ARCH_S5PV210 is not set -# CONFIG_ARCH_EXYNOS4 is not set -# CONFIG_ARCH_SHARK is not set -# CONFIG_ARCH_TCC_926 is not set -# CONFIG_ARCH_U300 is not set -# CONFIG_ARCH_U8500 is not set -# CONFIG_ARCH_NOMADIK is not set -# CONFIG_ARCH_DAVINCI is not set -# CONFIG_ARCH_OMAP is not set -# CONFIG_PLAT_SPEAR is not set -# CONFIG_ARCH_VT8500 is not set -# CONFIG_ARCH_ZYNQ is not set - -# -# System MMU -# - -# -# Versatile platform type -# -CONFIG_ARCH_VERSATILE_PB=y -CONFIG_MACH_VERSATILE_AB=y -# CONFIG_MACH_VERSATILE_DT is not set -CONFIG_PLAT_VERSATILE_CLCD=y -CONFIG_PLAT_VERSATILE_FPGA_IRQ=y -CONFIG_PLAT_VERSATILE_SCHED_CLOCK=y -CONFIG_PLAT_VERSATILE=y -CONFIG_ARM_TIMER_SP804=y - -# -# Processor Type -# -CONFIG_CPU_ARM926T=y -CONFIG_CPU_32v5=y -CONFIG_CPU_ABRT_EV5TJ=y -CONFIG_CPU_PABRT_LEGACY=y -CONFIG_CPU_CACHE_VIVT=y -CONFIG_CPU_COPY_V4WB=y -CONFIG_CPU_TLB_V4WBI=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_CPU_USE_DOMAINS=y - -# -# Processor Features -# -CONFIG_ARM_THUMB=y -# CONFIG_CPU_ICACHE_DISABLE is not set -# CONFIG_CPU_DCACHE_DISABLE is not set -# CONFIG_CPU_DCACHE_WRITETHROUGH is not set -# CONFIG_CPU_CACHE_ROUND_ROBIN is not set -CONFIG_ARM_L1_CACHE_SHIFT=5 -CONFIG_ARM_VIC=y -CONFIG_ARM_VIC_NR=2 -CONFIG_ICST=y - -# -# Bus support -# -CONFIG_ARM_AMBA=y -# CONFIG_PCI is not set -# CONFIG_PCI_SYSCALL is not set -# CONFIG_ARCH_SUPPORTS_MSI is not set -# CONFIG_PCCARD is not set - -# -# Kernel Features -# -# CONFIG_NO_HZ is not set -# CONFIG_HIGH_RES_TIMERS is not set -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_VMSPLIT_3G=y -# CONFIG_VMSPLIT_2G is not set -# CONFIG_VMSPLIT_1G is not set -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_HZ=100 -# CONFIG_AEABI is not set -# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set -# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set -CONFIG_HAVE_ARCH_PFN_VALID=y -# CONFIG_HIGHMEM is not set -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=999999 -# CONFIG_COMPACTION is not set -# CONFIG_PHYS_ADDR_T_64BIT is not set -CONFIG_ZONE_DMA_FLAG=0 -CONFIG_VIRT_TO_BUS=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_NEED_PER_CPU_KM=y -# CONFIG_CLEANCACHE is not set -CONFIG_FORCE_MAX_ZONEORDER=11 -CONFIG_LEDS=y -CONFIG_LEDS_CPU=y -CONFIG_ALIGNMENT_TRAP=y -# CONFIG_UACCESS_WITH_MEMCPY is not set -# CONFIG_SECCOMP is not set -# CONFIG_CC_STACKPROTECTOR is not set -# CONFIG_DEPRECATED_PARAM_STRUCT is not set - -# -# Boot options -# -# CONFIG_USE_OF is not set -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="root=1f03 mem=32M" -CONFIG_CMDLINE_FROM_BOOTLOADER=y -# CONFIG_CMDLINE_EXTEND is not set -# CONFIG_CMDLINE_FORCE is not set -# CONFIG_XIP_KERNEL is not set -# CONFIG_KEXEC is not set -# CONFIG_CRASH_DUMP is not set -# CONFIG_AUTO_ZRELADDR is not set - -# -# CPU Power Management -# -# CONFIG_CPU_IDLE is not set - -# -# 
Floating point emulation -# - -# -# At least one emulation must be selected -# -CONFIG_FPE_NWFPE=y -# CONFIG_FPE_NWFPE_XP is not set -# CONFIG_FPE_FASTFPE is not set -CONFIG_VFP=y - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_HAVE_AOUT=y -# CONFIG_BINFMT_AOUT is not set -# CONFIG_BINFMT_MISC is not set -# CONFIG_ARTHUR is not set - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_PM_SLEEP=y -# CONFIG_PM_RUNTIME is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -# CONFIG_APM_EMULATION is not set -CONFIG_PM_CLK=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set -# CONFIG_NET_KEY is not set -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -# CONFIG_IP_ADVANCED_ROUTER is not set -CONFIG_IP_PNP=y -# CONFIG_IP_PNP_DHCP is not set -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IP_PNP_RARP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -# CONFIG_IP_MROUTE is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -CONFIG_INET_LRO=y -# CONFIG_INET_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -# CONFIG_BRIDGE is not set -# CONFIG_NET_DSA is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set -# CONFIG_BATMAN_ADV is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -# CONFIG_LIB80211 is not set - -# -# CFG80211 needs to be enabled for MAC80211 -# -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="" -# CONFIG_DEVTMPFS is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -CONFIG_FIRMWARE_IN_KERNEL=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_CONNECTOR is not set -CONFIG_MTD=y -# CONFIG_MTD_DEBUG is not set -# CONFIG_MTD_TESTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -CONFIG_MTD_CMDLINE_PARTS=y -# CONFIG_MTD_AFS_PARTS is not set -# CONFIG_MTD_AR7_PARTS is not set - -# -# User Modules And 
Translation Layers -# -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLKDEVS=y -CONFIG_MTD_BLOCK=y -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=y -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_NOSWAP=y -# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set -# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set -# CONFIG_MTD_CFI_GEOMETRY is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_CFI_I4 is not set -# CONFIG_MTD_CFI_I8 is not set -# CONFIG_MTD_OTP is not set -CONFIG_MTD_CFI_INTELEXT=y -# CONFIG_MTD_CFI_AMDSTD is not set -# CONFIG_MTD_CFI_STAA is not set -CONFIG_MTD_CFI_UTIL=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=y -# CONFIG_MTD_PHYSMAP_COMPAT is not set -# CONFIG_MTD_ARM_INTEGRATOR is not set -# CONFIG_MTD_PLATRAM is not set -# CONFIG_MTD_PISMO is not set - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOC2000 is not set -# CONFIG_MTD_DOC2001 is not set -# CONFIG_MTD_DOC2001PLUS is not set -# CONFIG_MTD_NAND is not set -# CONFIG_MTD_ONENAND is not set - -# -# LPDDR flash memory drivers -# -# CONFIG_MTD_LPDDR is not set -# CONFIG_MTD_UBI is not set -# CONFIG_PARPORT is not set -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_COW_COMMON is not set -# CONFIG_BLK_DEV_LOOP is not set - -# -# DRBD disabled because PROC_FS, INET or CONNECTOR not selected -# -# CONFIG_BLK_DEV_NBD is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_MISC_DEVICES is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_NETLINK is not set -# CONFIG_ATA is not set -# CONFIG_MD is not set -CONFIG_NETDEVICES=y -# CONFIG_DUMMY is not set -# CONFIG_BONDING is not set -# CONFIG_MACVLAN is not set -# CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set -# CONFIG_VETH is not set -CONFIG_MII=y -# CONFIG_PHYLIB is not set -CONFIG_NET_ETHERNET=y -# CONFIG_AX88796 is not set -CONFIG_SMC91X=y -# CONFIG_DM9000 is not set -# CONFIG_ETHOC is not set -# CONFIG_SMC911X is not set -# CONFIG_SMSC911X is not set -# CONFIG_DNET is not set -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -# CONFIG_B44 is not set -# CONFIG_KS8851_MLL is not set -# CONFIG_FTMAC100 is not set -CONFIG_NETDEV_1000=y -# CONFIG_STMMAC_ETH is not 
set -# CONFIG_FTGMAC100 is not set -CONFIG_NETDEV_10000=y -CONFIG_WLAN=y -# CONFIG_HOSTAP is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set - -# -# CAIF transport drivers -# -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -# CONFIG_ISDN is not set -# CONFIG_PHONE is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set -# CONFIG_INPUT_SPARSEKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -# CONFIG_INPUT_EVDEV is not set -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_AMBAKMI=y -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -# CONFIG_VT_HW_CONSOLE_BINDING is not set -CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_DEVKMEM=y - -# -# Serial drivers -# -CONFIG_SERIAL_8250=m -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_TIMBERDALE is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_HVC_DCC is not set -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=m -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -# CONFIG_R3964 is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_RAMOOPS is 
not set -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y - -# -# I2C Hardware Bus support -# - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_DESIGNWARE is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_VERSATILE is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_TAOS_EVM is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# - -# -# Enable Device Drivers -> PPS to see the PTP clock options. -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -# CONFIG_GPIOLIB is not set -# CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set -# CONFIG_HWMON is not set -# CONFIG_THERMAL is not set -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -# CONFIG_BCMA is not set -CONFIG_MFD_SUPPORT=y -# CONFIG_MFD_CORE is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS6507X is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_MFD_STMPE is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_T7L66XB is not set -# CONFIG_MFD_TC6387XB is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_REGULATOR is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -# CONFIG_DRM is not set -# CONFIG_VGASTATE is not set -# CONFIG_VIDEO_OUTPUT_CONTROL is not set -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -# CONFIG_FB_SYS_FOPS is not set -# CONFIG_FB_WMT_GE_ROPS is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -CONFIG_FB_ARMCLCD=y -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -CONFIG_FONTS=y -# CONFIG_FONT_8x8 is 
not set -# CONFIG_FONT_8x16 is not set -# CONFIG_FONT_6x11 is not set -# CONFIG_FONT_7x14 is not set -# CONFIG_FONT_PEARL_8x8 is not set -CONFIG_FONT_ACORN_8x8=y -# CONFIG_FONT_MINI_4x6 is not set -# CONFIG_FONT_SUN8x16 is not set -# CONFIG_FONT_SUN12x22 is not set -# CONFIG_FONT_10x18 is not set -# CONFIG_LOGO is not set -CONFIG_SOUND=y -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -CONFIG_SND=m -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -# CONFIG_SND_SEQUENCER is not set -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_PCM_OSS_PLUGINS=y -# CONFIG_SND_DYNAMIC_MINORS is not set -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -# CONFIG_SND_DEBUG is not set -CONFIG_SND_VMASTER=y -# CONFIG_SND_RAWMIDI_SEQ is not set -# CONFIG_SND_OPL3_LIB_SEQ is not set -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -# CONFIG_SND_EMU10K1_SEQ is not set -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_ALOOP is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set -# CONFIG_SND_AC97_POWER_SAVE is not set -CONFIG_SND_ARM=y -CONFIG_SND_ARMAACI=m -# CONFIG_SND_SOC is not set -# CONFIG_SOUND_PRIME is not set -CONFIG_AC97_BUS=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -# CONFIG_HIDRAW is not set -# CONFIG_HID_PID is not set - -# -# Special HID drivers -# -CONFIG_USB_SUPPORT=y -CONFIG_USB_ARCH_HAS_HCD=y -# CONFIG_USB_ARCH_HAS_OHCI is not set -# CONFIG_USB_ARCH_HAS_EHCI is not set -# CONFIG_USB is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# -# CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -CONFIG_MMC=y -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_UNSAFE_RESUME is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -# CONFIG_SDIO_UART is not set -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_ARMMMCI=m -# CONFIG_MMC_SDHCI is not set -# CONFIG_MMC_SDHCI_PXAV3 is not set -# CONFIG_MMC_SDHCI_PXAV2 is not set -# CONFIG_MMC_DW is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_RTC_LIB=y -# CONFIG_RTC_CLASS is not set -# CONFIG_DMADEVICES is not set -# CONFIG_AUXDISPLAY is not set -# CONFIG_UIO is not set - -# -# Virtio drivers -# -# CONFIG_VIRTIO_BALLOON is not set -# CONFIG_STAGING is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_MACH_CLKDEV=y -CONFIG_CLKSRC_MMIO=y -CONFIG_IOMMU_SUPPORT=y -# CONFIG_VIRT_DRIVERS is not set - -# -# File systems -# -CONFIG_EXT2_FS=y -# CONFIG_EXT2_FS_XATTR is not set -# CONFIG_EXT2_FS_XIP is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_FS_POSIX_ACL is not set -CONFIG_EXPORTFS=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -# CONFIG_FANOTIFY is not set -# CONFIG_QUOTA is not set -# CONFIG_QUOTACTL is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# Caches -# -# CONFIG_FSCACHE is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -# CONFIG_MSDOS_FS is not set 
-CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_SYSFS=y -# CONFIG_TMPFS is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is not set -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -CONFIG_JFFS2_FS=y -CONFIG_JFFS2_FS_DEBUG=0 -CONFIG_JFFS2_FS_WRITEBUFFER=y -# CONFIG_JFFS2_FS_WBUF_VERIFY is not set -# CONFIG_JFFS2_SUMMARY is not set -# CONFIG_JFFS2_FS_XATTR is not set -# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set -CONFIG_JFFS2_ZLIB=y -# CONFIG_JFFS2_LZO is not set -CONFIG_JFFS2_RTIME=y -# CONFIG_JFFS2_RUBIN is not set -# CONFIG_LOGFS is not set -CONFIG_CRAMFS=y -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -CONFIG_MINIX_FS=y -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -CONFIG_ROMFS_FS=y -CONFIG_ROMFS_BACKED_BY_BLOCK=y -# CONFIG_ROMFS_BACKED_BY_MTD is not set -# CONFIG_ROMFS_BACKED_BY_BOTH is not set -CONFIG_ROMFS_ON_BLOCK=y -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -# CONFIG_NFS_V3_ACL is not set -# CONFIG_NFS_V4 is not set -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -# CONFIG_NFSD_V3_ACL is not set -# CONFIG_NFSD_V4 is not set -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -# CONFIG_EFI_PARTITION is not set -# CONFIG_SYSV68_PARTITION is not set -CONFIG_NLS=m -CONFIG_NLS_DEFAULT="iso8859-1" -# CONFIG_NLS_CODEPAGE_437 is not set -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -CONFIG_NLS_CODEPAGE_850=m -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -# CONFIG_NLS_ASCII is not set -CONFIG_NLS_ISO8859_1=m -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# 
CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_UTF8 is not set - -# -# Kernel hacking -# -# CONFIG_PRINTK_TIME is not set -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=1024 -CONFIG_MAGIC_SYSRQ=y -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -# CONFIG_LOCKUP_DETECTOR is not set -# CONFIG_HARDLOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_TIMER_STATS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_FRAME_POINTER=y -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -# CONFIG_SYSCTL_SYSCALL_CHECK is not set -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_ENABLE_DEFAULT_TRACERS is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_STRICT_DEVMEM is not set -CONFIG_DEBUG_USER=y -CONFIG_DEBUG_LL=y -# CONFIG_EARLY_PRINTK is not set -# CONFIG_DEBUG_ICEDCC is not set -# CONFIG_OC_ETM is not set - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -# CONFIG_CRYPTO_FIPS is not set -CONFIG_CRYPTO_ALGAPI=m -CONFIG_CRYPTO_ALGAPI2=m -CONFIG_CRYPTO_RNG=m -CONFIG_CRYPTO_RNG2=m -# 
CONFIG_CRYPTO_MANAGER is not set -# CONFIG_CRYPTO_MANAGER2 is not set -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_CRYPTD is not set -# CONFIG_CRYPTO_AUTHENC is not set -# CONFIG_CRYPTO_TEST is not set - -# -# Authenticated Encryption with Associated Data -# -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_SEQIV is not set - -# -# Block modes -# -# CONFIG_CRYPTO_CBC is not set -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_CTS is not set -# CONFIG_CRYPTO_ECB is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_XTS is not set - -# -# Hash modes -# -# CONFIG_CRYPTO_HMAC is not set -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_VMAC is not set - -# -# Digest -# -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_GHASH is not set -# CONFIG_CRYPTO_MD4 is not set -# CONFIG_CRYPTO_MD5 is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_RMD128 is not set -# CONFIG_CRYPTO_RMD160 is not set -# CONFIG_CRYPTO_RMD256 is not set -# CONFIG_CRYPTO_RMD320 is not set -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_WP512 is not set - -# -# Ciphers -# -CONFIG_CRYPTO_AES=m -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_DES is not set -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SALSA20 is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set - -# -# Compression -# -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_ZLIB is not set -# CONFIG_CRYPTO_LZO is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -# CONFIG_CRYPTO_USER_API_HASH is not set -# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -CONFIG_CRYPTO_HW=y -# CONFIG_BINARY_PRINTF is not set - -# -# Library routines -# -CONFIG_BITREVERSE=y -# CONFIG_CRC_CCITT is not set -# CONFIG_CRC16 is not set -# CONFIG_CRC_T10DIF is not set -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -# CONFIG_CRC8 is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y -CONFIG_NLATTR=y -CONFIG_GENERIC_ATOMIC64=y -# CONFIG_AVERAGE is not set -# CONFIG_CORDIC is not set diff --git a/kernel/config.i686 b/kernel/config.i686 deleted file mode 100644 index 6344c96..0000000 --- a/kernel/config.i686 +++ /dev/null @@ -1,4870 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/i386 3.1.1-1.ip3.i686.PAE Kernel Configuration -# -# CONFIG_64BIT is not set -CONFIG_X86_32=y -# CONFIG_X86_64 is not set -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf32-i386" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_ZONE_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_GPIO=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_RWSEM_GENERIC_SPINLOCK is not set -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -# CONFIG_GENERIC_TIME_VSYSCALL is not set -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_DEFAULT_IDLE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# CONFIG_ZONE_DMA32 is not set -CONFIG_ARCH_POPULATES_NODE_MAP=y -# CONFIG_AUDIT_ARCH is not set -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_32_SMP=y -CONFIG_X86_HT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx" -CONFIG_KTIME_SCALAR=y -CONFIG_ARCH_CPU_PROBE_RELEASE=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_HAVE_IRQ_WORK=y -CONFIG_IRQ_WORK=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -# CONFIG_KERNEL_GZIP is not set -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -CONFIG_KERNEL_XZ=y -# CONFIG_KERNEL_LZO is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_BSD_PROCESS_ACCT=y -# CONFIG_BSD_PROCESS_ACCT_V3 is not set -CONFIG_FHANDLE=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y -CONFIG_HAVE_GENERIC_HARDIRQS=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_HARDIRQS=y -CONFIG_HAVE_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -# CONFIG_SPARSE_IRQ is not set - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -CONFIG_RCU_FANOUT=32 -# CONFIG_RCU_FANOUT_EXACT is not set -CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=18 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEM_RES_CTLR=y -CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y -# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y 
-CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_MM_OWNER=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -CONFIG_PERF_COUNTERS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_PROFILING is not set -CONFIG_TRACEPOINTS=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_USE_GENERIC_SMP_HELPERS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_STOP_MACHINE=y -CONFIG_BLOCK=y -CONFIG_LBDAF=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK is not set -# 
CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_SMP=y -CONFIG_X86_MPPARSE=y -CONFIG_X86_BIGSMP=y -CONFIG_X86_EXTENDED_PLATFORM=y -# CONFIG_X86_INTEL_MID is not set -# CONFIG_X86_RDC321X is not set -# CONFIG_X86_32_NON_STANDARD is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_X86_32_IRIS is not set -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_PARAVIRT_GUEST=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -# CONFIG_XEN is not set -# CONFIG_XEN_PRIVILEGED_GUEST is not set -CONFIG_KVM_CLOCK=y -CONFIG_KVM_GUEST=y -# CONFIG_LGUEST_GUEST is not set -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_SPINLOCKS is not set -CONFIG_PARAVIRT_CLOCK=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_NO_BOOTMEM=y -# CONFIG_MEMTEST is not set -# CONFIG_M386 is not set -# CONFIG_M486 is not set -# CONFIG_M586 is not set -# CONFIG_M586TSC is not set -# CONFIG_M586MMX is not set -CONFIG_M686=y -# CONFIG_MPENTIUMII is not set -# CONFIG_MPENTIUMIII is not set -# CONFIG_MPENTIUMM is not set -# CONFIG_MPENTIUM4 is not set -# CONFIG_MK6 is not set -# CONFIG_MK7 is not set -# CONFIG_MK8 is not set -# CONFIG_MCRUSOE is not set -# CONFIG_MEFFICEON is not set -# CONFIG_MWINCHIPC6 is not set -# CONFIG_MWINCHIP3D is not set -# CONFIG_MELAN is not set -# CONFIG_MGEODEGX1 is not set -# CONFIG_MGEODE_LX is not set -# CONFIG_MCYRIXIII is not set -# CONFIG_MVIAC3_2 is not set -# CONFIG_MVIAC7 is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_X86_GENERIC=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=7 -CONFIG_X86_CMPXCHG=y -CONFIG_CMPXCHG_LOCAL=y -CONFIG_CMPXCHG_DOUBLE=y -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_XADD=y -CONFIG_X86_PPRO_FENCE=y -CONFIG_X86_WP_WORKS_OK=y -CONFIG_X86_INVLPG=y -CONFIG_X86_BSWAP=y -CONFIG_X86_POPAD_OK=y -CONFIG_X86_ALIGNMENT_16=y -CONFIG_X86_INTEL_USERCOPY=y -CONFIG_X86_USE_PPRO_CHECKSUM=y -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=5 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_CYRIX_32=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_TRANSMETA_32=y -CONFIG_CPU_SUP_UMC_32=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -# CONFIG_IOMMU_HELPER is not set -CONFIG_NR_CPUS=256 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -# CONFIG_X86_ANCIENT_MCE is not set -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y -CONFIG_VM86=y -CONFIG_TOSHIBA=m -CONFIG_I8K=m -# CONFIG_X86_REBOOTFIXUPS is not set -CONFIG_MICROCODE=m -CONFIG_MICROCODE_INTEL=y -CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_NOHIGHMEM is not set -# CONFIG_HIGHMEM4G is not set -CONFIG_HIGHMEM64G=y -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_HIGHMEM=y -CONFIG_X86_PAE=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_NUMA=y -# CONFIG_NUMA_EMU 
is not set -CONFIG_NODES_SHIFT=3 -CONFIG_HAVE_ARCH_BOOTMEM=y -CONFIG_HAVE_ARCH_ALLOC_REMAP=y -CONFIG_ARCH_HAVE_MEMORY_PRESENT=y -CONFIG_NEED_NODE_MEMMAP_SIZE=y -CONFIG_ARCH_DISCONTIGMEM_ENABLE=y -CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ILLEGAL_POINTER_VALUE=0 -CONFIG_SELECT_MEMORY_MODEL=y -# CONFIG_FLATMEM_MANUAL is not set -CONFIG_DISCONTIGMEM_MANUAL=y -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_DISCONTIGMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_STATIC=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -# CONFIG_HWPOISON_INJECT is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_CLEANCACHE=y -CONFIG_HIGHPTE=y -# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set -CONFIG_X86_RESERVE_LOW=64 -# CONFIG_MATH_EMULATION is not set -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_EFI=y -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -CONFIG_HZ_300=y -# CONFIG_HZ_1000 is not set -CONFIG_HZ=300 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y -# CONFIG_KEXEC_JUMP is not set -CONFIG_PHYSICAL_START=0x400000 -CONFIG_RELOCATABLE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x400000 -CONFIG_HOTPLUG_CPU=y -# CONFIG_CMDLINE_BOOL is not set -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y - -# -# Power management and ACPI options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_PM_RUNTIME=y -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_ACPI=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_PROCFS_POWER=y -CONFIG_ACPI_EC_DEBUGFS=m -# CONFIG_ACPI_PROC_EVENT is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_NUMA is not set -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ACPI_BLACKLIST_YEAR=1999 -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=m -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -# CONFIG_ACPI_APEI_EINJ is not set -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -CONFIG_SFI=y -CONFIG_X86_APM_BOOT=y -CONFIG_APM=y -# CONFIG_APM_IGNORE_USER_SUSPEND is not set -# CONFIG_APM_DO_ENABLE is not set -CONFIG_APM_CPU_IDLE=y -# CONFIG_APM_DISPLAY_BLANK is not set -# CONFIG_APM_ALLOW_INTS is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_TABLE=y -CONFIG_CPU_FREQ_STAT=m -CONFIG_CPU_FREQ_STAT_DETAILS=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not 
set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=m -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m - -# -# x86 CPU frequency scaling drivers -# -CONFIG_X86_PCC_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ=m -# CONFIG_X86_POWERNOW_K6 is not set -CONFIG_X86_POWERNOW_K7=y -CONFIG_X86_POWERNOW_K7_ACPI=y -CONFIG_X86_POWERNOW_K8=m -# CONFIG_X86_GX_SUSPMOD is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_SPEEDSTEP_ICH=y -CONFIG_X86_SPEEDSTEP_SMI=y -CONFIG_X86_P4_CLOCKMOD=m -# CONFIG_X86_CPUFREQ_NFORCE2 is not set -CONFIG_X86_LONGRUN=y -# CONFIG_X86_LONGHAUL is not set -# CONFIG_X86_E_POWERSAVER is not set - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=y -# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -CONFIG_INTEL_IDLE=y - -# -# Bus options (PCI etc.) -# -CONFIG_PCI=y -# CONFIG_PCI_GOBIOS is not set -# CONFIG_PCI_GOMMCONFIG is not set -# CONFIG_PCI_GODIRECT is not set -CONFIG_PCI_GOANY=y -CONFIG_PCI_BIOS=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIE_ECRC=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIE_PME=y -CONFIG_ARCH_SUPPORTS_MSI=y -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_STUB=y -CONFIG_HT_IRQ=y -CONFIG_PCI_IOV=y -CONFIG_PCI_IOAPIC=y -CONFIG_PCI_LABEL=y -CONFIG_ISA_DMA_API=y -# CONFIG_ISA is not set -# CONFIG_MCA is not set -# CONFIG_SCx200 is not set -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_FAKE=m -CONFIG_HOTPLUG_PCI_COMPAQ=m -# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set -CONFIG_HOTPLUG_PCI_IBM=m -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set -# CONFIG_RAPIDIO is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_HAVE_AOUT=y -CONFIG_BINFMT_MISC=y -CONFIG_HAVE_ATOMIC_IOMAP=y -CONFIG_HAVE_TEXT_POKE_SMP=y -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -# CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_LRO=y -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m 
-CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=m -CONFIG_IPV6_PRIVACY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_NETLABEL is not set -CONFIG_NETWORK_SECMARK=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -# 
CONFIG_NETFILTER_XT_MATCH_IPVS is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set -# CONFIG_IP_NF_QUEUE is not set -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -# CONFIG_IP_NF_TARGET_ULOG is not set -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_DCCP=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PROTO_UDPLITE=m -CONFIG_NF_NAT_PROTO_SCTP=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_NF_NAT_SIP=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m -# CONFIG_IP6_NF_QUEUE is not set -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m 
-CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -# CONFIG_BRIDGE_EBT_ULOG is not set -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_MSG is not set -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_HMAC_NONE is not set -# CONFIG_SCTP_HMAC_SHA1 is not set -CONFIG_SCTP_HMAC_MD5=y -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -# CONFIG_ATM_LANE is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_NET_DSA=y -CONFIG_NET_DSA_TAG_DSA=y -CONFIG_NET_DSA_TAG_EDSA=y -CONFIG_NET_DSA_TAG_TRAILER=y -CONFIG_NET_DSA_MV88E6XXX=y -CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -# CONFIG_DECNET is not set -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_INGRESS=m - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -CONFIG_BT=m -CONFIG_BT_L2CAP=y -CONFIG_BT_SCO=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m - -# -# Bluetooth device drivers -# -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m 
-CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIBCM203X=m -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -CONFIG_BT_WILINK=m -# CONFIG_AF_RXRPC is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_SPY=y -CONFIG_WEXT_PRIV=y -CONFIG_CFG80211=m -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_REG_DEBUG is not set -CONFIG_CFG80211_DEFAULT_PS=y -CONFIG_CFG80211_DEBUGFS=y -# CONFIG_CFG80211_INTERNAL_REGDB is not set -CONFIG_CFG80211_WEXT=y -CONFIG_WIRELESS_EXT_SYSFS=y -CONFIG_LIB80211=m -CONFIG_LIB80211_CRYPT_WEP=m -CONFIG_LIB80211_CRYPT_CCMP=m -CONFIG_LIB80211_CRYPT_TKIP=m -# CONFIG_LIB80211_DEBUG is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_WIMAX is not set -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -# CONFIG_PARPORT_PC_FIFO is not set -# CONFIG_PARPORT_PC_SUPERIO is not set -# CONFIG_PARPORT_GSC is not set -# CONFIG_PARPORT_AX88796 is not set -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_FD=m -# CONFIG_PARIDE is not set -CONFIG_BLK_CPQ_DA=m -CONFIG_BLK_CPQ_CISS_DA=m -# CONFIG_CISS_SCSI_TAPE is not set -CONFIG_BLK_DEV_DAC960=m -CONFIG_BLK_DEV_UMEM=m -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -CONFIG_BLK_DEV_CRYPTOLOOP=m -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_OSD is not set -CONFIG_BLK_DEV_SX8=m -# CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=m -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -CONFIG_SENSORS_LIS3LV02D=m -CONFIG_MISC_DEVICES=y -# CONFIG_AD525X_DPOT is not set -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -# CONFIG_INTEL_MID_PTI is not set -# CONFIG_SGI_IOC4 is not set -CONFIG_TIFM_CORE=m -CONFIG_TIFM_7XX1=m -CONFIG_ICS932S401=m -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_CS5535_MFGPT is not set -CONFIG_HP_ILO=m -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set 
-CONFIG_DS1682=m -CONFIG_VMWARE_BALLOON=m -# CONFIG_BMP085 is not set -CONFIG_PCH_PHUB=m -CONFIG_USB_SWITCH_FSA9480=m -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=m -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y -# CONFIG_IWMC3200TOP is not set - -# -# Texas Instruments shared transport line discipline -# -CONFIG_TI_ST=m -# CONFIG_SENSORS_LIS3_I2C is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_TGT=m -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_WAIT_SCAN=m - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -CONFIG_SCSI_CXGB3_ISCSI=m -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2X_FCOE=m -CONFIG_BE2ISCSI=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_HPSA=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_3W_SAS=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 -CONFIG_AIC7XXX_RESET_DELAY_MS=5000 -CONFIG_AIC7XXX_DEBUG_ENABLE=y -CONFIG_AIC7XXX_DEBUG_MASK=0 -CONFIG_AIC7XXX_REG_PRETTY_PRINT=y -# CONFIG_SCSI_AIC7XXX_OLD is not set -CONFIG_SCSI_AIC79XX=m -CONFIG_AIC79XX_CMDS_PER_DEVICE=32 -CONFIG_AIC79XX_RESET_DELAY_MS=4000 -# CONFIG_AIC79XX_DEBUG_ENABLE is not set -CONFIG_AIC79XX_DEBUG_MASK=0 -# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_AIC94XX=m -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=m -# CONFIG_SCSI_MVSAS_DEBUG is not set -CONFIG_SCSI_MVSAS_TASKLET=y -CONFIG_SCSI_DPT_I2O=m -CONFIG_SCSI_ADVANSYS=m -CONFIG_SCSI_ARCMSR=m -# CONFIG_SCSI_ARCMSR_AER is not set -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=m -CONFIG_MEGARAID_MAILBOX=m -CONFIG_MEGARAID_LEGACY=m -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS_LOGGING is not set -CONFIG_SCSI_HPTIOP=m -CONFIG_SCSI_BUSLOGIC=m -CONFIG_SCSI_FLASHPOINT=y -CONFIG_VMWARE_PVSCSI=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_DMX3191D=m -CONFIG_SCSI_EATA=m -CONFIG_SCSI_EATA_TAGGED_QUEUE=y -# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set -CONFIG_SCSI_EATA_MAX_TAGS=16 -CONFIG_SCSI_FUTURE_DOMAIN=m -CONFIG_SCSI_GDTH=m -CONFIG_SCSI_ISCI=m -CONFIG_SCSI_IPS=m -CONFIG_SCSI_INITIO=m -CONFIG_SCSI_INIA100=m -CONFIG_SCSI_PPA=m -CONFIG_SCSI_IMM=m -# CONFIG_SCSI_IZIP_EPP16 is not set -# CONFIG_SCSI_IZIP_SLOW_CTR is not set -CONFIG_SCSI_STEX=m -CONFIG_SCSI_SYM53C8XX_2=m -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_QLA_FC=m -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m 
-# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=m -CONFIG_SCSI_DC390T=m -CONFIG_SCSI_NSP32=m -# CONFIG_SCSI_DEBUG is not set -CONFIG_SCSI_PMCRAID=m -CONFIG_SCSI_PM8001=m -# CONFIG_SCSI_SRP is not set -CONFIG_SCSI_BFA_FC=m -CONFIG_SCSI_DH=m -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=m -CONFIG_SATA_INIC162X=m -CONFIG_SATA_ACARD_AHCI=m -CONFIG_SATA_SIL24=m -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m -CONFIG_SATA_SX4=m -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=m -CONFIG_SATA_NV=m -CONFIG_SATA_PROMISE=m -CONFIG_SATA_SIL=m -CONFIG_SATA_SIS=m -CONFIG_SATA_SVW=m -CONFIG_SATA_ULI=m -CONFIG_SATA_VIA=m -CONFIG_SATA_VITESSE=m - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=m -CONFIG_PATA_AMD=m -CONFIG_PATA_ARASAN_CF=m -CONFIG_PATA_ARTOP=m -CONFIG_PATA_ATIIXP=m -CONFIG_PATA_ATP867X=m -CONFIG_PATA_CMD64X=m -CONFIG_PATA_CS5520=m -CONFIG_PATA_CS5530=m -CONFIG_PATA_CS5535=m -CONFIG_PATA_CS5536=m -CONFIG_PATA_CYPRESS=m -CONFIG_PATA_EFAR=m -CONFIG_PATA_HPT366=m -CONFIG_PATA_HPT37X=m -CONFIG_PATA_HPT3X2N=m -CONFIG_PATA_HPT3X3=m -# CONFIG_PATA_HPT3X3_DMA is not set -CONFIG_PATA_IT8213=m -CONFIG_PATA_IT821X=m -CONFIG_PATA_JMICRON=m -CONFIG_PATA_MARVELL=m -CONFIG_PATA_NETCELL=m -CONFIG_PATA_NINJA32=m -CONFIG_PATA_NS87415=m -CONFIG_PATA_OLDPIIX=m -CONFIG_PATA_OPTIDMA=m -CONFIG_PATA_PDC2027X=m -CONFIG_PATA_PDC_OLD=m -# CONFIG_PATA_RADISYS is not set -CONFIG_PATA_RDC=m -# CONFIG_PATA_SC1200 is not set -CONFIG_PATA_SCH=m -CONFIG_PATA_SERVERWORKS=m -CONFIG_PATA_SIL680=m -CONFIG_PATA_SIS=m -CONFIG_PATA_TOSHIBA=m -CONFIG_PATA_TRIFLEX=m -CONFIG_PATA_VIA=m -CONFIG_PATA_WINBOND=m - -# -# PIO-only SFF controllers -# -CONFIG_PATA_CMD640_PCI=m -CONFIG_PATA_MPIIX=m -CONFIG_PATA_NS87410=m -CONFIG_PATA_OPTI=m -CONFIG_PATA_RZ1000=m - -# -# Generic fallback / legacy drivers -# -CONFIG_PATA_ACPI=m -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MULTICORE_RAID456 is not set -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MIRROR=y -CONFIG_DM_RAID=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_ZERO=y -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_DELAY is not set -CONFIG_DM_UEVENT=y -# CONFIG_DM_FLAKEY is not set -# CONFIG_TARGET_CORE is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=40 -CONFIG_FUSION_CTL=m -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_OHCI_DEBUG=y -CONFIG_FIREWIRE_SBP2=m -# CONFIG_FIREWIRE_NET is not set -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_I2O=m -# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set -CONFIG_I2O_EXT_ADAPTEC=y -CONFIG_I2O_EXT_ADAPTEC_DMA64=y -CONFIG_I2O_CONFIG=m -CONFIG_I2O_CONFIG_OLD_IOCTL=y -CONFIG_I2O_BUS=m -CONFIG_I2O_BLOCK=m 
-CONFIG_I2O_SCSI=m -CONFIG_I2O_PROC=m -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_IFB=m -CONFIG_DUMMY=m -CONFIG_BONDING=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -# CONFIG_EQUALIZER is not set -CONFIG_TUN=m -CONFIG_VETH=m -# CONFIG_NET_SB1000 is not set -# CONFIG_ARCNET is not set -CONFIG_MII=m -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_BROADCOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_NATIONAL_PHY=m -CONFIG_STE10XP=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MICREL_PHY=m -CONFIG_FIXED_PHY=y -CONFIG_MDIO_BITBANG=m -# CONFIG_MDIO_GPIO is not set -CONFIG_NET_ETHERNET=y -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_CASSINI=m -CONFIG_NET_VENDOR_3COM=y -CONFIG_VORTEX=m -CONFIG_TYPHOON=m -CONFIG_ETHOC=m -CONFIG_DNET=m -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_DE2104X_DSL=0 -CONFIG_TULIP=m -# CONFIG_TULIP_MWI is not set -CONFIG_TULIP_MMIO=y -CONFIG_TULIP_NAPI=y -CONFIG_TULIP_NAPI_HW_MITIGATION=y -CONFIG_DE4X5=m -CONFIG_WINBOND_840=m -CONFIG_DM9102=m -CONFIG_ULI526X=m -CONFIG_PCMCIA_XIRCOM=m -CONFIG_HP100=m -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -CONFIG_NET_PCI=y -CONFIG_PCNET32=m -CONFIG_AMD8111_ETH=m -CONFIG_ADAPTEC_STARFIRE=m -CONFIG_KSZ884X_PCI=m -CONFIG_B44=m -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -CONFIG_FORCEDETH=m -CONFIG_E100=m -CONFIG_FEALNX=m -CONFIG_NATSEMI=m -CONFIG_NE2K_PCI=m -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R6040=m -CONFIG_SIS900=m -CONFIG_EPIC100=m -CONFIG_SMSC9420=m -CONFIG_SUNDANCE=m -# CONFIG_SUNDANCE_MMIO is not set -CONFIG_TLAN=m -# CONFIG_KS8842 is not set -# CONFIG_KS8851_MLL is not set -CONFIG_VIA_RHINE=m -CONFIG_VIA_RHINE_MMIO=y -CONFIG_SC92031=m -CONFIG_NET_POCKET=y -CONFIG_ATP=m -CONFIG_DE600=m -CONFIG_DE620=m -CONFIG_ATL2=m -CONFIG_NETDEV_1000=y -CONFIG_ACENIC=m -# CONFIG_ACENIC_OMIT_TIGON_I is not set -CONFIG_DL2K=m -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IP1000=m -CONFIG_IGB=m -CONFIG_IGB_DCA=y -CONFIG_IGBVF=m -CONFIG_NS83820=m -CONFIG_HAMACHI=m -CONFIG_YELLOWFIN=m -CONFIG_R8169=m -CONFIG_SIS190=m -CONFIG_SKGE=m -# CONFIG_SKGE_DEBUG is not set -CONFIG_SKGE_GENESIS=y -CONFIG_SKY2=m -# CONFIG_SKY2_DEBUG is not set -CONFIG_VIA_VELOCITY=m -CONFIG_TIGON3=m -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_QLA3XXX=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_JME=m -CONFIG_STMMAC_ETH=m -# CONFIG_STMMAC_DA is not set -# CONFIG_STMMAC_DUAL_MAC is not set -CONFIG_PCH_GBE=m -CONFIG_NETDEV_10000=y -CONFIG_MDIO=m -CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T1_1G=y -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF=m -CONFIG_ENIC=m -CONFIG_IXGBE=m -CONFIG_IXGBE_DCA=y -CONFIG_IXGBEVF=m -CONFIG_IXGB=m -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y -CONFIG_NETXEN_NIC=m -CONFIG_NIU=m -CONFIG_MLX4_EN=m -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -CONFIG_TEHUTI=m -CONFIG_BNX2X=m -CONFIG_QLCNIC=m -CONFIG_QLGE=m -CONFIG_BNA=m -CONFIG_SFC=m -CONFIG_BE2NET=m -# CONFIG_TR is not set 
-CONFIG_WLAN=y -CONFIG_LIBERTAS_THINFIRM=m -# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set -CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_AIRO=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_AT76C50X_USB=m -# CONFIG_PRISM54 is not set -CONFIG_USB_ZD1201=m -CONFIG_USB_NET_RNDIS_WLAN=m -CONFIG_RTL8180=m -CONFIG_RTL8187=m -CONFIG_RTL8187_LEDS=y -CONFIG_ADM8211=m -CONFIG_MAC80211_HWSIM=m -CONFIG_MWL8K=m -CONFIG_ATH_COMMON=m -# CONFIG_ATH_DEBUG is not set -CONFIG_ATH5K=m -CONFIG_ATH5K_DEBUG=y -# CONFIG_ATH5K_TRACER is not set -CONFIG_ATH5K_PCI=y -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y -CONFIG_ATH9K_AHB=y -CONFIG_ATH9K_DEBUGFS=y -CONFIG_ATH9K_RATE_CONTROL=y -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -CONFIG_CARL9170=m -CONFIG_CARL9170_LEDS=y -# CONFIG_CARL9170_DEBUGFS is not set -CONFIG_CARL9170_WPC=y -CONFIG_B43=m -CONFIG_B43_SSB=y -CONFIG_B43_PCI_AUTOSELECT=y -CONFIG_B43_PCICORE_AUTOSELECT=y -CONFIG_B43_SDIO=y -CONFIG_B43_PIO=y -CONFIG_B43_PHY_N=y -CONFIG_B43_PHY_LP=y -CONFIG_B43_LEDS=y -CONFIG_B43_HWRNG=y -# CONFIG_B43_DEBUG is not set -CONFIG_B43LEGACY=m -CONFIG_B43LEGACY_PCI_AUTOSELECT=y -CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y -CONFIG_B43LEGACY_LEDS=y -CONFIG_B43LEGACY_HWRNG=y -# CONFIG_B43LEGACY_DEBUG is not set -CONFIG_B43LEGACY_DMA=y -CONFIG_B43LEGACY_PIO=y -CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y -# CONFIG_B43LEGACY_DMA_MODE is not set -# CONFIG_B43LEGACY_PIO_MODE is not set -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -# CONFIG_IPW2100_DEBUG is not set -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y -CONFIG_IPW2200_RADIOTAP=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y -# CONFIG_IPW2200_DEBUG is not set -CONFIG_LIBIPW=m -# CONFIG_LIBIPW_DEBUG is not set -CONFIG_IWLAGN=m - -# -# Debugging Options -# -# CONFIG_IWLWIFI_DEBUG is not set -CONFIG_IWLWIFI_DEBUGFS=y -# CONFIG_IWLWIFI_DEVICE_TRACING is not set -CONFIG_IWLWIFI_DEVICE_SVTOOL=y -# CONFIG_IWL_P2P is not set -CONFIG_IWLWIFI_LEGACY=m - -# -# Debugging Options -# -# CONFIG_IWLWIFI_LEGACY_DEBUG is not set -CONFIG_IWLWIFI_LEGACY_DEBUGFS=y -# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set -CONFIG_IWL4965=m -CONFIG_IWL3945=m -# CONFIG_IWM is not set -CONFIG_LIBERTAS=m -CONFIG_LIBERTAS_USB=m -CONFIG_LIBERTAS_SDIO=m -# CONFIG_LIBERTAS_DEBUG is not set -CONFIG_LIBERTAS_MESH=y -CONFIG_HERMES=m -# CONFIG_HERMES_PRISM is not set -CONFIG_HERMES_CACHE_FW_ON_INIT=y -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m -CONFIG_ORINOCO_USB=m -CONFIG_P54_COMMON=m -CONFIG_P54_USB=m -CONFIG_P54_PCI=m -CONFIG_P54_LEDS=y -CONFIG_RT2X00=m -CONFIG_RT2400PCI=m -CONFIG_RT2500PCI=m -CONFIG_RT61PCI=m -CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2500USB=m -CONFIG_RT73USB=m -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y -CONFIG_RT2800USB_RT53XX=y -CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -CONFIG_RT2X00_LIB_DEBUGFS=y -# CONFIG_RT2X00_DEBUG is not set -CONFIG_RTL8192CE=m -CONFIG_RTL8192SE=m -CONFIG_RTL8192DE=m -CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTL8192C_COMMON=m -CONFIG_WL1251=m -CONFIG_WL1251_SDIO=m -CONFIG_WL12XX_MENU=m -CONFIG_WL12XX=m -# CONFIG_WL12XX_HT is not set -CONFIG_WL12XX_SDIO=m -# CONFIG_WL12XX_SDIO_TEST is not set 
-CONFIG_WL12XX_PLATFORM_DATA=y -CONFIG_ZD1211RW=m -# CONFIG_ZD1211RW_DEBUG is not set -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# - -# -# USB Network Adapters -# -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -# CONFIG_WAN is not set -CONFIG_ATM_DRIVERS=y -# CONFIG_ATM_DUMMY is not set -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -# CONFIG_ATM_ENI_DEBUG is not set -# CONFIG_ATM_ENI_TUNE_BURST is not set -CONFIG_ATM_FIRESTREAM=m -# CONFIG_ATM_ZATM is not set -CONFIG_ATM_NICSTAR=m -# CONFIG_ATM_NICSTAR_USE_SUNI is not set -# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set -CONFIG_ATM_IDT77252=m -# CONFIG_ATM_IDT77252_DEBUG is not set -# CONFIG_ATM_IDT77252_RCV_ALL is not set -CONFIG_ATM_IDT77252_USE_SUNI=y -# CONFIG_ATM_AMBASSADOR is not set -# CONFIG_ATM_HORIZON is not set -# CONFIG_ATM_IA is not set -# CONFIG_ATM_FORE200E is not set -CONFIG_ATM_HE=m -# CONFIG_ATM_HE_USE_SUNI is not set -CONFIG_ATM_SOLOS=m - -# -# CAIF transport drivers -# -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_PLIP is not set -CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOATM=m -CONFIG_PPPOL2TP=m -# CONFIG_SLIP is not set -CONFIG_SLHC=m -# CONFIG_NET_FC is not set -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL=y -CONFIG_NETPOLL_TRAP=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_VIRTIO_NET=m -CONFIG_VMXNET3=m -CONFIG_ISDN=y -CONFIG_ISDN_I4L=m -CONFIG_ISDN_PPP=y -CONFIG_ISDN_PPP_VJ=y -CONFIG_ISDN_MPP=y -CONFIG_IPPP_FILTER=y -CONFIG_ISDN_PPP_BSDCOMP=m -CONFIG_ISDN_AUDIO=y -CONFIG_ISDN_TTY_FAX=y - -# -# ISDN feature submodules -# -CONFIG_ISDN_DIVERSION=m - -# -# ISDN4Linux hardware drivers -# - -# -# Passive cards -# -CONFIG_ISDN_DRV_HISAX=m - -# -# D-channel protocol features -# -CONFIG_HISAX_EURO=y -CONFIG_DE_AOC=y -CONFIG_HISAX_NO_SENDCOMPLETE=y -CONFIG_HISAX_NO_LLC=y -CONFIG_HISAX_NO_KEYPAD=y -CONFIG_HISAX_1TR6=y -CONFIG_HISAX_NI1=y -CONFIG_HISAX_MAX_CARDS=8 - -# -# HiSax supported cards -# -CONFIG_HISAX_16_3=y -CONFIG_HISAX_TELESPCI=y -CONFIG_HISAX_S0BOX=y -CONFIG_HISAX_FRITZPCI=y -CONFIG_HISAX_AVM_A1_PCMCIA=y -CONFIG_HISAX_ELSA=y -CONFIG_HISAX_DIEHLDIVA=y -CONFIG_HISAX_SEDLBAUER=y -CONFIG_HISAX_NETJET=y -CONFIG_HISAX_NETJET_U=y -CONFIG_HISAX_NICCY=y -CONFIG_HISAX_BKM_A4T=y -CONFIG_HISAX_SCT_QUADRO=y -CONFIG_HISAX_GAZEL=y -CONFIG_HISAX_HFC_PCI=y -CONFIG_HISAX_W6692=y -CONFIG_HISAX_HFC_SX=y -CONFIG_HISAX_ENTERNOW_PCI=y -# CONFIG_HISAX_DEBUG is not set - -# -# HiSax PCMCIA card service modules -# - -# -# HiSax sub driver modules -# -CONFIG_HISAX_ST5481=m -CONFIG_HISAX_HFCUSB=m -CONFIG_HISAX_HFC4S8S=m -CONFIG_HISAX_FRITZ_PCIPNP=m - -# -# Active 
cards -# -CONFIG_ISDN_CAPI=m -CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_ISDN_CAPI_CAPI20=m -CONFIG_ISDN_CAPI_CAPIDRV=m - -# -# CAPI hardware drivers -# -CONFIG_CAPI_AVM=y -CONFIG_ISDN_DRV_AVMB1_B1PCI=m -CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y -CONFIG_ISDN_DRV_AVMB1_T1PCI=m -CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m -CONFIG_ISDN_DRV_GIGASET=m -CONFIG_GIGASET_CAPI=y -# CONFIG_GIGASET_I4L is not set -# CONFIG_GIGASET_DUMMYLL is not set -CONFIG_GIGASET_BASE=m -CONFIG_GIGASET_M105=m -CONFIG_GIGASET_M101=m -# CONFIG_GIGASET_DEBUG is not set -CONFIG_HYSDN=m -CONFIG_HYSDN_CAPI=y -# CONFIG_MISDN is not set -CONFIG_ISDN_HDLC=m -# CONFIG_PHONE is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=m -CONFIG_KEYBOARD_GPIO_POLLED=m -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_SENTELIC=y -CONFIG_MOUSE_PS2_TOUCHKIT=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_VSXXXAA=m -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -CONFIG_INPUT_PCSPKR=m -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_MPU3050 is not set -CONFIG_INPUT_APANEL=m -CONFIG_INPUT_WISTRON_BTNS=m -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -# CONFIG_INPUT_KXTJ9 is not set -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_CMA3000 is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=m -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PARKBD is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y 
-CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_ROCKETPORT=m -CONFIG_CYCLADES=m -# CONFIG_CYZ_INTR is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_SYNCLINK=m -CONFIG_SYNCLINKMP=m -CONFIG_SYNCLINK_GT=m -CONFIG_NOZOMI=m -# CONFIG_ISI is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -# CONFIG_TRACE_SINK is not set -# CONFIG_STALDRV is not set - -# -# Serial drivers -# -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MFD_HSU is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_SERIAL_JSM=m -# CONFIG_SERIAL_TIMBERDALE is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_PCH_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -CONFIG_PRINTER=m -CONFIG_LP_CONSOLE=y -CONFIG_PPDEV=m -CONFIG_HVC_DRIVER=y -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -# CONFIG_IPMI_PANIC_EVENT is not set -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -CONFIG_HW_RANDOM_GEODE=m -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_NVRAM=y -CONFIG_R3964=m -# CONFIG_APPLICOM is not set -CONFIG_SONYPI=m -CONFIG_MWAVE=m -CONFIG_PC8736x_GPIO=m -CONFIG_NSC_GPIO=m -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=8192 -CONFIG_HPET=y -# CONFIG_HPET_MMAP is not set -CONFIG_HANGCHECK_TIMER=m -# CONFIG_TCG_TPM is not set -# CONFIG_TELCLOCK is not set -# CONFIG_RAMOOPS is not set -CONFIG_I2C=m -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -CONFIG_I2C_ALI1535=m -CONFIG_I2C_ALI1563=m -CONFIG_I2C_ALI15X3=m -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -CONFIG_I2C_SIS5595=m -CONFIG_I2C_SIS630=m -CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m -CONFIG_I2C_VIAPRO=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_INTEL_MID is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_PXA_PCI is not set -CONFIG_I2C_SIMTEC=m -# CONFIG_I2C_XILINX is not set -# CONFIG_I2C_EG20T is not set - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -CONFIG_I2C_PARPORT=m -CONFIG_I2C_PARPORT_LIGHT=m -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_STUB=m -CONFIG_SCx200_ACB=m -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# - -# -# Enable Device Drivers -> PPS to 
see the PTP clock options. -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y - -# -# Memory mapped GPIO drivers: -# -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_IT8761E is not set -CONFIG_GPIO_SCH=m -# CONFIG_GPIO_VX855 is not set - -# -# I2C GPIO expanders: -# -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_ADP5588 is not set - -# -# PCI GPIO expanders: -# -# CONFIG_GPIO_CS5535 is not set -# CONFIG_GPIO_LANGWELL is not set -# CONFIG_GPIO_PCH is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders: -# -# CONFIG_GPIO_MCP23S08 is not set - -# -# AC97 GPIO expanders: -# - -# -# MODULbus GPIO expanders: -# -CONFIG_W1=m -CONFIG_W1_CON=y - -# -# 1-wire Bus Masters -# -# CONFIG_W1_MASTER_MATROX is not set -CONFIG_W1_MASTER_DS2490=m -CONFIG_W1_MASTER_DS2482=m -CONFIG_W1_MASTER_DS1WM=m -# CONFIG_W1_MASTER_GPIO is not set - -# -# 1-wire Slaves -# -CONFIG_W1_SLAVE_THERM=m -CONFIG_W1_SLAVE_SMEM=m -CONFIG_W1_SLAVE_DS2408=m -CONFIG_W1_SLAVE_DS2423=m -CONFIG_W1_SLAVE_DS2431=m -CONFIG_W1_SLAVE_DS2433=m -CONFIG_W1_SLAVE_DS2433_CRC=y -CONFIG_W1_SLAVE_DS2760=m -CONFIG_W1_SLAVE_DS2780=m -CONFIG_W1_SLAVE_BQ27000=m -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2760 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_BQ20Z75 is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_ISP1704 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_GPIO is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -CONFIG_SENSORS_ASC7621=m -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_FAM15H_POWER=m -CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ATXP1=m -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_FSCHMD=m -CONFIG_SENSORS_G760A=m -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -# CONFIG_SENSORS_GPIO_FAN is not set -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -CONFIG_SENSORS_IT87=m -# CONFIG_SENSORS_JC42 is not set -CONFIG_SENSORS_LINEAGE=m -CONFIG_SENSORS_LM63=m -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -CONFIG_SENSORS_LTC4245=m -CONFIG_SENSORS_LTC4261=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6642=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_NTC_THERMISTOR=m -CONFIG_SENSORS_PC87360=m 
-CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -CONFIG_SENSORS_ADM1275=m -CONFIG_SENSORS_LM25066=m -CONFIG_SENSORS_MAX16064=m -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -CONFIG_SENSORS_SIS5595=m -# CONFIG_SENSORS_SMM665 is not set -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -CONFIG_SENSORS_ADS1015=m -CONFIG_SENSORS_ADS7828=m -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_APPLESMC=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -CONFIG_SENSORS_ATK0110=m -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -CONFIG_WATCHDOG_NOWAYOUT=y - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -CONFIG_F71808E_WDT=m -CONFIG_SP5100_TCO=m -# CONFIG_SC520_WDT is not set -CONFIG_SBC_FITPC2_WATCHDOG=m -# CONFIG_EUROTECH_WDT is not set -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -# CONFIG_WAFER_WDT is not set -CONFIG_I6300ESB_WDT=m -CONFIG_ITCO_WDT=m -# CONFIG_ITCO_VENDOR_SUPPORT is not set -CONFIG_IT8712F_WDT=m -CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -CONFIG_HPWDT_NMI_DECODING=y -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -CONFIG_NV_TCO=m -# CONFIG_60XX_WDT is not set -# CONFIG_SBC8360_WDT is not set -# CONFIG_SBC7240_WDT is not set -# CONFIG_CPU5_WDT is not set -CONFIG_SMSC_SCH311X_WDT=m -# CONFIG_SMSC37B787_WDT is not set -CONFIG_W83627HF_WDT=m -CONFIG_W83697HF_WDT=m -CONFIG_W83697UG_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -# CONFIG_SBC_EPX_C3_WATCHDOG is not set - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=m -CONFIG_SSB_SPROM=y -CONFIG_SSB_BLOCKIO=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -CONFIG_SSB_B43_PCI_BRIDGE=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -# CONFIG_BCMA is not set -CONFIG_MFD_SUPPORT=y -CONFIG_MFD_CORE=m -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -# CONFIG_HTC_PASIC3 is not set -# CONFIG_UCB1400_CORE is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TMIO is not set -CONFIG_MFD_WM8400=m -# CONFIG_MFD_PCF50633 is not set -# CONFIG_ABX500_CORE is not set -CONFIG_MFD_CS5535=m -# CONFIG_MFD_TIMBERDALE is not set -CONFIG_LPC_SCH=m -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -CONFIG_MFD_VX855=m 
-CONFIG_MFD_WL1273_CORE=m -# CONFIG_REGULATOR is not set -CONFIG_MEDIA_SUPPORT=m - -# -# Multimedia core support -# -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_DEV=m -CONFIG_VIDEO_V4L2_COMMON=m -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_DVB_CORE=m -CONFIG_DVB_NET=y -CONFIG_VIDEO_MEDIA=m - -# -# Multimedia drivers -# -CONFIG_VIDEO_SAA7146=m -CONFIG_VIDEO_SAA7146_VV=m -CONFIG_RC_CORE=m -CONFIG_LIRC=m -CONFIG_RC_MAP=m -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_SONY_DECODER=m -CONFIG_IR_RC5_SZ_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m -CONFIG_IR_LIRC_CODEC=m -CONFIG_IR_ENE=m -CONFIG_IR_IMON=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_ITE_CIR=m -CONFIG_IR_FINTEK=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_REDRAT3=m -CONFIG_IR_STREAMZAP=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_RC_LOOPBACK=m -CONFIG_MEDIA_ATTACH=y -CONFIG_MEDIA_TUNER=m -CONFIG_MEDIA_TUNER_CUSTOMISE=y - -# -# Customize TV tuners -# -CONFIG_MEDIA_TUNER_SIMPLE=m -CONFIG_MEDIA_TUNER_TDA8290=m -CONFIG_MEDIA_TUNER_TDA827X=m -CONFIG_MEDIA_TUNER_TDA18271=m -CONFIG_MEDIA_TUNER_TDA9887=m -CONFIG_MEDIA_TUNER_TEA5761=m -CONFIG_MEDIA_TUNER_TEA5767=m -CONFIG_MEDIA_TUNER_MT20XX=m -CONFIG_MEDIA_TUNER_MT2060=m -CONFIG_MEDIA_TUNER_MT2266=m -CONFIG_MEDIA_TUNER_MT2131=m -CONFIG_MEDIA_TUNER_QT1010=m -CONFIG_MEDIA_TUNER_XC2028=m -CONFIG_MEDIA_TUNER_XC5000=m -CONFIG_MEDIA_TUNER_XC4000=m -CONFIG_MEDIA_TUNER_MXL5005S=m -CONFIG_MEDIA_TUNER_MXL5007T=m -CONFIG_MEDIA_TUNER_MC44S803=m -CONFIG_MEDIA_TUNER_MAX2165=m -CONFIG_MEDIA_TUNER_TDA18218=m -CONFIG_MEDIA_TUNER_TDA18212=m -CONFIG_VIDEO_V4L2=m -CONFIG_VIDEOBUF_GEN=m -CONFIG_VIDEOBUF_DMA_SG=m -CONFIG_VIDEOBUF_VMALLOC=m -CONFIG_VIDEOBUF_DMA_CONTIG=m -CONFIG_VIDEOBUF_DVB=m -CONFIG_VIDEO_BTCX=m -CONFIG_VIDEO_TVEEPROM=m -CONFIG_VIDEO_TUNER=m -CONFIG_VIDEOBUF2_CORE=m -CONFIG_VIDEOBUF2_MEMOPS=m -CONFIG_VIDEOBUF2_DMA_CONTIG=m -CONFIG_VIDEOBUF2_VMALLOC=m -CONFIG_VIDEO_CAPTURE_DRIVERS=y -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_HELPER_CHIPS_AUTO=y -CONFIG_VIDEO_IR_I2C=m - -# -# Audio decoders, processors and mixers -# -CONFIG_VIDEO_TVAUDIO=m -CONFIG_VIDEO_TDA7432=m -CONFIG_VIDEO_TDA9840=m -CONFIG_VIDEO_TEA6415C=m -CONFIG_VIDEO_TEA6420=m -CONFIG_VIDEO_MSP3400=m -CONFIG_VIDEO_CS5345=m -CONFIG_VIDEO_CS53L32A=m -CONFIG_VIDEO_WM8775=m -CONFIG_VIDEO_WM8739=m -CONFIG_VIDEO_VP27SMPX=m - -# -# RDS decoders -# -CONFIG_VIDEO_SAA6588=m - -# -# Video decoders -# -CONFIG_VIDEO_ADV7180=m -CONFIG_VIDEO_BT819=m -CONFIG_VIDEO_BT856=m -CONFIG_VIDEO_BT866=m -CONFIG_VIDEO_KS0127=m -CONFIG_VIDEO_SAA7110=m -CONFIG_VIDEO_SAA711X=m -CONFIG_VIDEO_TVP5150=m -CONFIG_VIDEO_VPX3220=m - -# -# Video and audio decoders -# -CONFIG_VIDEO_SAA717X=m -CONFIG_VIDEO_CX25840=m - -# -# MPEG video encoders -# -CONFIG_VIDEO_CX2341X=m - -# -# Video encoders -# -CONFIG_VIDEO_SAA7127=m -CONFIG_VIDEO_SAA7185=m -CONFIG_VIDEO_ADV7170=m -CONFIG_VIDEO_ADV7175=m - -# -# Camera sensor devices -# -CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9V011=m - -# -# Flash devices -# - -# -# Video improvement chips -# -CONFIG_VIDEO_UPD64031A=m -CONFIG_VIDEO_UPD64083=m - -# -# Miscelaneous helper chips -# -CONFIG_VIDEO_M52790=m -# CONFIG_VIDEO_VIVI is not set -CONFIG_VIDEO_BT848=m -CONFIG_VIDEO_BT848_DVB=y -CONFIG_VIDEO_BWQCAM=m -CONFIG_VIDEO_CQCAM=m -CONFIG_VIDEO_W9966=m -CONFIG_VIDEO_CPIA2=m -CONFIG_VIDEO_ZORAN=m -CONFIG_VIDEO_ZORAN_DC30=m -CONFIG_VIDEO_ZORAN_ZR36060=m -CONFIG_VIDEO_ZORAN_BUZ=m -CONFIG_VIDEO_ZORAN_DC10=m -CONFIG_VIDEO_ZORAN_LML33=m -CONFIG_VIDEO_ZORAN_LML33R10=m 
-CONFIG_VIDEO_ZORAN_AVS6EYES=m -CONFIG_VIDEO_MEYE=m -CONFIG_VIDEO_SAA7134=m -CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y -CONFIG_VIDEO_SAA7134_DVB=m -CONFIG_VIDEO_MXB=m -CONFIG_VIDEO_HEXIUM_ORION=m -CONFIG_VIDEO_HEXIUM_GEMINI=m -CONFIG_VIDEO_TIMBERDALE=m -CONFIG_VIDEO_CX88=m -CONFIG_VIDEO_CX88_ALSA=m -CONFIG_VIDEO_CX88_BLACKBIRD=m -CONFIG_VIDEO_CX88_DVB=m -CONFIG_VIDEO_CX88_MPEG=m -CONFIG_VIDEO_CX88_VP3054=m -CONFIG_VIDEO_CX23885=m -# CONFIG_MEDIA_ALTERA_CI is not set -CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_IVTV=m -CONFIG_VIDEO_FB_IVTV=m -CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m -CONFIG_VIDEO_SAA7164=m -CONFIG_VIDEO_CAFE_CCIC=m -CONFIG_VIDEO_SR030PC30=m -CONFIG_VIDEO_VIA_CAMERA=m -CONFIG_VIDEO_NOON010PC30=m -# CONFIG_VIDEO_M5MOLS is not set -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T031=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m -CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_SOC_CAMERA_OV2640=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV6650=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m -CONFIG_V4L_USB_DRIVERS=y -CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -CONFIG_USB_GSPCA=m -CONFIG_USB_M5602=m -CONFIG_USB_STV06XX=m -CONFIG_USB_GL860=m -CONFIG_USB_GSPCA_BENQ=m -CONFIG_USB_GSPCA_CONEX=m -CONFIG_USB_GSPCA_CPIA1=m -CONFIG_USB_GSPCA_ETOMS=m -CONFIG_USB_GSPCA_FINEPIX=m -CONFIG_USB_GSPCA_JEILINJ=m -CONFIG_USB_GSPCA_KINECT=m -CONFIG_USB_GSPCA_KONICA=m -CONFIG_USB_GSPCA_MARS=m -CONFIG_USB_GSPCA_MR97310A=m -CONFIG_USB_GSPCA_NW80X=m -CONFIG_USB_GSPCA_OV519=m -CONFIG_USB_GSPCA_OV534=m -CONFIG_USB_GSPCA_OV534_9=m -CONFIG_USB_GSPCA_PAC207=m -CONFIG_USB_GSPCA_PAC7302=m -CONFIG_USB_GSPCA_PAC7311=m -CONFIG_USB_GSPCA_SE401=m -CONFIG_USB_GSPCA_SN9C2028=m -CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SONIXB=m -CONFIG_USB_GSPCA_SONIXJ=m -CONFIG_USB_GSPCA_SPCA500=m -CONFIG_USB_GSPCA_SPCA501=m -CONFIG_USB_GSPCA_SPCA505=m -CONFIG_USB_GSPCA_SPCA506=m -CONFIG_USB_GSPCA_SPCA508=m -CONFIG_USB_GSPCA_SPCA561=m -CONFIG_USB_GSPCA_SPCA1528=m -CONFIG_USB_GSPCA_SQ905=m -CONFIG_USB_GSPCA_SQ905C=m -CONFIG_USB_GSPCA_SQ930X=m -CONFIG_USB_GSPCA_STK014=m -CONFIG_USB_GSPCA_STV0680=m -CONFIG_USB_GSPCA_SUNPLUS=m -CONFIG_USB_GSPCA_T613=m -CONFIG_USB_GSPCA_TV8532=m -CONFIG_USB_GSPCA_VC032X=m -CONFIG_USB_GSPCA_VICAM=m -CONFIG_USB_GSPCA_XIRLINK_CIT=m -CONFIG_USB_GSPCA_ZC3XX=m -CONFIG_VIDEO_PVRUSB2=m -CONFIG_VIDEO_PVRUSB2_SYSFS=y -CONFIG_VIDEO_PVRUSB2_DVB=y -# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set -CONFIG_VIDEO_HDPVR=m -CONFIG_VIDEO_EM28XX=m -CONFIG_VIDEO_EM28XX_ALSA=m -CONFIG_VIDEO_EM28XX_DVB=m -CONFIG_VIDEO_EM28XX_RC=y -CONFIG_VIDEO_TLG2300=m -CONFIG_VIDEO_CX231XX=m -CONFIG_VIDEO_CX231XX_RC=y -CONFIG_VIDEO_CX231XX_ALSA=m -CONFIG_VIDEO_CX231XX_DVB=m -CONFIG_VIDEO_USBVISION=m -# CONFIG_USB_ET61X251 is not set -# CONFIG_USB_SN9C102 is not set -CONFIG_USB_PWC=m -# CONFIG_USB_PWC_DEBUG is not set -CONFIG_USB_PWC_INPUT_EVDEV=y -CONFIG_USB_ZR364XX=m -CONFIG_USB_STKWEBCAM=m -CONFIG_USB_S2255=m -CONFIG_V4L_MEM2MEM_DRIVERS=y -# CONFIG_VIDEO_MEM2MEM_TESTDEV is not set -CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_MAXIRADIO=m -CONFIG_I2C_SI4713=m -CONFIG_RADIO_SI4713=m -CONFIG_USB_DSBR=m -CONFIG_RADIO_SI470X=y -CONFIG_USB_SI470X=m -CONFIG_I2C_SI470X=m -CONFIG_USB_MR800=m -# CONFIG_RADIO_TEA5764 is not set -# CONFIG_RADIO_SAA7706H is not set -# CONFIG_RADIO_TEF6862 is not set -CONFIG_RADIO_WL1273=m - -# -# Texas Instruments 
WL128x FM driver (ST based) -# -# CONFIG_RADIO_WL128X is not set -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_DVB_DYNAMIC_MINORS=y -CONFIG_DVB_CAPTURE_DRIVERS=y - -# -# Supported SAA7146 based PCI Adapters -# -CONFIG_TTPCI_EEPROM=m -CONFIG_DVB_AV7110=m -CONFIG_DVB_AV7110_OSD=y -CONFIG_DVB_BUDGET_CORE=m -CONFIG_DVB_BUDGET=m -CONFIG_DVB_BUDGET_CI=m -CONFIG_DVB_BUDGET_AV=m -CONFIG_DVB_BUDGET_PATCH=m - -# -# Supported USB Adapters -# -CONFIG_DVB_USB=m -# CONFIG_DVB_USB_DEBUG is not set -CONFIG_DVB_USB_A800=m -CONFIG_DVB_USB_DIBUSB_MB=m -CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y -CONFIG_DVB_USB_DIBUSB_MC=m -CONFIG_DVB_USB_DIB0700=m -CONFIG_DVB_USB_UMT_010=m -CONFIG_DVB_USB_CXUSB=m -CONFIG_DVB_USB_M920X=m -CONFIG_DVB_USB_GL861=m -CONFIG_DVB_USB_AU6610=m -CONFIG_DVB_USB_DIGITV=m -CONFIG_DVB_USB_VP7045=m -CONFIG_DVB_USB_VP702X=m -CONFIG_DVB_USB_GP8PSK=m -CONFIG_DVB_USB_NOVA_T_USB2=m -CONFIG_DVB_USB_TTUSB2=m -CONFIG_DVB_USB_DTT200U=m -CONFIG_DVB_USB_OPERA1=m -CONFIG_DVB_USB_AF9005=m -CONFIG_DVB_USB_AF9005_REMOTE=m -CONFIG_DVB_USB_DW2102=m -CONFIG_DVB_USB_CINERGY_T2=m -CONFIG_DVB_USB_ANYSEE=m -CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_AF9015=m -CONFIG_DVB_USB_CE6230=m -CONFIG_DVB_USB_FRIIO=m -CONFIG_DVB_USB_EC168=m -CONFIG_DVB_USB_AZ6027=m -CONFIG_DVB_USB_LME2510=m -CONFIG_DVB_USB_TECHNISAT_USB2=m -CONFIG_DVB_TTUSB_BUDGET=m -CONFIG_DVB_TTUSB_DEC=m -CONFIG_SMS_SIANO_MDTV=m - -# -# Siano module components -# -CONFIG_SMS_USB_DRV=m -# CONFIG_SMS_SDIO_DRV is not set - -# -# Supported FlexCopII (B2C2) Adapters -# -CONFIG_DVB_B2C2_FLEXCOP=m -CONFIG_DVB_B2C2_FLEXCOP_PCI=m -CONFIG_DVB_B2C2_FLEXCOP_USB=m -# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set - -# -# Supported BT878 Adapters -# -CONFIG_DVB_BT8XX=m - -# -# Supported Pluto2 Adapters -# -CONFIG_DVB_PLUTO2=m - -# -# Supported SDMC DM1105 Adapters -# -CONFIG_DVB_DM1105=m - -# -# Supported FireWire (IEEE 1394) Adapters -# -CONFIG_DVB_FIREDTV=m -CONFIG_DVB_FIREDTV_INPUT=y - -# -# Supported Earthsoft PT1 Adapters -# -CONFIG_DVB_PT1=m - -# -# Supported Mantis Adapters -# -CONFIG_MANTIS_CORE=m -CONFIG_DVB_MANTIS=m -CONFIG_DVB_HOPPER=m - -# -# Supported nGene Adapters -# -CONFIG_DVB_NGENE=m - -# -# Supported ddbridge ('Octopus') Adapters -# -# CONFIG_DVB_DDBRIDGE is not set - -# -# Supported DVB Frontends -# -CONFIG_DVB_FE_CUSTOMISE=y - -# -# Customise DVB Frontends -# - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_STB0899=m -CONFIG_DVB_STB6100=m -CONFIG_DVB_STV090x=m -CONFIG_DVB_STV6110x=m - -# -# Multistandard (cable + terrestrial) frontends -# -CONFIG_DVB_DRXK=m -CONFIG_DVB_TDA18271C2DD=m - -# -# DVB-S (satellite) frontends -# -CONFIG_DVB_CX24110=m -CONFIG_DVB_CX24123=m -CONFIG_DVB_MT312=m -CONFIG_DVB_ZL10036=m -CONFIG_DVB_ZL10039=m -CONFIG_DVB_S5H1420=m -CONFIG_DVB_STV0288=m -CONFIG_DVB_STB6000=m -CONFIG_DVB_STV0299=m -CONFIG_DVB_STV6110=m -CONFIG_DVB_STV0900=m -CONFIG_DVB_TDA8083=m -CONFIG_DVB_TDA10086=m -CONFIG_DVB_TDA8261=m -CONFIG_DVB_VES1X93=m -CONFIG_DVB_TUNER_ITD1000=m -CONFIG_DVB_TUNER_CX24113=m -CONFIG_DVB_TDA826X=m -CONFIG_DVB_TUA6100=m -CONFIG_DVB_CX24116=m -CONFIG_DVB_SI21XX=m -CONFIG_DVB_DS3000=m -CONFIG_DVB_MB86A16=m - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_SP8870=m -CONFIG_DVB_SP887X=m -CONFIG_DVB_CX22700=m -CONFIG_DVB_CX22702=m -CONFIG_DVB_S5H1432=m -CONFIG_DVB_DRXD=m -CONFIG_DVB_L64781=m -CONFIG_DVB_TDA1004X=m -CONFIG_DVB_NXT6000=m -CONFIG_DVB_MT352=m -CONFIG_DVB_ZL10353=m -CONFIG_DVB_DIB3000MB=m -CONFIG_DVB_DIB3000MC=m -CONFIG_DVB_DIB7000M=m -CONFIG_DVB_DIB7000P=m -CONFIG_DVB_DIB9000=m -CONFIG_DVB_TDA10048=m -CONFIG_DVB_AF9013=m 
-CONFIG_DVB_EC100=m -CONFIG_DVB_STV0367=m -CONFIG_DVB_CXD2820R=m - -# -# DVB-C (cable) frontends -# -CONFIG_DVB_VES1820=m -CONFIG_DVB_TDA10021=m -CONFIG_DVB_TDA10023=m -CONFIG_DVB_STV0297=m - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -CONFIG_DVB_NXT200X=m -CONFIG_DVB_OR51211=m -CONFIG_DVB_OR51132=m -CONFIG_DVB_BCM3510=m -CONFIG_DVB_LGDT330X=m -CONFIG_DVB_LGDT3305=m -CONFIG_DVB_S5H1409=m -CONFIG_DVB_AU8522=m -CONFIG_DVB_S5H1411=m - -# -# ISDB-T (terrestrial) frontends -# -CONFIG_DVB_S921=m -CONFIG_DVB_DIB8000=m -CONFIG_DVB_MB86A20S=m - -# -# Digital terrestrial only tuners/PLL -# -CONFIG_DVB_PLL=m -CONFIG_DVB_TUNER_DIB0070=m -CONFIG_DVB_TUNER_DIB0090=m - -# -# SEC control devices for DVB-S -# -CONFIG_DVB_LNBP21=m -CONFIG_DVB_ISL6405=m -CONFIG_DVB_ISL6421=m -CONFIG_DVB_ISL6423=m -CONFIG_DVB_LGS8GL5=m -CONFIG_DVB_LGS8GXX=m -CONFIG_DVB_ATBM8830=m -CONFIG_DVB_TDA665x=m -CONFIG_DVB_IX2505V=m - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set - -# -# Graphics support -# -CONFIG_AGP=y -CONFIG_AGP_ALI=y -CONFIG_AGP_ATI=y -CONFIG_AGP_AMD=y -CONFIG_AGP_AMD64=y -CONFIG_AGP_INTEL=y -CONFIG_AGP_NVIDIA=y -CONFIG_AGP_SIS=y -CONFIG_AGP_SWORKS=y -CONFIG_AGP_VIA=y -CONFIG_AGP_EFFICEON=y -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_TTM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_KMS=y -CONFIG_DRM_I810=m -CONFIG_DRM_I915=m -CONFIG_DRM_I915_KMS=y -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_DRM_VIA=m -CONFIG_DRM_SAVAGE=m -CONFIG_STUB_POULSBO=m -CONFIG_VGASTATE=m -CONFIG_VIDEO_OUTPUT_CONTROL=m -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_DDC=m -CONFIG_FB_BOOT_VESA_SUPPORT=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -# CONFIG_FB_WMT_GE_ROPS is not set -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_SVGALIB=m -# CONFIG_FB_MACMODES is not set -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -CONFIG_FB_CIRRUS=m -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -CONFIG_FB_VGA16=m -# CONFIG_FB_UVESA is not set -CONFIG_FB_VESA=y -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_S1D13XXX is not set -CONFIG_FB_NVIDIA=m -CONFIG_FB_NVIDIA_I2C=y -# CONFIG_FB_NVIDIA_DEBUG is not set -CONFIG_FB_NVIDIA_BACKLIGHT=y -CONFIG_FB_RIVA=m -# CONFIG_FB_RIVA_I2C is not set -# CONFIG_FB_RIVA_DEBUG is not set -CONFIG_FB_RIVA_BACKLIGHT=y -CONFIG_FB_I810=m -CONFIG_FB_I810_GTF=y -CONFIG_FB_I810_I2C=y -# CONFIG_FB_LE80578 is not set -CONFIG_FB_MATROX=m -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y -CONFIG_FB_MATROX_G=y -CONFIG_FB_MATROX_I2C=m -CONFIG_FB_MATROX_MAVEN=m -CONFIG_FB_RADEON=m -CONFIG_FB_RADEON_I2C=y -CONFIG_FB_RADEON_BACKLIGHT=y -# CONFIG_FB_RADEON_DEBUG is not set -CONFIG_FB_ATY128=m -CONFIG_FB_ATY128_BACKLIGHT=y -CONFIG_FB_ATY=m -CONFIG_FB_ATY_CT=y -CONFIG_FB_ATY_GENERIC_LCD=y -CONFIG_FB_ATY_GX=y -CONFIG_FB_ATY_BACKLIGHT=y -CONFIG_FB_S3=m -CONFIG_FB_S3_DDC=y -CONFIG_FB_SAVAGE=m -CONFIG_FB_SAVAGE_I2C=y -CONFIG_FB_SAVAGE_ACCEL=y -# CONFIG_FB_SIS is not set -CONFIG_FB_VIA=m -# CONFIG_FB_VIA_DIRECT_PROCFS is not set 
-CONFIG_FB_VIA_X_COMPATIBILITY=y -CONFIG_FB_NEOMAGIC=m -CONFIG_FB_KYRO=m -CONFIG_FB_3DFX=m -CONFIG_FB_3DFX_ACCEL=y -CONFIG_FB_3DFX_I2C=y -CONFIG_FB_VOODOO1=m -# CONFIG_FB_VT8623 is not set -CONFIG_FB_TRIDENT=m -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -CONFIG_FB_GEODE=y -CONFIG_FB_GEODE_LX=y -CONFIG_FB_GEODE_GX=y -# CONFIG_FB_GEODE_GX1 is not set -# CONFIG_FB_TMIO is not set -CONFIG_FB_SM501=m -CONFIG_FB_UDL=m -CONFIG_FB_VIRTUAL=m -CONFIG_FB_METRONOME=m -CONFIG_FB_MB862XX=m -CONFIG_FB_MB862XX_PCI_GDC=y -CONFIG_FB_MB862XX_I2C=y -# CONFIG_FB_BROADSHEET is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -CONFIG_LCD_PLATFORM=m -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_BACKLIGHT_PROGEAR=m -CONFIG_BACKLIGHT_APPLE=m -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set - -# -# Display device support -# -CONFIG_DISPLAY_SUPPORT=m - -# -# Display hardware drivers -# - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=m -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -CONFIG_SND=m -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -CONFIG_SND_HWDEP=m -CONFIG_SND_RAWMIDI=m -CONFIG_SND_JACK=y -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_PCM_OSS_PLUGINS=y -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_HRTIMER=m -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_DYNAMIC_MINORS=y -# CONFIG_SND_SUPPORT_OLD_API is not set -CONFIG_SND_VERBOSE_PROCFS=y -CONFIG_SND_VERBOSE_PRINTK=y -CONFIG_SND_DEBUG=y -# CONFIG_SND_DEBUG_VERBOSE is not set -CONFIG_SND_PCM_XRUN_DEBUG=y -CONFIG_SND_VMASTER=y -CONFIG_SND_DMA_SGBUF=y -CONFIG_SND_RAWMIDI_SEQ=m -CONFIG_SND_OPL3_LIB_SEQ=m -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -CONFIG_SND_EMU10K1_SEQ=m -CONFIG_SND_MPU401_UART=m -CONFIG_SND_OPL3_LIB=m -CONFIG_SND_VX_LIB=m -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y -CONFIG_SND_PCSP=m -CONFIG_SND_DUMMY=m -CONFIG_SND_ALOOP=m -CONFIG_SND_VIRMIDI=m -CONFIG_SND_MTPAV=m -CONFIG_SND_MTS64=m -CONFIG_SND_SERIAL_U16550=m -CONFIG_SND_MPU401=m -CONFIG_SND_PORTMAN2X4=m -CONFIG_SND_AC97_POWER_SAVE=y -CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 -CONFIG_SND_SB_COMMON=m -CONFIG_SND_SB16_DSP=m -CONFIG_SND_TEA575X=m -CONFIG_SND_PCI=y -CONFIG_SND_AD1889=m -CONFIG_SND_ALS300=m -CONFIG_SND_ALS4000=m -CONFIG_SND_ALI5451=m -CONFIG_SND_ASIHPI=m -CONFIG_SND_ATIIXP=m -CONFIG_SND_ATIIXP_MODEM=m -CONFIG_SND_AU8810=m -CONFIG_SND_AU8820=m -CONFIG_SND_AU8830=m -# CONFIG_SND_AW2 is not set -CONFIG_SND_AZT3328=m -CONFIG_SND_BT87X=m -# CONFIG_SND_BT87X_OVERCLOCK is not set -CONFIG_SND_CA0106=m -CONFIG_SND_CMIPCI=m -CONFIG_SND_OXYGEN_LIB=m -CONFIG_SND_OXYGEN=m -CONFIG_SND_CS4281=m -CONFIG_SND_CS46XX=m -CONFIG_SND_CS46XX_NEW_DSP=y -CONFIG_SND_CS5530=m -CONFIG_SND_CS5535AUDIO=m -CONFIG_SND_CTXFI=m -CONFIG_SND_DARLA20=m -CONFIG_SND_GINA20=m -CONFIG_SND_LAYLA20=m -CONFIG_SND_DARLA24=m -CONFIG_SND_GINA24=m -CONFIG_SND_LAYLA24=m -CONFIG_SND_MONA=m -CONFIG_SND_MIA=m -CONFIG_SND_ECHO3G=m 
-CONFIG_SND_INDIGO=m -CONFIG_SND_INDIGOIO=m -CONFIG_SND_INDIGODJ=m -CONFIG_SND_INDIGOIOX=m -CONFIG_SND_INDIGODJX=m -CONFIG_SND_EMU10K1=m -CONFIG_SND_EMU10K1X=m -CONFIG_SND_ENS1370=m -CONFIG_SND_ENS1371=m -CONFIG_SND_ES1938=m -CONFIG_SND_ES1968=m -CONFIG_SND_ES1968_INPUT=y -CONFIG_SND_ES1968_RADIO=y -CONFIG_SND_FM801=m -CONFIG_SND_FM801_TEA575X_BOOL=y -CONFIG_SND_HDA_INTEL=m -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_HDA_HWDEP=y -CONFIG_SND_HDA_RECONFIG=y -CONFIG_SND_HDA_INPUT_BEEP=y -CONFIG_SND_HDA_INPUT_BEEP_MODE=1 -CONFIG_SND_HDA_INPUT_JACK=y -# CONFIG_SND_HDA_PATCH_LOADER is not set -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y -CONFIG_SND_HDA_CODEC_ANALOG=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_VIA=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CIRRUS=y -CONFIG_SND_HDA_CODEC_CONEXANT=y -CONFIG_SND_HDA_CODEC_CA0110=y -CONFIG_SND_HDA_CODEC_CA0132=y -CONFIG_SND_HDA_CODEC_CMEDIA=y -CONFIG_SND_HDA_CODEC_SI3054=y -CONFIG_SND_HDA_GENERIC=y -# CONFIG_SND_HDA_POWER_SAVE is not set -CONFIG_SND_HDSP=m -CONFIG_SND_HDSPM=m -CONFIG_SND_ICE1712=m -CONFIG_SND_ICE1724=m -CONFIG_SND_INTEL8X0=m -CONFIG_SND_INTEL8X0M=m -CONFIG_SND_KORG1212=m -CONFIG_SND_LOLA=m -CONFIG_SND_LX6464ES=m -CONFIG_SND_MAESTRO3=m -CONFIG_SND_MAESTRO3_INPUT=y -CONFIG_SND_MIXART=m -CONFIG_SND_NM256=m -CONFIG_SND_PCXHR=m -CONFIG_SND_RIPTIDE=m -CONFIG_SND_RME32=m -CONFIG_SND_RME96=m -CONFIG_SND_RME9652=m -CONFIG_SND_SIS7019=m -CONFIG_SND_SONICVIBES=m -CONFIG_SND_TRIDENT=m -CONFIG_SND_VIA82XX=m -CONFIG_SND_VIA82XX_MODEM=m -CONFIG_SND_VIRTUOSO=m -CONFIG_SND_VX222=m -CONFIG_SND_YMFPCI=m -CONFIG_SND_USB=y -CONFIG_SND_USB_AUDIO=m -CONFIG_SND_USB_UA101=m -CONFIG_SND_USB_USX2Y=m -CONFIG_SND_USB_CAIAQ=m -CONFIG_SND_USB_CAIAQ_INPUT=y -CONFIG_SND_USB_US122L=m -CONFIG_SND_USB_6FIRE=m -CONFIG_SND_FIREWIRE=y -CONFIG_SND_FIREWIRE_LIB=m -CONFIG_SND_FIREWIRE_SPEAKERS=m -# CONFIG_SND_ISIGHT is not set -# CONFIG_SND_SOC is not set -# CONFIG_SOUND_PRIME is not set -CONFIG_AC97_BUS=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HIDRAW=y - -# -# USB Input Devices -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=y -# CONFIG_HID_ACRUX is not set -CONFIG_HID_APPLE=y -CONFIG_HID_BELKIN=y -CONFIG_HID_CHERRY=y -CONFIG_HID_CHICONY=y -# CONFIG_HID_PRODIKEYS is not set -CONFIG_HID_CYPRESS=y -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -CONFIG_HID_EZKEY=y -# CONFIG_HID_HOLTEK is not set -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=y -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=y -CONFIG_HID_LCPOWER=m -CONFIG_HID_LOGITECH=y -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWII_FF is not set -# CONFIG_HID_MAGICMOUSE is not set -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MONTEREY=y -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTRIG is not set -CONFIG_HID_ORTEK=m -# CONFIG_HID_PANTHERLORD is not set -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -# CONFIG_HID_QUANTA is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_ROCCAT_COMMON=m -CONFIG_HID_ROCCAT_ARVO=m -CONFIG_HID_ROCCAT_KONE=m -CONFIG_HID_ROCCAT_KONEPLUS=m -CONFIG_HID_ROCCAT_KOVAPLUS=m -CONFIG_HID_ROCCAT_PYRA=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SONY is not set -CONFIG_HID_SPEEDLINK=m 
-CONFIG_HID_SUNPLUS=m -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -CONFIG_HID_TOPSEED=m -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_ZEROPLUS is not set -CONFIG_HID_ZYDACRON=m -CONFIG_USB_SUPPORT=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y -CONFIG_USB_ARCH_HAS_EHCI=y -CONFIG_USB=y -# CONFIG_USB_DEBUG is not set -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -CONFIG_USB_SUSPEND=y -# CONFIG_USB_OTG is not set -CONFIG_USB_MON=m -CONFIG_USB_WUSB=m -CONFIG_USB_WUSB_CBAF=m -# CONFIG_USB_WUSB_CBAF_DEBUG is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=m -# CONFIG_USB_XHCI_HCD_DEBUGGING is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set -CONFIG_USB_ISP1362_HCD=m -CONFIG_USB_OHCI_HCD=y -# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set -# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_U132_HCD is not set -CONFIG_USB_SL811_HCD=m -CONFIG_USB_SL811_HCD_ISO=y -# CONFIG_USB_R8A66597_HCD is not set -CONFIG_USB_WHCI_HCD=m -CONFIG_USB_HWA_HCD=m - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m -# CONFIG_USB_LIBUSUAL is not set - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m - -# -# USB port drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_SERIAL=m -CONFIG_USB_EZUSB=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -# CONFIG_USB_SERIAL_EMPEG is not set -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_FUNSOFT=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_GARMIN is not set -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -# CONFIG_USB_SERIAL_KEYSPAN is not set -CONFIG_USB_SERIAL_KLSI=m -# CONFIG_USB_SERIAL_KOBIL_SCT is not set -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MOTOROLA=m -# CONFIG_USB_SERIAL_NAVMAN is not set -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -# CONFIG_USB_SERIAL_HP4X is not set -# CONFIG_USB_SERIAL_SAFE is not set 
-CONFIG_USB_SERIAL_SIEMENS_MPI=m -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -# CONFIG_USB_SERIAL_SYMBOL is not set -# CONFIG_USB_SERIAL_TI is not set -# CONFIG_USB_SERIAL_CYBERJACK is not set -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -# CONFIG_USB_SERIAL_OMNINET is not set -# CONFIG_USB_SERIAL_OPTICON is not set -# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set -# CONFIG_USB_SERIAL_ZIO is not set -CONFIG_USB_SERIAL_SSU100=m -# CONFIG_USB_SERIAL_DEBUG is not set - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -CONFIG_USB_LCD=m -CONFIG_USB_LED=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -CONFIG_USB_FTDI_ELAN=m -# CONFIG_USB_APPLEDISPLAY is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_ISIGHTFW is not set -CONFIG_USB_YUREX=m -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m -# CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -CONFIG_USB_OTG_UTILS=y -# CONFIG_USB_GPIO_VBUS is not set -CONFIG_NOP_USB_XCEIV=m -CONFIG_UWB=m -CONFIG_UWB_HWA=m -CONFIG_UWB_WHCI=m -CONFIG_UWB_I1480U=m -CONFIG_MMC=m -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_UNSAFE_RESUME is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_PLTFM=m -CONFIG_MMC_WBSD=m -CONFIG_MMC_TIFM_SD=m -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -CONFIG_LEDS_LM3530=m -CONFIG_LEDS_ALIX2=m -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -CONFIG_LEDS_LP5521=m -CONFIG_LEDS_LP5523=m -CONFIG_LEDS_CLEVO_MAIL=m -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_INTEL_SS4200=m -CONFIG_LEDS_LT3593=m -CONFIG_LEDS_DELL_NETBOOKS=m -CONFIG_LEDS_TRIGGERS=y - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_GPIO=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC=y - -# -# Reporting subsystems -# -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_MCE_INJ=m -CONFIG_EDAC_MM_EDAC=m -CONFIG_EDAC_MCE=y -CONFIG_EDAC_AMD76X=m -CONFIG_EDAC_E7XXX=m -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82875P=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I82860=m -CONFIG_EDAC_R82600=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y 
-CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_DS1307=m -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -CONFIG_RTC_DRV_RV3029C2=m - -# -# SPI RTC drivers -# - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_BQ4802=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_V3020=m - -# -# on-CPU RTC drivers -# -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -# CONFIG_INTEL_MID_DMAC is not set -CONFIG_INTEL_IOATDMA=m -CONFIG_TIMB_DMA=m -CONFIG_PCH_DMA=m -CONFIG_DMA_ENGINE=y - -# -# DMA Clients -# -CONFIG_NET_DMA=y -CONFIG_ASYNC_TX_DMA=y -# CONFIG_DMATEST is not set -CONFIG_DCA=m -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=m -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -CONFIG_VIRTIO=m -CONFIG_VIRTIO_RING=m - -# -# Virtio drivers -# -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_STAGING=y -CONFIG_ET131X=m -# CONFIG_ET131X_DEBUG is not set -# CONFIG_SLICOSS is not set -# CONFIG_VIDEO_GO7007 is not set -# CONFIG_VIDEO_CX25821 is not set -# CONFIG_VIDEO_TM6000 is not set -# CONFIG_DVB_CXD2099 is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_W35UND is not set -# CONFIG_PRISM2_USB is not set -# CONFIG_ECHO is not set -# CONFIG_BRCMUTIL is not set -# CONFIG_BRCMSMAC is not set -# CONFIG_BRCMFMAC is not set -# CONFIG_COMEDI is not set -# CONFIG_ASUS_OLED is not set -# CONFIG_PANEL is not set -# CONFIG_R8187SE is not set -# CONFIG_RTL8192U is not set -# CONFIG_RTL8192E is not set -# CONFIG_R8712U is not set -# CONFIG_RTS_PSTOR is not set -# CONFIG_TRANZPORT is not set -# CONFIG_POHMELFS is not set -# CONFIG_IDE_PHISON is not set -# CONFIG_LINE6_USB is not set -CONFIG_DRM_VMWGFX=m -CONFIG_DRM_NOUVEAU=m -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -CONFIG_DRM_NOUVEAU_DEBUG=y - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_USB_SERIAL_QUATECH2 is not set -# CONFIG_USB_SERIAL_QUATECH_USB2 is not set -# CONFIG_VT6655 is not set -# CONFIG_VT6656 is not set -CONFIG_HYPERV=m -CONFIG_HYPERV_STORAGE=m -CONFIG_HYPERV_BLOCK=m -CONFIG_HYPERV_NET=m -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_MOUSE=m -# CONFIG_VME_BUS is not set -# CONFIG_DX_SEP is not set -# CONFIG_IIO is not set -# CONFIG_XVMALLOC is not set -# CONFIG_ZRAM is not set -# CONFIG_ZCACHE is not set -# CONFIG_FB_SM7XX is not set -# CONFIG_VIDEO_DT3155 is not set -# CONFIG_CRYSTALHD is not set -# CONFIG_FB_XGI is not set -# CONFIG_LIRC_STAGING is not set -# CONFIG_EASYCAP is not set -# CONFIG_SOLO6X10 is 
not set -# CONFIG_ACPI_QUICKSTART is not set -# CONFIG_ATH6K_LEGACY is not set -# CONFIG_USB_ENESTORAGE is not set -# CONFIG_BCM_WIMAX is not set -# CONFIG_FT1000 is not set - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set -# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set -# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set -# CONFIG_DRM_PSB is not set -# CONFIG_ALTERA_STAPL is not set -# CONFIG_INTEL_MEI is not set -CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_ACER_WMI=m -CONFIG_ACERHDF=m -CONFIG_ASUS_LAPTOP=m -CONFIG_DELL_LAPTOP=m -CONFIG_DELL_WMI=m -CONFIG_DELL_WMI_AIO=m -CONFIG_FUJITSU_LAPTOP=m -# CONFIG_FUJITSU_LAPTOP_DEBUG is not set -CONFIG_TC1100_WMI=m -CONFIG_HP_ACCEL=m -CONFIG_HP_WMI=m -CONFIG_MSI_LAPTOP=m -CONFIG_PANASONIC_LAPTOP=m -CONFIG_COMPAL_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -CONFIG_IDEAPAD_LAPTOP=m -CONFIG_THINKPAD_ACPI=m -CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -CONFIG_SENSORS_HDAPS=m -# CONFIG_INTEL_MENLOW is not set -CONFIG_EEEPC_LAPTOP=m -CONFIG_ASUS_WMI=m -CONFIG_ASUS_NB_WMI=m -CONFIG_EEEPC_WMI=m -CONFIG_ACPI_WMI=m -CONFIG_MSI_WMI=m -# CONFIG_ACPI_ASUS is not set -CONFIG_TOPSTAR_LAPTOP=m -CONFIG_ACPI_TOSHIBA=m -CONFIG_TOSHIBA_BT_RFKILL=m -CONFIG_ACPI_CMPC=m -CONFIG_INTEL_IPS=m -# CONFIG_IBM_RTL is not set -CONFIG_XO15_EBOOK=m -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_MXM_WMI=m -CONFIG_INTEL_OAKTRAIL=m -CONFIG_SAMSUNG_Q10=m -CONFIG_CLKSRC_I8253=y -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y -CONFIG_DMAR=y -CONFIG_DMAR_DEFAULT_ON=y -CONFIG_DMAR_FLOPPY_WA=y -CONFIG_VIRT_DRIVERS=y - -# -# Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_EFI_VARS=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_ISCSI_IBFT_FIND is not set -# CONFIG_SIGMA is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT23=y -CONFIG_EXT4_FS_XATTR=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -CONFIG_REISERFS_FS=m -# CONFIG_REISERFS_CHECK is not set -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_XFS_RT=y -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_NILFS2_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS4_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_GENERIC_ACL=y - -# -# Caches -# -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is 
not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_CONFIGFS_FS=m -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EXOFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_OBJLAYOUT=m -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFS_USE_NEW_IDMAPPER is not set -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -# CONFIG_CEPH_FS is not set -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -# CONFIG_CIFS_STATS2 is not set -# CONFIG_CIFS_WEAK_PW_HASH is not set -# CONFIG_CIFS_UPCALL is not set -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -# CONFIG_CIFS_DEBUG2 is not set -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_FSCACHE=y -CONFIG_CIFS_ACL=y -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf-8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m 
-CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_PRINTK_TIME=y -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -# CONFIG_ENABLE_WARN_DEPRECATED is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_MAGIC_SYSRQ=y -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_UNUSED_SYMBOLS is not set -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_SHIRQ=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -# CONFIG_DETECT_HUNG_TASK is not set -CONFIG_SCHED_DEBUG=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_HIGHMEM is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_LIST=y -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -CONFIG_BOOT_PRINTK_DELAY=y -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -# CONFIG_LKDTM is not set -# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FTRACE_NMI_ENTER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_RING_BUFFER=y -CONFIG_FTRACE_NMI_ENTER=y -CONFIG_EVENT_TRACING=y -CONFIG_EVENT_POWER_TRACING_DEPRECATED=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -CONFIG_STACK_TRACER=y 
-CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_FUNCTION_PROFILER=y -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set -# CONFIG_BUILD_DOCSRC is not set -CONFIG_DYNAMIC_DEBUG=y -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_ATOMIC64_SELFTEST is not set -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_TEST_KSTRTOX is not set -CONFIG_STRICT_DEVMEM=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_EARLY_PRINTK_DBGP is not set -CONFIG_DEBUG_STACKOVERFLOW=y -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_NX_TEST=m -CONFIG_DOUBLEFAULT=y -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -# -# Security options -# - -# -# Grsecurity -# -CONFIG_GRKERNSEC=y -# CONFIG_GRKERNSEC_LOW is not set -# CONFIG_GRKERNSEC_MEDIUM is not set -CONFIG_GRKERNSEC_HIGH=y -# CONFIG_GRKERNSEC_CUSTOM is not set - -# -# Address Space Protection -# -CONFIG_GRKERNSEC_KMEM=y -CONFIG_GRKERNSEC_VM86=y -# CONFIG_GRKERNSEC_IO is not set -CONFIG_GRKERNSEC_PROC_MEMMAP=y -CONFIG_GRKERNSEC_BRUTE=y -CONFIG_GRKERNSEC_MODHARDEN=y -CONFIG_GRKERNSEC_HIDESYM=y -CONFIG_GRKERNSEC_KERN_LOCKOUT=y - -# -# Role Based Access Control Options -# -CONFIG_GRKERNSEC_NO_RBAC=y -# CONFIG_GRKERNSEC_ACL_HIDEKERN is not set -CONFIG_GRKERNSEC_ACL_MAXTRIES=3 -CONFIG_GRKERNSEC_ACL_TIMEOUT=30 - -# -# Filesystem Protections -# -CONFIG_GRKERNSEC_PROC=y -# CONFIG_GRKERNSEC_PROC_USER is not set -CONFIG_GRKERNSEC_PROC_USERGROUP=y -CONFIG_GRKERNSEC_PROC_GID=10 -CONFIG_GRKERNSEC_PROC_ADD=y -CONFIG_GRKERNSEC_LINK=y -CONFIG_GRKERNSEC_FIFO=y -CONFIG_GRKERNSEC_SYSFS_RESTRICT=y -# CONFIG_GRKERNSEC_ROFS is not set -CONFIG_GRKERNSEC_CHROOT=y -CONFIG_GRKERNSEC_CHROOT_MOUNT=y -CONFIG_GRKERNSEC_CHROOT_DOUBLE=y -CONFIG_GRKERNSEC_CHROOT_PIVOT=y -CONFIG_GRKERNSEC_CHROOT_CHDIR=y -CONFIG_GRKERNSEC_CHROOT_CHMOD=y -CONFIG_GRKERNSEC_CHROOT_FCHDIR=y -CONFIG_GRKERNSEC_CHROOT_MKNOD=y -CONFIG_GRKERNSEC_CHROOT_SHMAT=y -CONFIG_GRKERNSEC_CHROOT_UNIX=y -CONFIG_GRKERNSEC_CHROOT_FINDTASK=y -CONFIG_GRKERNSEC_CHROOT_NICE=y -CONFIG_GRKERNSEC_CHROOT_SYSCTL=y -CONFIG_GRKERNSEC_CHROOT_CAPS=y - -# -# Kernel Auditing -# -# CONFIG_GRKERNSEC_AUDIT_GROUP is not set -# CONFIG_GRKERNSEC_EXECLOG is not set -CONFIG_GRKERNSEC_RESLOG=y -# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set -# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set -# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set -CONFIG_GRKERNSEC_AUDIT_MOUNT=y -CONFIG_GRKERNSEC_SIGNAL=y -CONFIG_GRKERNSEC_FORKFAIL=y -CONFIG_GRKERNSEC_TIME=y -CONFIG_GRKERNSEC_PROC_IPADDR=y -CONFIG_GRKERNSEC_RWXMAP_LOG=y -CONFIG_GRKERNSEC_AUDIT_TEXTREL=y - -# -# Executable Protections -# -CONFIG_GRKERNSEC_DMESG=y -CONFIG_GRKERNSEC_HARDEN_PTRACE=y -# CONFIG_GRKERNSEC_TPE is not set - -# -# Network Protections -# -CONFIG_GRKERNSEC_RANDNET=y -CONFIG_GRKERNSEC_BLACKHOLE=y -# CONFIG_GRKERNSEC_SOCKET is not set - -# -# Sysctl support -# -CONFIG_GRKERNSEC_SYSCTL=y 
-CONFIG_GRKERNSEC_SYSCTL_ON=y - -# -# Logging Options -# -CONFIG_GRKERNSEC_FLOODTIME=10 -CONFIG_GRKERNSEC_FLOODBURST=6 - -# -# PaX -# -CONFIG_ARCH_TRACK_EXEC_LIMIT=y -CONFIG_PAX_PER_CPU_PGD=y -CONFIG_PAX=y - -# -# PaX Control -# -# CONFIG_PAX_SOFTMODE is not set -CONFIG_PAX_EI_PAX=y -CONFIG_PAX_PT_PAX_FLAGS=y -# CONFIG_PAX_NO_ACL_FLAGS is not set -CONFIG_PAX_HAVE_ACL_FLAGS=y -# CONFIG_PAX_HOOK_ACL_FLAGS is not set - -# -# Non-executable pages -# -CONFIG_PAX_NOEXEC=y -CONFIG_PAX_PAGEEXEC=y -CONFIG_PAX_SEGMEXEC=y -CONFIG_PAX_EMUTRAMP=y -CONFIG_PAX_MPROTECT=y -# CONFIG_PAX_MPROTECT_COMPAT is not set -CONFIG_PAX_ELFRELOCS=y -CONFIG_PAX_KERNEXEC=y -CONFIG_PAX_KERNEXEC_MODULE_TEXT=4 - -# -# Address Space Layout Randomization -# -CONFIG_PAX_ASLR=y -CONFIG_PAX_RANDKSTACK=y -CONFIG_PAX_RANDUSTACK=y -CONFIG_PAX_RANDMMAP=y - -# -# Miscellaneous hardening features -# -CONFIG_PAX_MEMORY_SANITIZE=y -CONFIG_PAX_MEMORY_STACKLEAK=y -CONFIG_PAX_MEMORY_UDEREF=y -CONFIG_PAX_REFCOUNT=y -CONFIG_PAX_USERCOPY=y -CONFIG_KEYS=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_NETWORK_XFRM=y -# CONFIG_SECURITY_PATH is not set -CONFIG_INTEL_TXT=y -CONFIG_LSM_MMAP_MIN_ADDR=65536 -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_IMA is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y -CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=m -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=m -# CONFIG_CRYPTO_NULL is not set -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_SEQIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m - -# -# Hash modes -# -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=y -CONFIG_CRYPTO_GHASH=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=m -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_586=y -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m 
-CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SALSA20_586=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_586=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_ZLIB=m -CONFIG_CRYPTO_LZO=m - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -CONFIG_CRYPTO_DEV_GEODE=m -CONFIG_CRYPTO_DEV_HIFN_795X=m -CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_APIC_ARCHITECTURE=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_MMU_AUDIT=y -CONFIG_VHOST_NET=m -# CONFIG_LGUEST is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=m -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_AUDIT_GENERIC=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=m -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_NLATTR=y -CONFIG_AVERAGE=y -CONFIG_CORDIC=m -CONFIG_LLIST=y diff --git a/kernel/config.i686-legacy b/kernel/config.i686-legacy deleted file mode 100644 index 0cb854c..0000000 --- a/kernel/config.i686-legacy +++ /dev/null @@ -1,4854 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/i386 3.1.1-1.ip3.i686 Kernel Configuration -# -# CONFIG_64BIT is not set -CONFIG_X86_32=y -# CONFIG_X86_64 is not set -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf32-i386" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_ZONE_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_GPIO=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_RWSEM_GENERIC_SPINLOCK is not set -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -# CONFIG_GENERIC_TIME_VSYSCALL is not set -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_DEFAULT_IDLE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# CONFIG_ZONE_DMA32 is not set -CONFIG_ARCH_POPULATES_NODE_MAP=y -# CONFIG_AUDIT_ARCH is not set -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_32_SMP=y -CONFIG_X86_HT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx" -CONFIG_KTIME_SCALAR=y -CONFIG_ARCH_CPU_PROBE_RELEASE=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_HAVE_IRQ_WORK=y -CONFIG_IRQ_WORK=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -# CONFIG_KERNEL_GZIP is not set -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -CONFIG_KERNEL_XZ=y -# CONFIG_KERNEL_LZO is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_BSD_PROCESS_ACCT=y -# CONFIG_BSD_PROCESS_ACCT_V3 is not set -CONFIG_FHANDLE=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y -CONFIG_HAVE_GENERIC_HARDIRQS=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_HARDIRQS=y -CONFIG_HAVE_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -# CONFIG_SPARSE_IRQ is not set - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -CONFIG_RCU_FANOUT=32 -# CONFIG_RCU_FANOUT_EXACT is not set -CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=18 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEM_RES_CTLR=y -CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y -# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y 
-CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_MM_OWNER=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -CONFIG_PERF_COUNTERS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_PROFILING is not set -CONFIG_TRACEPOINTS=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_USE_GENERIC_SMP_HELPERS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_STOP_MACHINE=y -CONFIG_BLOCK=y -CONFIG_LBDAF=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK is not set -# 
CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_SMP=y -CONFIG_X86_MPPARSE=y -CONFIG_X86_BIGSMP=y -CONFIG_X86_EXTENDED_PLATFORM=y -# CONFIG_X86_INTEL_MID is not set -# CONFIG_X86_RDC321X is not set -# CONFIG_X86_32_NON_STANDARD is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_X86_32_IRIS is not set -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_PARAVIRT_GUEST=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -# CONFIG_XEN_PRIVILEGED_GUEST is not set -CONFIG_KVM_CLOCK=y -CONFIG_KVM_GUEST=y -# CONFIG_LGUEST_GUEST is not set -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_SPINLOCKS is not set -CONFIG_PARAVIRT_CLOCK=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_NO_BOOTMEM=y -# CONFIG_MEMTEST is not set -# CONFIG_M386 is not set -# CONFIG_M486 is not set -# CONFIG_M586 is not set -# CONFIG_M586TSC is not set -# CONFIG_M586MMX is not set -CONFIG_M686=y -# CONFIG_MPENTIUMII is not set -# CONFIG_MPENTIUMIII is not set -# CONFIG_MPENTIUMM is not set -# CONFIG_MPENTIUM4 is not set -# CONFIG_MK6 is not set -# CONFIG_MK7 is not set -# CONFIG_MK8 is not set -# CONFIG_MCRUSOE is not set -# CONFIG_MEFFICEON is not set -# CONFIG_MWINCHIPC6 is not set -# CONFIG_MWINCHIP3D is not set -# CONFIG_MELAN is not set -# CONFIG_MGEODEGX1 is not set -# CONFIG_MGEODE_LX is not set -# CONFIG_MCYRIXIII is not set -# CONFIG_MVIAC3_2 is not set -# CONFIG_MVIAC7 is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_X86_GENERIC=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_CMPXCHG=y -CONFIG_CMPXCHG_LOCAL=y -CONFIG_CMPXCHG_DOUBLE=y -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_XADD=y -CONFIG_X86_PPRO_FENCE=y -CONFIG_X86_WP_WORKS_OK=y -CONFIG_X86_INVLPG=y -CONFIG_X86_BSWAP=y -CONFIG_X86_POPAD_OK=y -CONFIG_X86_ALIGNMENT_16=y -CONFIG_X86_INTEL_USERCOPY=y -CONFIG_X86_USE_PPRO_CHECKSUM=y -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=5 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_CYRIX_32=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_TRANSMETA_32=y -CONFIG_CPU_SUP_UMC_32=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -# CONFIG_IOMMU_HELPER is not set -CONFIG_NR_CPUS=256 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -# CONFIG_X86_ANCIENT_MCE is not set -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y -CONFIG_VM86=y -CONFIG_TOSHIBA=m -CONFIG_I8K=m -# CONFIG_X86_REBOOTFIXUPS is not set -CONFIG_MICROCODE=m -CONFIG_MICROCODE_INTEL=y -CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_NOHIGHMEM is not set -CONFIG_HIGHMEM4G=y -# CONFIG_HIGHMEM64G is not set -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_HIGHMEM=y -# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set -# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set -CONFIG_ARCH_FLATMEM_ENABLE=y 
-CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ILLEGAL_POINTER_VALUE=0 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_SPARSEMEM_STATIC=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -# CONFIG_PHYS_ADDR_T_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -# CONFIG_HWPOISON_INJECT is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_CLEANCACHE=y -CONFIG_HIGHPTE=y -# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set -CONFIG_X86_RESERVE_LOW=64 -# CONFIG_MATH_EMULATION is not set -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_EFI=y -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -CONFIG_HZ_300=y -# CONFIG_HZ_1000 is not set -CONFIG_HZ=300 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y -# CONFIG_KEXEC_JUMP is not set -CONFIG_PHYSICAL_START=0x400000 -CONFIG_RELOCATABLE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x400000 -CONFIG_HOTPLUG_CPU=y -# CONFIG_CMDLINE_BOOL is not set -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y - -# -# Power management and ACPI options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_PM_RUNTIME=y -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_ACPI=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS=y -CONFIG_ACPI_PROCFS_POWER=y -CONFIG_ACPI_EC_DEBUGFS=m -# CONFIG_ACPI_PROC_EVENT is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ACPI_BLACKLIST_YEAR=1999 -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=m -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -# CONFIG_ACPI_APEI_EINJ is not set -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -CONFIG_SFI=y -CONFIG_X86_APM_BOOT=y -CONFIG_APM=y -# CONFIG_APM_IGNORE_USER_SUSPEND is not set -# CONFIG_APM_DO_ENABLE is not set -CONFIG_APM_CPU_IDLE=y -# CONFIG_APM_DISPLAY_BLANK is not set -# CONFIG_APM_ALLOW_INTS is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_TABLE=y -CONFIG_CPU_FREQ_STAT=m -CONFIG_CPU_FREQ_STAT_DETAILS=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=m -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m - -# -# x86 CPU frequency scaling drivers -# -CONFIG_X86_PCC_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ=m -# CONFIG_X86_POWERNOW_K6 is not set -CONFIG_X86_POWERNOW_K7=y 
-CONFIG_X86_POWERNOW_K7_ACPI=y -CONFIG_X86_POWERNOW_K8=m -# CONFIG_X86_GX_SUSPMOD is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_SPEEDSTEP_ICH=y -CONFIG_X86_SPEEDSTEP_SMI=y -CONFIG_X86_P4_CLOCKMOD=m -# CONFIG_X86_CPUFREQ_NFORCE2 is not set -CONFIG_X86_LONGRUN=y -# CONFIG_X86_LONGHAUL is not set -# CONFIG_X86_E_POWERSAVER is not set - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=y -# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -CONFIG_INTEL_IDLE=y - -# -# Bus options (PCI etc.) -# -CONFIG_PCI=y -# CONFIG_PCI_GOBIOS is not set -# CONFIG_PCI_GOMMCONFIG is not set -# CONFIG_PCI_GODIRECT is not set -CONFIG_PCI_GOANY=y -CONFIG_PCI_BIOS=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIE_ECRC=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIE_PME=y -CONFIG_ARCH_SUPPORTS_MSI=y -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_STUB=y -CONFIG_HT_IRQ=y -CONFIG_PCI_IOV=y -CONFIG_PCI_IOAPIC=y -CONFIG_PCI_LABEL=y -CONFIG_ISA_DMA_API=y -# CONFIG_ISA is not set -# CONFIG_MCA is not set -# CONFIG_SCx200 is not set -# CONFIG_OLPC is not set -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_FAKE=m -CONFIG_HOTPLUG_PCI_COMPAQ=m -# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set -CONFIG_HOTPLUG_PCI_IBM=m -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set -# CONFIG_RAPIDIO is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_HAVE_AOUT=y -CONFIG_BINFMT_MISC=y -CONFIG_HAVE_ATOMIC_IOMAP=y -CONFIG_HAVE_TEXT_POKE_SMP=y -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -# CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_LRO=y -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=m 
-CONFIG_IPV6_PRIVACY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_NETLABEL is not set -CONFIG_NETWORK_SECMARK=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CT_PROTO_DCCP=m -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -# CONFIG_NETFILTER_XT_MATCH_IPVS is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m 
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m -# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set -# CONFIG_IP_NF_QUEUE is not set -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -# CONFIG_IP_NF_TARGET_ULOG is not set -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_DCCP=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PROTO_UDPLITE=m -CONFIG_NF_NAT_PROTO_SCTP=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_NF_NAT_SIP=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m -# CONFIG_IP6_NF_QUEUE is not set -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m 
-CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -# CONFIG_BRIDGE_EBT_ULOG is not set -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_MSG is not set -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_HMAC_NONE is not set -# CONFIG_SCTP_HMAC_SHA1 is not set -CONFIG_SCTP_HMAC_MD5=y -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -# CONFIG_ATM_LANE is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_NET_DSA=y -CONFIG_NET_DSA_TAG_DSA=y -CONFIG_NET_DSA_TAG_EDSA=y -CONFIG_NET_DSA_TAG_TRAILER=y -CONFIG_NET_DSA_MV88E6XXX=y -CONFIG_NET_DSA_MV88E6060=y -CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y -CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -# CONFIG_DECNET is not set -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_INGRESS=m - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -CONFIG_BT=m -CONFIG_BT_L2CAP=y -CONFIG_BT_SCO=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m - -# -# Bluetooth device drivers -# -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIBCM203X=m -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -CONFIG_BT_WILINK=m -# CONFIG_AF_RXRPC is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y 
-CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_SPY=y -CONFIG_WEXT_PRIV=y -CONFIG_CFG80211=m -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_REG_DEBUG is not set -CONFIG_CFG80211_DEFAULT_PS=y -CONFIG_CFG80211_DEBUGFS=y -# CONFIG_CFG80211_INTERNAL_REGDB is not set -CONFIG_CFG80211_WEXT=y -CONFIG_WIRELESS_EXT_SYSFS=y -CONFIG_LIB80211=m -CONFIG_LIB80211_CRYPT_WEP=m -CONFIG_LIB80211_CRYPT_CCMP=m -CONFIG_LIB80211_CRYPT_TKIP=m -# CONFIG_LIB80211_DEBUG is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_WIMAX is not set -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -# CONFIG_PARPORT_PC_FIFO is not set -# CONFIG_PARPORT_PC_SUPERIO is not set -# CONFIG_PARPORT_GSC is not set -# CONFIG_PARPORT_AX88796 is not set -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_FD=m -# CONFIG_PARIDE is not set -CONFIG_BLK_CPQ_DA=m -CONFIG_BLK_CPQ_CISS_DA=m -# CONFIG_CISS_SCSI_TAPE is not set -CONFIG_BLK_DEV_DAC960=m -CONFIG_BLK_DEV_UMEM=m -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -CONFIG_BLK_DEV_CRYPTOLOOP=m -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_OSD is not set -CONFIG_BLK_DEV_SX8=m -# CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=m -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -CONFIG_SENSORS_LIS3LV02D=m -CONFIG_MISC_DEVICES=y -# CONFIG_AD525X_DPOT is not set -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -# CONFIG_INTEL_MID_PTI is not set -# CONFIG_SGI_IOC4 is not set -CONFIG_TIFM_CORE=m -CONFIG_TIFM_7XX1=m -CONFIG_ICS932S401=m -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_CS5535_MFGPT is not set -CONFIG_HP_ILO=m -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -CONFIG_DS1682=m -CONFIG_VMWARE_BALLOON=m -# CONFIG_BMP085 is not set -CONFIG_PCH_PHUB=m -CONFIG_USB_SWITCH_FSA9480=m -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=m -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set 
-CONFIG_CB710_DEBUG_ASSUMPTIONS=y -# CONFIG_IWMC3200TOP is not set - -# -# Texas Instruments shared transport line discipline -# -CONFIG_TI_ST=m -# CONFIG_SENSORS_LIS3_I2C is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_TGT=m -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_WAIT_SCAN=m - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -CONFIG_SCSI_CXGB3_ISCSI=m -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2X_FCOE=m -CONFIG_BE2ISCSI=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_HPSA=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_3W_SAS=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 -CONFIG_AIC7XXX_RESET_DELAY_MS=5000 -CONFIG_AIC7XXX_DEBUG_ENABLE=y -CONFIG_AIC7XXX_DEBUG_MASK=0 -CONFIG_AIC7XXX_REG_PRETTY_PRINT=y -# CONFIG_SCSI_AIC7XXX_OLD is not set -CONFIG_SCSI_AIC79XX=m -CONFIG_AIC79XX_CMDS_PER_DEVICE=32 -CONFIG_AIC79XX_RESET_DELAY_MS=4000 -# CONFIG_AIC79XX_DEBUG_ENABLE is not set -CONFIG_AIC79XX_DEBUG_MASK=0 -# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_AIC94XX=m -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=m -# CONFIG_SCSI_MVSAS_DEBUG is not set -CONFIG_SCSI_MVSAS_TASKLET=y -CONFIG_SCSI_DPT_I2O=m -CONFIG_SCSI_ADVANSYS=m -CONFIG_SCSI_ARCMSR=m -# CONFIG_SCSI_ARCMSR_AER is not set -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=m -CONFIG_MEGARAID_MAILBOX=m -CONFIG_MEGARAID_LEGACY=m -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS_LOGGING is not set -CONFIG_SCSI_HPTIOP=m -CONFIG_SCSI_BUSLOGIC=m -CONFIG_SCSI_FLASHPOINT=y -CONFIG_VMWARE_PVSCSI=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_DMX3191D=m -CONFIG_SCSI_EATA=m -CONFIG_SCSI_EATA_TAGGED_QUEUE=y -# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set -CONFIG_SCSI_EATA_MAX_TAGS=16 -CONFIG_SCSI_FUTURE_DOMAIN=m -CONFIG_SCSI_GDTH=m -CONFIG_SCSI_ISCI=m -CONFIG_SCSI_IPS=m -CONFIG_SCSI_INITIO=m -CONFIG_SCSI_INIA100=m -CONFIG_SCSI_PPA=m -CONFIG_SCSI_IMM=m -# CONFIG_SCSI_IZIP_EPP16 is not set -# CONFIG_SCSI_IZIP_SLOW_CTR is not set -CONFIG_SCSI_STEX=m -CONFIG_SCSI_SYM53C8XX_2=m -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_QLA_FC=m -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=m -CONFIG_SCSI_DC390T=m -CONFIG_SCSI_NSP32=m -# CONFIG_SCSI_DEBUG is not set -CONFIG_SCSI_PMCRAID=m -CONFIG_SCSI_PM8001=m -# CONFIG_SCSI_SRP is not set -CONFIG_SCSI_BFA_FC=m -CONFIG_SCSI_DH=m -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m 
-CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=m -CONFIG_SATA_INIC162X=m -CONFIG_SATA_ACARD_AHCI=m -CONFIG_SATA_SIL24=m -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m -CONFIG_SATA_SX4=m -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=m -CONFIG_SATA_NV=m -CONFIG_SATA_PROMISE=m -CONFIG_SATA_SIL=m -CONFIG_SATA_SIS=m -CONFIG_SATA_SVW=m -CONFIG_SATA_ULI=m -CONFIG_SATA_VIA=m -CONFIG_SATA_VITESSE=m - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=m -CONFIG_PATA_AMD=m -CONFIG_PATA_ARASAN_CF=m -CONFIG_PATA_ARTOP=m -CONFIG_PATA_ATIIXP=m -CONFIG_PATA_ATP867X=m -CONFIG_PATA_CMD64X=m -CONFIG_PATA_CS5520=m -CONFIG_PATA_CS5530=m -CONFIG_PATA_CS5535=m -CONFIG_PATA_CS5536=m -CONFIG_PATA_CYPRESS=m -CONFIG_PATA_EFAR=m -CONFIG_PATA_HPT366=m -CONFIG_PATA_HPT37X=m -CONFIG_PATA_HPT3X2N=m -CONFIG_PATA_HPT3X3=m -# CONFIG_PATA_HPT3X3_DMA is not set -CONFIG_PATA_IT8213=m -CONFIG_PATA_IT821X=m -CONFIG_PATA_JMICRON=m -CONFIG_PATA_MARVELL=m -CONFIG_PATA_NETCELL=m -CONFIG_PATA_NINJA32=m -CONFIG_PATA_NS87415=m -CONFIG_PATA_OLDPIIX=m -CONFIG_PATA_OPTIDMA=m -CONFIG_PATA_PDC2027X=m -CONFIG_PATA_PDC_OLD=m -# CONFIG_PATA_RADISYS is not set -CONFIG_PATA_RDC=m -# CONFIG_PATA_SC1200 is not set -CONFIG_PATA_SCH=m -CONFIG_PATA_SERVERWORKS=m -CONFIG_PATA_SIL680=m -CONFIG_PATA_SIS=m -CONFIG_PATA_TOSHIBA=m -CONFIG_PATA_TRIFLEX=m -CONFIG_PATA_VIA=m -CONFIG_PATA_WINBOND=m - -# -# PIO-only SFF controllers -# -CONFIG_PATA_CMD640_PCI=m -CONFIG_PATA_MPIIX=m -CONFIG_PATA_NS87410=m -CONFIG_PATA_OPTI=m -CONFIG_PATA_RZ1000=m - -# -# Generic fallback / legacy drivers -# -CONFIG_PATA_ACPI=m -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MULTICORE_RAID456 is not set -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MIRROR=y -CONFIG_DM_RAID=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_ZERO=y -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_DELAY is not set -CONFIG_DM_UEVENT=y -# CONFIG_DM_FLAKEY is not set -# CONFIG_TARGET_CORE is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=40 -CONFIG_FUSION_CTL=m -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_OHCI_DEBUG=y -CONFIG_FIREWIRE_SBP2=m -# CONFIG_FIREWIRE_NET is not set -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_I2O=m -# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set -CONFIG_I2O_EXT_ADAPTEC=y -CONFIG_I2O_CONFIG=m -CONFIG_I2O_CONFIG_OLD_IOCTL=y -CONFIG_I2O_BUS=m -CONFIG_I2O_BLOCK=m -CONFIG_I2O_SCSI=m -CONFIG_I2O_PROC=m -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_IFB=m -CONFIG_DUMMY=m -CONFIG_BONDING=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -# CONFIG_EQUALIZER is not set -CONFIG_TUN=m -CONFIG_VETH=m -# CONFIG_NET_SB1000 is not set -# CONFIG_ARCNET is not set -CONFIG_MII=m -CONFIG_PHYLIB=y - -# -# 
MII PHY device drivers -# -CONFIG_MARVELL_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_LXT_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_BROADCOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_NATIONAL_PHY=m -CONFIG_STE10XP=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MICREL_PHY=m -CONFIG_FIXED_PHY=y -CONFIG_MDIO_BITBANG=m -# CONFIG_MDIO_GPIO is not set -CONFIG_NET_ETHERNET=y -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_CASSINI=m -CONFIG_NET_VENDOR_3COM=y -CONFIG_VORTEX=m -CONFIG_TYPHOON=m -CONFIG_ETHOC=m -CONFIG_DNET=m -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_DE2104X_DSL=0 -CONFIG_TULIP=m -# CONFIG_TULIP_MWI is not set -CONFIG_TULIP_MMIO=y -CONFIG_TULIP_NAPI=y -CONFIG_TULIP_NAPI_HW_MITIGATION=y -CONFIG_DE4X5=m -CONFIG_WINBOND_840=m -CONFIG_DM9102=m -CONFIG_ULI526X=m -CONFIG_PCMCIA_XIRCOM=m -CONFIG_HP100=m -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -CONFIG_NET_PCI=y -CONFIG_PCNET32=m -CONFIG_AMD8111_ETH=m -CONFIG_ADAPTEC_STARFIRE=m -CONFIG_KSZ884X_PCI=m -CONFIG_B44=m -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -CONFIG_FORCEDETH=m -CONFIG_E100=m -CONFIG_FEALNX=m -CONFIG_NATSEMI=m -CONFIG_NE2K_PCI=m -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R6040=m -CONFIG_SIS900=m -CONFIG_EPIC100=m -CONFIG_SMSC9420=m -CONFIG_SUNDANCE=m -# CONFIG_SUNDANCE_MMIO is not set -CONFIG_TLAN=m -# CONFIG_KS8842 is not set -# CONFIG_KS8851_MLL is not set -CONFIG_VIA_RHINE=m -CONFIG_VIA_RHINE_MMIO=y -CONFIG_SC92031=m -CONFIG_NET_POCKET=y -CONFIG_ATP=m -CONFIG_DE600=m -CONFIG_DE620=m -CONFIG_ATL2=m -CONFIG_NETDEV_1000=y -CONFIG_ACENIC=m -# CONFIG_ACENIC_OMIT_TIGON_I is not set -CONFIG_DL2K=m -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IP1000=m -CONFIG_IGB=m -CONFIG_IGB_DCA=y -CONFIG_IGBVF=m -CONFIG_NS83820=m -CONFIG_HAMACHI=m -CONFIG_YELLOWFIN=m -CONFIG_R8169=m -CONFIG_SIS190=m -CONFIG_SKGE=m -# CONFIG_SKGE_DEBUG is not set -CONFIG_SKGE_GENESIS=y -CONFIG_SKY2=m -# CONFIG_SKY2_DEBUG is not set -CONFIG_VIA_VELOCITY=m -CONFIG_TIGON3=m -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_QLA3XXX=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_JME=m -CONFIG_STMMAC_ETH=m -# CONFIG_STMMAC_DA is not set -# CONFIG_STMMAC_DUAL_MAC is not set -CONFIG_PCH_GBE=m -CONFIG_NETDEV_10000=y -CONFIG_MDIO=m -CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T1_1G=y -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4VF=m -CONFIG_ENIC=m -CONFIG_IXGBE=m -CONFIG_IXGBE_DCA=y -CONFIG_IXGBEVF=m -CONFIG_IXGB=m -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y -CONFIG_NETXEN_NIC=m -CONFIG_NIU=m -CONFIG_MLX4_EN=m -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -CONFIG_TEHUTI=m -CONFIG_BNX2X=m -CONFIG_QLCNIC=m -CONFIG_QLGE=m -CONFIG_BNA=m -CONFIG_SFC=m -CONFIG_BE2NET=m -# CONFIG_TR is not set -CONFIG_WLAN=y -CONFIG_LIBERTAS_THINFIRM=m -# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set -CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_AIRO=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_AT76C50X_USB=m -# CONFIG_PRISM54 is not set -CONFIG_USB_ZD1201=m -CONFIG_USB_NET_RNDIS_WLAN=m -CONFIG_RTL8180=m -CONFIG_RTL8187=m -CONFIG_RTL8187_LEDS=y -CONFIG_ADM8211=m 
-CONFIG_MAC80211_HWSIM=m -CONFIG_MWL8K=m -CONFIG_ATH_COMMON=m -# CONFIG_ATH_DEBUG is not set -CONFIG_ATH5K=m -CONFIG_ATH5K_DEBUG=y -# CONFIG_ATH5K_TRACER is not set -CONFIG_ATH5K_PCI=y -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y -CONFIG_ATH9K_AHB=y -CONFIG_ATH9K_DEBUGFS=y -CONFIG_ATH9K_RATE_CONTROL=y -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -CONFIG_CARL9170=m -CONFIG_CARL9170_LEDS=y -# CONFIG_CARL9170_DEBUGFS is not set -CONFIG_CARL9170_WPC=y -CONFIG_B43=m -CONFIG_B43_SSB=y -CONFIG_B43_PCI_AUTOSELECT=y -CONFIG_B43_PCICORE_AUTOSELECT=y -CONFIG_B43_SDIO=y -CONFIG_B43_PIO=y -CONFIG_B43_PHY_N=y -CONFIG_B43_PHY_LP=y -CONFIG_B43_LEDS=y -CONFIG_B43_HWRNG=y -# CONFIG_B43_DEBUG is not set -CONFIG_B43LEGACY=m -CONFIG_B43LEGACY_PCI_AUTOSELECT=y -CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y -CONFIG_B43LEGACY_LEDS=y -CONFIG_B43LEGACY_HWRNG=y -# CONFIG_B43LEGACY_DEBUG is not set -CONFIG_B43LEGACY_DMA=y -CONFIG_B43LEGACY_PIO=y -CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y -# CONFIG_B43LEGACY_DMA_MODE is not set -# CONFIG_B43LEGACY_PIO_MODE is not set -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -# CONFIG_IPW2100_DEBUG is not set -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y -CONFIG_IPW2200_RADIOTAP=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y -# CONFIG_IPW2200_DEBUG is not set -CONFIG_LIBIPW=m -# CONFIG_LIBIPW_DEBUG is not set -CONFIG_IWLAGN=m - -# -# Debugging Options -# -# CONFIG_IWLWIFI_DEBUG is not set -CONFIG_IWLWIFI_DEBUGFS=y -# CONFIG_IWLWIFI_DEVICE_TRACING is not set -CONFIG_IWLWIFI_DEVICE_SVTOOL=y -# CONFIG_IWL_P2P is not set -CONFIG_IWLWIFI_LEGACY=m - -# -# Debugging Options -# -# CONFIG_IWLWIFI_LEGACY_DEBUG is not set -CONFIG_IWLWIFI_LEGACY_DEBUGFS=y -# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set -CONFIG_IWL4965=m -CONFIG_IWL3945=m -# CONFIG_IWM is not set -CONFIG_LIBERTAS=m -CONFIG_LIBERTAS_USB=m -CONFIG_LIBERTAS_SDIO=m -# CONFIG_LIBERTAS_DEBUG is not set -CONFIG_LIBERTAS_MESH=y -CONFIG_HERMES=m -# CONFIG_HERMES_PRISM is not set -CONFIG_HERMES_CACHE_FW_ON_INIT=y -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m -CONFIG_ORINOCO_USB=m -CONFIG_P54_COMMON=m -CONFIG_P54_USB=m -CONFIG_P54_PCI=m -CONFIG_P54_LEDS=y -CONFIG_RT2X00=m -CONFIG_RT2400PCI=m -CONFIG_RT2500PCI=m -CONFIG_RT61PCI=m -CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2500USB=m -CONFIG_RT73USB=m -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y -CONFIG_RT2800USB_RT53XX=y -CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -CONFIG_RT2X00_LIB_DEBUGFS=y -# CONFIG_RT2X00_DEBUG is not set -CONFIG_RTL8192CE=m -CONFIG_RTL8192SE=m -CONFIG_RTL8192DE=m -CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTL8192C_COMMON=m -CONFIG_WL1251=m -CONFIG_WL1251_SDIO=m -CONFIG_WL12XX_MENU=m -CONFIG_WL12XX=m -# CONFIG_WL12XX_HT is not set -CONFIG_WL12XX_SDIO=m -# CONFIG_WL12XX_SDIO_TEST is not set -CONFIG_WL12XX_PLATFORM_DATA=y -CONFIG_ZD1211RW=m -# CONFIG_ZD1211RW_DEBUG is not set -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# - -# -# USB Network Adapters -# -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_USBNET=m 
-CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -# CONFIG_WAN is not set -CONFIG_ATM_DRIVERS=y -# CONFIG_ATM_DUMMY is not set -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -# CONFIG_ATM_ENI_DEBUG is not set -# CONFIG_ATM_ENI_TUNE_BURST is not set -CONFIG_ATM_FIRESTREAM=m -# CONFIG_ATM_ZATM is not set -CONFIG_ATM_NICSTAR=m -# CONFIG_ATM_NICSTAR_USE_SUNI is not set -# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set -CONFIG_ATM_IDT77252=m -# CONFIG_ATM_IDT77252_DEBUG is not set -# CONFIG_ATM_IDT77252_RCV_ALL is not set -CONFIG_ATM_IDT77252_USE_SUNI=y -# CONFIG_ATM_AMBASSADOR is not set -# CONFIG_ATM_HORIZON is not set -# CONFIG_ATM_IA is not set -# CONFIG_ATM_FORE200E is not set -CONFIG_ATM_HE=m -# CONFIG_ATM_HE_USE_SUNI is not set -CONFIG_ATM_SOLOS=m - -# -# CAIF transport drivers -# -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_PLIP is not set -CONFIG_PPP=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_MPPE=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOATM=m -CONFIG_PPPOL2TP=m -# CONFIG_SLIP is not set -CONFIG_SLHC=m -# CONFIG_NET_FC is not set -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL=y -CONFIG_NETPOLL_TRAP=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_VIRTIO_NET=m -CONFIG_VMXNET3=m -CONFIG_ISDN=y -CONFIG_ISDN_I4L=m -CONFIG_ISDN_PPP=y -CONFIG_ISDN_PPP_VJ=y -CONFIG_ISDN_MPP=y -CONFIG_IPPP_FILTER=y -CONFIG_ISDN_PPP_BSDCOMP=m -CONFIG_ISDN_AUDIO=y -CONFIG_ISDN_TTY_FAX=y - -# -# ISDN feature submodules -# -CONFIG_ISDN_DIVERSION=m - -# -# ISDN4Linux hardware drivers -# - -# -# Passive cards -# -CONFIG_ISDN_DRV_HISAX=m - -# -# D-channel protocol features -# -CONFIG_HISAX_EURO=y -CONFIG_DE_AOC=y -CONFIG_HISAX_NO_SENDCOMPLETE=y -CONFIG_HISAX_NO_LLC=y -CONFIG_HISAX_NO_KEYPAD=y -CONFIG_HISAX_1TR6=y -CONFIG_HISAX_NI1=y -CONFIG_HISAX_MAX_CARDS=8 - -# -# HiSax supported cards -# -CONFIG_HISAX_16_3=y -CONFIG_HISAX_TELESPCI=y -CONFIG_HISAX_S0BOX=y -CONFIG_HISAX_FRITZPCI=y -CONFIG_HISAX_AVM_A1_PCMCIA=y -CONFIG_HISAX_ELSA=y -CONFIG_HISAX_DIEHLDIVA=y -CONFIG_HISAX_SEDLBAUER=y -CONFIG_HISAX_NETJET=y -CONFIG_HISAX_NETJET_U=y -CONFIG_HISAX_NICCY=y -CONFIG_HISAX_BKM_A4T=y -CONFIG_HISAX_SCT_QUADRO=y -CONFIG_HISAX_GAZEL=y -CONFIG_HISAX_HFC_PCI=y -CONFIG_HISAX_W6692=y -CONFIG_HISAX_HFC_SX=y -CONFIG_HISAX_ENTERNOW_PCI=y -# CONFIG_HISAX_DEBUG is not set - -# -# HiSax PCMCIA card service modules -# - -# -# HiSax sub driver modules -# -CONFIG_HISAX_ST5481=m -CONFIG_HISAX_HFCUSB=m -CONFIG_HISAX_HFC4S8S=m -CONFIG_HISAX_FRITZ_PCIPNP=m - -# -# Active cards -# -CONFIG_ISDN_CAPI=m -CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_ISDN_CAPI_CAPI20=m -CONFIG_ISDN_CAPI_CAPIDRV=m - -# -# CAPI hardware drivers -# -CONFIG_CAPI_AVM=y -CONFIG_ISDN_DRV_AVMB1_B1PCI=m -CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y -CONFIG_ISDN_DRV_AVMB1_T1PCI=m 
-CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m -CONFIG_ISDN_DRV_GIGASET=m -CONFIG_GIGASET_CAPI=y -# CONFIG_GIGASET_I4L is not set -# CONFIG_GIGASET_DUMMYLL is not set -CONFIG_GIGASET_BASE=m -CONFIG_GIGASET_M105=m -CONFIG_GIGASET_M101=m -# CONFIG_GIGASET_DEBUG is not set -CONFIG_HYSDN=m -CONFIG_HYSDN_CAPI=y -# CONFIG_MISDN is not set -CONFIG_ISDN_HDLC=m -# CONFIG_PHONE is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=m -CONFIG_KEYBOARD_GPIO_POLLED=m -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_SENTELIC=y -CONFIG_MOUSE_PS2_TOUCHKIT=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_VSXXXAA=m -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -CONFIG_INPUT_PCSPKR=m -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_MPU3050 is not set -CONFIG_INPUT_APANEL=m -CONFIG_INPUT_WISTRON_BTNS=m -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -# CONFIG_INPUT_KXTJ9 is not set -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_CMA3000 is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=m -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PARKBD is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_ROCKETPORT=m -CONFIG_CYCLADES=m -# CONFIG_CYZ_INTR is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set 
-CONFIG_SYNCLINK=m -CONFIG_SYNCLINKMP=m -CONFIG_SYNCLINK_GT=m -CONFIG_NOZOMI=m -# CONFIG_ISI is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -# CONFIG_TRACE_SINK is not set -# CONFIG_STALDRV is not set - -# -# Serial drivers -# -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MFD_HSU is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_SERIAL_JSM=m -# CONFIG_SERIAL_TIMBERDALE is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_PCH_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -CONFIG_PRINTER=m -CONFIG_LP_CONSOLE=y -CONFIG_PPDEV=m -CONFIG_HVC_DRIVER=y -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -# CONFIG_IPMI_PANIC_EVENT is not set -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -CONFIG_HW_RANDOM_GEODE=m -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_NVRAM=y -CONFIG_R3964=m -# CONFIG_APPLICOM is not set -CONFIG_SONYPI=m -CONFIG_MWAVE=m -CONFIG_PC8736x_GPIO=m -CONFIG_NSC_GPIO=m -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=8192 -CONFIG_HPET=y -# CONFIG_HPET_MMAP is not set -CONFIG_HANGCHECK_TIMER=m -# CONFIG_TCG_TPM is not set -# CONFIG_TELCLOCK is not set -# CONFIG_RAMOOPS is not set -CONFIG_I2C=m -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -CONFIG_I2C_ALI1535=m -CONFIG_I2C_ALI1563=m -CONFIG_I2C_ALI15X3=m -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -CONFIG_I2C_SIS5595=m -CONFIG_I2C_SIS630=m -CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m -CONFIG_I2C_VIAPRO=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_INTEL_MID is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_PXA_PCI is not set -CONFIG_I2C_SIMTEC=m -# CONFIG_I2C_XILINX is not set -# CONFIG_I2C_EG20T is not set - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -CONFIG_I2C_PARPORT=m -CONFIG_I2C_PARPORT_LIGHT=m -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_STUB=m -CONFIG_SCx200_ACB=m -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# - -# -# Enable Device Drivers -> PPS to see the PTP clock options. 
-# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y - -# -# Memory mapped GPIO drivers: -# -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_IT8761E is not set -CONFIG_GPIO_SCH=m -# CONFIG_GPIO_VX855 is not set - -# -# I2C GPIO expanders: -# -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_ADP5588 is not set - -# -# PCI GPIO expanders: -# -# CONFIG_GPIO_CS5535 is not set -# CONFIG_GPIO_LANGWELL is not set -# CONFIG_GPIO_PCH is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders: -# -# CONFIG_GPIO_MCP23S08 is not set - -# -# AC97 GPIO expanders: -# - -# -# MODULbus GPIO expanders: -# -CONFIG_W1=m -CONFIG_W1_CON=y - -# -# 1-wire Bus Masters -# -# CONFIG_W1_MASTER_MATROX is not set -CONFIG_W1_MASTER_DS2490=m -CONFIG_W1_MASTER_DS2482=m -CONFIG_W1_MASTER_DS1WM=m -# CONFIG_W1_MASTER_GPIO is not set - -# -# 1-wire Slaves -# -CONFIG_W1_SLAVE_THERM=m -CONFIG_W1_SLAVE_SMEM=m -CONFIG_W1_SLAVE_DS2408=m -CONFIG_W1_SLAVE_DS2423=m -CONFIG_W1_SLAVE_DS2431=m -CONFIG_W1_SLAVE_DS2433=m -CONFIG_W1_SLAVE_DS2433_CRC=y -CONFIG_W1_SLAVE_DS2760=m -CONFIG_W1_SLAVE_DS2780=m -CONFIG_W1_SLAVE_BQ27000=m -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2760 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_BQ20Z75 is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_ISP1704 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_GPIO is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -CONFIG_SENSORS_ASC7621=m -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_FAM15H_POWER=m -CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ATXP1=m -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_FSCHMD=m -CONFIG_SENSORS_G760A=m -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -# CONFIG_SENSORS_GPIO_FAN is not set -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -CONFIG_SENSORS_IT87=m -# CONFIG_SENSORS_JC42 is not set -CONFIG_SENSORS_LINEAGE=m -CONFIG_SENSORS_LM63=m -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -CONFIG_SENSORS_LTC4245=m -CONFIG_SENSORS_LTC4261=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6642=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_NTC_THERMISTOR=m -CONFIG_SENSORS_PC87360=m -CONFIG_SENSORS_PC87427=m 
-CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -CONFIG_SENSORS_ADM1275=m -CONFIG_SENSORS_LM25066=m -CONFIG_SENSORS_MAX16064=m -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -CONFIG_SENSORS_SIS5595=m -# CONFIG_SENSORS_SMM665 is not set -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -CONFIG_SENSORS_ADS1015=m -CONFIG_SENSORS_ADS7828=m -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_APPLESMC=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -CONFIG_SENSORS_ATK0110=m -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -CONFIG_WATCHDOG_NOWAYOUT=y - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -CONFIG_F71808E_WDT=m -CONFIG_SP5100_TCO=m -# CONFIG_SC520_WDT is not set -CONFIG_SBC_FITPC2_WATCHDOG=m -# CONFIG_EUROTECH_WDT is not set -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -# CONFIG_WAFER_WDT is not set -CONFIG_I6300ESB_WDT=m -CONFIG_ITCO_WDT=m -# CONFIG_ITCO_VENDOR_SUPPORT is not set -CONFIG_IT8712F_WDT=m -CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -CONFIG_HPWDT_NMI_DECODING=y -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -CONFIG_NV_TCO=m -# CONFIG_60XX_WDT is not set -# CONFIG_SBC8360_WDT is not set -# CONFIG_SBC7240_WDT is not set -# CONFIG_CPU5_WDT is not set -CONFIG_SMSC_SCH311X_WDT=m -# CONFIG_SMSC37B787_WDT is not set -CONFIG_W83627HF_WDT=m -CONFIG_W83697HF_WDT=m -CONFIG_W83697UG_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -# CONFIG_SBC_EPX_C3_WATCHDOG is not set - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=m -CONFIG_SSB_SPROM=y -CONFIG_SSB_BLOCKIO=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -CONFIG_SSB_B43_PCI_BRIDGE=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -# CONFIG_BCMA is not set -CONFIG_MFD_SUPPORT=y -CONFIG_MFD_CORE=m -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -# CONFIG_HTC_PASIC3 is not set -# CONFIG_UCB1400_CORE is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TMIO is not set -CONFIG_MFD_WM8400=m -# CONFIG_MFD_PCF50633 is not set -# CONFIG_ABX500_CORE is not set -CONFIG_MFD_CS5535=m -# CONFIG_MFD_TIMBERDALE is not set -CONFIG_LPC_SCH=m -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -CONFIG_MFD_VX855=m -CONFIG_MFD_WL1273_CORE=m 
-# CONFIG_REGULATOR is not set -CONFIG_MEDIA_SUPPORT=m - -# -# Multimedia core support -# -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_DEV=m -CONFIG_VIDEO_V4L2_COMMON=m -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_DVB_CORE=m -CONFIG_DVB_NET=y -CONFIG_VIDEO_MEDIA=m - -# -# Multimedia drivers -# -CONFIG_VIDEO_SAA7146=m -CONFIG_VIDEO_SAA7146_VV=m -CONFIG_RC_CORE=m -CONFIG_LIRC=m -CONFIG_RC_MAP=m -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_SONY_DECODER=m -CONFIG_IR_RC5_SZ_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m -CONFIG_IR_LIRC_CODEC=m -CONFIG_IR_ENE=m -CONFIG_IR_IMON=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_ITE_CIR=m -CONFIG_IR_FINTEK=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_REDRAT3=m -CONFIG_IR_STREAMZAP=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_RC_LOOPBACK=m -CONFIG_MEDIA_ATTACH=y -CONFIG_MEDIA_TUNER=m -CONFIG_MEDIA_TUNER_CUSTOMISE=y - -# -# Customize TV tuners -# -CONFIG_MEDIA_TUNER_SIMPLE=m -CONFIG_MEDIA_TUNER_TDA8290=m -CONFIG_MEDIA_TUNER_TDA827X=m -CONFIG_MEDIA_TUNER_TDA18271=m -CONFIG_MEDIA_TUNER_TDA9887=m -CONFIG_MEDIA_TUNER_TEA5761=m -CONFIG_MEDIA_TUNER_TEA5767=m -CONFIG_MEDIA_TUNER_MT20XX=m -CONFIG_MEDIA_TUNER_MT2060=m -CONFIG_MEDIA_TUNER_MT2266=m -CONFIG_MEDIA_TUNER_MT2131=m -CONFIG_MEDIA_TUNER_QT1010=m -CONFIG_MEDIA_TUNER_XC2028=m -CONFIG_MEDIA_TUNER_XC5000=m -CONFIG_MEDIA_TUNER_XC4000=m -CONFIG_MEDIA_TUNER_MXL5005S=m -CONFIG_MEDIA_TUNER_MXL5007T=m -CONFIG_MEDIA_TUNER_MC44S803=m -CONFIG_MEDIA_TUNER_MAX2165=m -CONFIG_MEDIA_TUNER_TDA18218=m -CONFIG_MEDIA_TUNER_TDA18212=m -CONFIG_VIDEO_V4L2=m -CONFIG_VIDEOBUF_GEN=m -CONFIG_VIDEOBUF_DMA_SG=m -CONFIG_VIDEOBUF_VMALLOC=m -CONFIG_VIDEOBUF_DMA_CONTIG=m -CONFIG_VIDEOBUF_DVB=m -CONFIG_VIDEO_BTCX=m -CONFIG_VIDEO_TVEEPROM=m -CONFIG_VIDEO_TUNER=m -CONFIG_VIDEOBUF2_CORE=m -CONFIG_VIDEOBUF2_MEMOPS=m -CONFIG_VIDEOBUF2_DMA_CONTIG=m -CONFIG_VIDEOBUF2_VMALLOC=m -CONFIG_VIDEO_CAPTURE_DRIVERS=y -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_HELPER_CHIPS_AUTO=y -CONFIG_VIDEO_IR_I2C=m - -# -# Audio decoders, processors and mixers -# -CONFIG_VIDEO_TVAUDIO=m -CONFIG_VIDEO_TDA7432=m -CONFIG_VIDEO_TDA9840=m -CONFIG_VIDEO_TEA6415C=m -CONFIG_VIDEO_TEA6420=m -CONFIG_VIDEO_MSP3400=m -CONFIG_VIDEO_CS5345=m -CONFIG_VIDEO_CS53L32A=m -CONFIG_VIDEO_WM8775=m -CONFIG_VIDEO_WM8739=m -CONFIG_VIDEO_VP27SMPX=m - -# -# RDS decoders -# -CONFIG_VIDEO_SAA6588=m - -# -# Video decoders -# -CONFIG_VIDEO_ADV7180=m -CONFIG_VIDEO_BT819=m -CONFIG_VIDEO_BT856=m -CONFIG_VIDEO_BT866=m -CONFIG_VIDEO_KS0127=m -CONFIG_VIDEO_SAA7110=m -CONFIG_VIDEO_SAA711X=m -CONFIG_VIDEO_TVP5150=m -CONFIG_VIDEO_VPX3220=m - -# -# Video and audio decoders -# -CONFIG_VIDEO_SAA717X=m -CONFIG_VIDEO_CX25840=m - -# -# MPEG video encoders -# -CONFIG_VIDEO_CX2341X=m - -# -# Video encoders -# -CONFIG_VIDEO_SAA7127=m -CONFIG_VIDEO_SAA7185=m -CONFIG_VIDEO_ADV7170=m -CONFIG_VIDEO_ADV7175=m - -# -# Camera sensor devices -# -CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9V011=m - -# -# Flash devices -# - -# -# Video improvement chips -# -CONFIG_VIDEO_UPD64031A=m -CONFIG_VIDEO_UPD64083=m - -# -# Miscelaneous helper chips -# -CONFIG_VIDEO_M52790=m -# CONFIG_VIDEO_VIVI is not set -CONFIG_VIDEO_BT848=m -CONFIG_VIDEO_BT848_DVB=y -CONFIG_VIDEO_BWQCAM=m -CONFIG_VIDEO_CQCAM=m -CONFIG_VIDEO_W9966=m -CONFIG_VIDEO_CPIA2=m -CONFIG_VIDEO_ZORAN=m -CONFIG_VIDEO_ZORAN_DC30=m -CONFIG_VIDEO_ZORAN_ZR36060=m -CONFIG_VIDEO_ZORAN_BUZ=m -CONFIG_VIDEO_ZORAN_DC10=m -CONFIG_VIDEO_ZORAN_LML33=m -CONFIG_VIDEO_ZORAN_LML33R10=m -CONFIG_VIDEO_ZORAN_AVS6EYES=m 
-CONFIG_VIDEO_MEYE=m -CONFIG_VIDEO_SAA7134=m -CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y -CONFIG_VIDEO_SAA7134_DVB=m -CONFIG_VIDEO_MXB=m -CONFIG_VIDEO_HEXIUM_ORION=m -CONFIG_VIDEO_HEXIUM_GEMINI=m -CONFIG_VIDEO_TIMBERDALE=m -CONFIG_VIDEO_CX88=m -CONFIG_VIDEO_CX88_ALSA=m -CONFIG_VIDEO_CX88_BLACKBIRD=m -CONFIG_VIDEO_CX88_DVB=m -CONFIG_VIDEO_CX88_MPEG=m -CONFIG_VIDEO_CX88_VP3054=m -CONFIG_VIDEO_CX23885=m -# CONFIG_MEDIA_ALTERA_CI is not set -CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_IVTV=m -CONFIG_VIDEO_FB_IVTV=m -CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m -CONFIG_VIDEO_SAA7164=m -CONFIG_VIDEO_CAFE_CCIC=m -CONFIG_VIDEO_SR030PC30=m -CONFIG_VIDEO_VIA_CAMERA=m -CONFIG_VIDEO_NOON010PC30=m -# CONFIG_VIDEO_M5MOLS is not set -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T031=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m -CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_SOC_CAMERA_OV2640=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV6650=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m -CONFIG_V4L_USB_DRIVERS=y -CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -CONFIG_USB_GSPCA=m -CONFIG_USB_M5602=m -CONFIG_USB_STV06XX=m -CONFIG_USB_GL860=m -CONFIG_USB_GSPCA_BENQ=m -CONFIG_USB_GSPCA_CONEX=m -CONFIG_USB_GSPCA_CPIA1=m -CONFIG_USB_GSPCA_ETOMS=m -CONFIG_USB_GSPCA_FINEPIX=m -CONFIG_USB_GSPCA_JEILINJ=m -CONFIG_USB_GSPCA_KINECT=m -CONFIG_USB_GSPCA_KONICA=m -CONFIG_USB_GSPCA_MARS=m -CONFIG_USB_GSPCA_MR97310A=m -CONFIG_USB_GSPCA_NW80X=m -CONFIG_USB_GSPCA_OV519=m -CONFIG_USB_GSPCA_OV534=m -CONFIG_USB_GSPCA_OV534_9=m -CONFIG_USB_GSPCA_PAC207=m -CONFIG_USB_GSPCA_PAC7302=m -CONFIG_USB_GSPCA_PAC7311=m -CONFIG_USB_GSPCA_SE401=m -CONFIG_USB_GSPCA_SN9C2028=m -CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SONIXB=m -CONFIG_USB_GSPCA_SONIXJ=m -CONFIG_USB_GSPCA_SPCA500=m -CONFIG_USB_GSPCA_SPCA501=m -CONFIG_USB_GSPCA_SPCA505=m -CONFIG_USB_GSPCA_SPCA506=m -CONFIG_USB_GSPCA_SPCA508=m -CONFIG_USB_GSPCA_SPCA561=m -CONFIG_USB_GSPCA_SPCA1528=m -CONFIG_USB_GSPCA_SQ905=m -CONFIG_USB_GSPCA_SQ905C=m -CONFIG_USB_GSPCA_SQ930X=m -CONFIG_USB_GSPCA_STK014=m -CONFIG_USB_GSPCA_STV0680=m -CONFIG_USB_GSPCA_SUNPLUS=m -CONFIG_USB_GSPCA_T613=m -CONFIG_USB_GSPCA_TV8532=m -CONFIG_USB_GSPCA_VC032X=m -CONFIG_USB_GSPCA_VICAM=m -CONFIG_USB_GSPCA_XIRLINK_CIT=m -CONFIG_USB_GSPCA_ZC3XX=m -CONFIG_VIDEO_PVRUSB2=m -CONFIG_VIDEO_PVRUSB2_SYSFS=y -CONFIG_VIDEO_PVRUSB2_DVB=y -# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set -CONFIG_VIDEO_HDPVR=m -CONFIG_VIDEO_EM28XX=m -CONFIG_VIDEO_EM28XX_ALSA=m -CONFIG_VIDEO_EM28XX_DVB=m -CONFIG_VIDEO_EM28XX_RC=y -CONFIG_VIDEO_TLG2300=m -CONFIG_VIDEO_CX231XX=m -CONFIG_VIDEO_CX231XX_RC=y -CONFIG_VIDEO_CX231XX_ALSA=m -CONFIG_VIDEO_CX231XX_DVB=m -CONFIG_VIDEO_USBVISION=m -# CONFIG_USB_ET61X251 is not set -# CONFIG_USB_SN9C102 is not set -CONFIG_USB_PWC=m -# CONFIG_USB_PWC_DEBUG is not set -CONFIG_USB_PWC_INPUT_EVDEV=y -CONFIG_USB_ZR364XX=m -CONFIG_USB_STKWEBCAM=m -CONFIG_USB_S2255=m -CONFIG_V4L_MEM2MEM_DRIVERS=y -# CONFIG_VIDEO_MEM2MEM_TESTDEV is not set -CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_MAXIRADIO=m -CONFIG_I2C_SI4713=m -CONFIG_RADIO_SI4713=m -CONFIG_USB_DSBR=m -CONFIG_RADIO_SI470X=y -CONFIG_USB_SI470X=m -CONFIG_I2C_SI470X=m -CONFIG_USB_MR800=m -# CONFIG_RADIO_TEA5764 is not set -# CONFIG_RADIO_SAA7706H is not set -# CONFIG_RADIO_TEF6862 is not set -CONFIG_RADIO_WL1273=m - -# -# Texas Instruments WL128x FM driver (ST based) -# 
-# CONFIG_RADIO_WL128X is not set -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_DVB_DYNAMIC_MINORS=y -CONFIG_DVB_CAPTURE_DRIVERS=y - -# -# Supported SAA7146 based PCI Adapters -# -CONFIG_TTPCI_EEPROM=m -CONFIG_DVB_AV7110=m -CONFIG_DVB_AV7110_OSD=y -CONFIG_DVB_BUDGET_CORE=m -CONFIG_DVB_BUDGET=m -CONFIG_DVB_BUDGET_CI=m -CONFIG_DVB_BUDGET_AV=m -CONFIG_DVB_BUDGET_PATCH=m - -# -# Supported USB Adapters -# -CONFIG_DVB_USB=m -# CONFIG_DVB_USB_DEBUG is not set -CONFIG_DVB_USB_A800=m -CONFIG_DVB_USB_DIBUSB_MB=m -CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y -CONFIG_DVB_USB_DIBUSB_MC=m -CONFIG_DVB_USB_DIB0700=m -CONFIG_DVB_USB_UMT_010=m -CONFIG_DVB_USB_CXUSB=m -CONFIG_DVB_USB_M920X=m -CONFIG_DVB_USB_GL861=m -CONFIG_DVB_USB_AU6610=m -CONFIG_DVB_USB_DIGITV=m -CONFIG_DVB_USB_VP7045=m -CONFIG_DVB_USB_VP702X=m -CONFIG_DVB_USB_GP8PSK=m -CONFIG_DVB_USB_NOVA_T_USB2=m -CONFIG_DVB_USB_TTUSB2=m -CONFIG_DVB_USB_DTT200U=m -CONFIG_DVB_USB_OPERA1=m -CONFIG_DVB_USB_AF9005=m -CONFIG_DVB_USB_AF9005_REMOTE=m -CONFIG_DVB_USB_DW2102=m -CONFIG_DVB_USB_CINERGY_T2=m -CONFIG_DVB_USB_ANYSEE=m -CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_AF9015=m -CONFIG_DVB_USB_CE6230=m -CONFIG_DVB_USB_FRIIO=m -CONFIG_DVB_USB_EC168=m -CONFIG_DVB_USB_AZ6027=m -CONFIG_DVB_USB_LME2510=m -CONFIG_DVB_USB_TECHNISAT_USB2=m -CONFIG_DVB_TTUSB_BUDGET=m -CONFIG_DVB_TTUSB_DEC=m -CONFIG_SMS_SIANO_MDTV=m - -# -# Siano module components -# -CONFIG_SMS_USB_DRV=m -# CONFIG_SMS_SDIO_DRV is not set - -# -# Supported FlexCopII (B2C2) Adapters -# -CONFIG_DVB_B2C2_FLEXCOP=m -CONFIG_DVB_B2C2_FLEXCOP_PCI=m -CONFIG_DVB_B2C2_FLEXCOP_USB=m -# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set - -# -# Supported BT878 Adapters -# -CONFIG_DVB_BT8XX=m - -# -# Supported Pluto2 Adapters -# -CONFIG_DVB_PLUTO2=m - -# -# Supported SDMC DM1105 Adapters -# -CONFIG_DVB_DM1105=m - -# -# Supported FireWire (IEEE 1394) Adapters -# -CONFIG_DVB_FIREDTV=m -CONFIG_DVB_FIREDTV_INPUT=y - -# -# Supported Earthsoft PT1 Adapters -# -CONFIG_DVB_PT1=m - -# -# Supported Mantis Adapters -# -CONFIG_MANTIS_CORE=m -CONFIG_DVB_MANTIS=m -CONFIG_DVB_HOPPER=m - -# -# Supported nGene Adapters -# -CONFIG_DVB_NGENE=m - -# -# Supported ddbridge ('Octopus') Adapters -# -# CONFIG_DVB_DDBRIDGE is not set - -# -# Supported DVB Frontends -# -CONFIG_DVB_FE_CUSTOMISE=y - -# -# Customise DVB Frontends -# - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_STB0899=m -CONFIG_DVB_STB6100=m -CONFIG_DVB_STV090x=m -CONFIG_DVB_STV6110x=m - -# -# Multistandard (cable + terrestrial) frontends -# -CONFIG_DVB_DRXK=m -CONFIG_DVB_TDA18271C2DD=m - -# -# DVB-S (satellite) frontends -# -CONFIG_DVB_CX24110=m -CONFIG_DVB_CX24123=m -CONFIG_DVB_MT312=m -CONFIG_DVB_ZL10036=m -CONFIG_DVB_ZL10039=m -CONFIG_DVB_S5H1420=m -CONFIG_DVB_STV0288=m -CONFIG_DVB_STB6000=m -CONFIG_DVB_STV0299=m -CONFIG_DVB_STV6110=m -CONFIG_DVB_STV0900=m -CONFIG_DVB_TDA8083=m -CONFIG_DVB_TDA10086=m -CONFIG_DVB_TDA8261=m -CONFIG_DVB_VES1X93=m -CONFIG_DVB_TUNER_ITD1000=m -CONFIG_DVB_TUNER_CX24113=m -CONFIG_DVB_TDA826X=m -CONFIG_DVB_TUA6100=m -CONFIG_DVB_CX24116=m -CONFIG_DVB_SI21XX=m -CONFIG_DVB_DS3000=m -CONFIG_DVB_MB86A16=m - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_SP8870=m -CONFIG_DVB_SP887X=m -CONFIG_DVB_CX22700=m -CONFIG_DVB_CX22702=m -CONFIG_DVB_S5H1432=m -CONFIG_DVB_DRXD=m -CONFIG_DVB_L64781=m -CONFIG_DVB_TDA1004X=m -CONFIG_DVB_NXT6000=m -CONFIG_DVB_MT352=m -CONFIG_DVB_ZL10353=m -CONFIG_DVB_DIB3000MB=m -CONFIG_DVB_DIB3000MC=m -CONFIG_DVB_DIB7000M=m -CONFIG_DVB_DIB7000P=m -CONFIG_DVB_DIB9000=m -CONFIG_DVB_TDA10048=m -CONFIG_DVB_AF9013=m -CONFIG_DVB_EC100=m 
-CONFIG_DVB_STV0367=m -CONFIG_DVB_CXD2820R=m - -# -# DVB-C (cable) frontends -# -CONFIG_DVB_VES1820=m -CONFIG_DVB_TDA10021=m -CONFIG_DVB_TDA10023=m -CONFIG_DVB_STV0297=m - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -CONFIG_DVB_NXT200X=m -CONFIG_DVB_OR51211=m -CONFIG_DVB_OR51132=m -CONFIG_DVB_BCM3510=m -CONFIG_DVB_LGDT330X=m -CONFIG_DVB_LGDT3305=m -CONFIG_DVB_S5H1409=m -CONFIG_DVB_AU8522=m -CONFIG_DVB_S5H1411=m - -# -# ISDB-T (terrestrial) frontends -# -CONFIG_DVB_S921=m -CONFIG_DVB_DIB8000=m -CONFIG_DVB_MB86A20S=m - -# -# Digital terrestrial only tuners/PLL -# -CONFIG_DVB_PLL=m -CONFIG_DVB_TUNER_DIB0070=m -CONFIG_DVB_TUNER_DIB0090=m - -# -# SEC control devices for DVB-S -# -CONFIG_DVB_LNBP21=m -CONFIG_DVB_ISL6405=m -CONFIG_DVB_ISL6421=m -CONFIG_DVB_ISL6423=m -CONFIG_DVB_LGS8GL5=m -CONFIG_DVB_LGS8GXX=m -CONFIG_DVB_ATBM8830=m -CONFIG_DVB_TDA665x=m -CONFIG_DVB_IX2505V=m - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set - -# -# Graphics support -# -CONFIG_AGP=y -CONFIG_AGP_ALI=y -CONFIG_AGP_ATI=y -CONFIG_AGP_AMD=y -CONFIG_AGP_AMD64=y -CONFIG_AGP_INTEL=y -CONFIG_AGP_NVIDIA=y -CONFIG_AGP_SIS=y -CONFIG_AGP_SWORKS=y -CONFIG_AGP_VIA=y -CONFIG_AGP_EFFICEON=y -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_TTM=m -CONFIG_DRM_TDFX=m -CONFIG_DRM_R128=m -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_KMS=y -CONFIG_DRM_I810=m -CONFIG_DRM_I915=m -CONFIG_DRM_I915_KMS=y -CONFIG_DRM_MGA=m -CONFIG_DRM_SIS=m -CONFIG_DRM_VIA=m -CONFIG_DRM_SAVAGE=m -CONFIG_STUB_POULSBO=m -CONFIG_VGASTATE=m -CONFIG_VIDEO_OUTPUT_CONTROL=m -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_DDC=m -CONFIG_FB_BOOT_VESA_SUPPORT=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -# CONFIG_FB_WMT_GE_ROPS is not set -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_SVGALIB=m -# CONFIG_FB_MACMODES is not set -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -CONFIG_FB_CIRRUS=m -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -CONFIG_FB_VGA16=m -# CONFIG_FB_UVESA is not set -CONFIG_FB_VESA=y -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_S1D13XXX is not set -CONFIG_FB_NVIDIA=m -CONFIG_FB_NVIDIA_I2C=y -# CONFIG_FB_NVIDIA_DEBUG is not set -CONFIG_FB_NVIDIA_BACKLIGHT=y -CONFIG_FB_RIVA=m -# CONFIG_FB_RIVA_I2C is not set -# CONFIG_FB_RIVA_DEBUG is not set -CONFIG_FB_RIVA_BACKLIGHT=y -CONFIG_FB_I810=m -CONFIG_FB_I810_GTF=y -CONFIG_FB_I810_I2C=y -# CONFIG_FB_LE80578 is not set -CONFIG_FB_MATROX=m -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y -CONFIG_FB_MATROX_G=y -CONFIG_FB_MATROX_I2C=m -CONFIG_FB_MATROX_MAVEN=m -CONFIG_FB_RADEON=m -CONFIG_FB_RADEON_I2C=y -CONFIG_FB_RADEON_BACKLIGHT=y -# CONFIG_FB_RADEON_DEBUG is not set -CONFIG_FB_ATY128=m -CONFIG_FB_ATY128_BACKLIGHT=y -CONFIG_FB_ATY=m -CONFIG_FB_ATY_CT=y -CONFIG_FB_ATY_GENERIC_LCD=y -CONFIG_FB_ATY_GX=y -CONFIG_FB_ATY_BACKLIGHT=y -CONFIG_FB_S3=m -CONFIG_FB_S3_DDC=y -CONFIG_FB_SAVAGE=m -CONFIG_FB_SAVAGE_I2C=y -CONFIG_FB_SAVAGE_ACCEL=y -# CONFIG_FB_SIS is not set -CONFIG_FB_VIA=m -# CONFIG_FB_VIA_DIRECT_PROCFS is not set -CONFIG_FB_VIA_X_COMPATIBILITY=y 
-CONFIG_FB_NEOMAGIC=m -CONFIG_FB_KYRO=m -CONFIG_FB_3DFX=m -CONFIG_FB_3DFX_ACCEL=y -CONFIG_FB_3DFX_I2C=y -CONFIG_FB_VOODOO1=m -# CONFIG_FB_VT8623 is not set -CONFIG_FB_TRIDENT=m -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -CONFIG_FB_GEODE=y -CONFIG_FB_GEODE_LX=y -CONFIG_FB_GEODE_GX=y -# CONFIG_FB_GEODE_GX1 is not set -# CONFIG_FB_TMIO is not set -CONFIG_FB_SM501=m -CONFIG_FB_UDL=m -CONFIG_FB_VIRTUAL=m -CONFIG_FB_METRONOME=m -CONFIG_FB_MB862XX=m -CONFIG_FB_MB862XX_PCI_GDC=y -CONFIG_FB_MB862XX_I2C=y -# CONFIG_FB_BROADSHEET is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -CONFIG_LCD_PLATFORM=m -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_BACKLIGHT_PROGEAR=m -CONFIG_BACKLIGHT_APPLE=m -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set - -# -# Display device support -# -CONFIG_DISPLAY_SUPPORT=m - -# -# Display hardware drivers -# - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=m -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -CONFIG_SND=m -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -CONFIG_SND_HWDEP=m -CONFIG_SND_RAWMIDI=m -CONFIG_SND_JACK=y -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_PCM_OSS_PLUGINS=y -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_HRTIMER=m -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_DYNAMIC_MINORS=y -# CONFIG_SND_SUPPORT_OLD_API is not set -CONFIG_SND_VERBOSE_PROCFS=y -CONFIG_SND_VERBOSE_PRINTK=y -CONFIG_SND_DEBUG=y -# CONFIG_SND_DEBUG_VERBOSE is not set -CONFIG_SND_PCM_XRUN_DEBUG=y -CONFIG_SND_VMASTER=y -CONFIG_SND_DMA_SGBUF=y -CONFIG_SND_RAWMIDI_SEQ=m -CONFIG_SND_OPL3_LIB_SEQ=m -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -CONFIG_SND_EMU10K1_SEQ=m -CONFIG_SND_MPU401_UART=m -CONFIG_SND_OPL3_LIB=m -CONFIG_SND_VX_LIB=m -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y -CONFIG_SND_PCSP=m -CONFIG_SND_DUMMY=m -CONFIG_SND_ALOOP=m -CONFIG_SND_VIRMIDI=m -CONFIG_SND_MTPAV=m -CONFIG_SND_MTS64=m -CONFIG_SND_SERIAL_U16550=m -CONFIG_SND_MPU401=m -CONFIG_SND_PORTMAN2X4=m -CONFIG_SND_AC97_POWER_SAVE=y -CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 -CONFIG_SND_SB_COMMON=m -CONFIG_SND_SB16_DSP=m -CONFIG_SND_TEA575X=m -CONFIG_SND_PCI=y -CONFIG_SND_AD1889=m -CONFIG_SND_ALS300=m -CONFIG_SND_ALS4000=m -CONFIG_SND_ALI5451=m -CONFIG_SND_ASIHPI=m -CONFIG_SND_ATIIXP=m -CONFIG_SND_ATIIXP_MODEM=m -CONFIG_SND_AU8810=m -CONFIG_SND_AU8820=m -CONFIG_SND_AU8830=m -# CONFIG_SND_AW2 is not set -CONFIG_SND_AZT3328=m -CONFIG_SND_BT87X=m -# CONFIG_SND_BT87X_OVERCLOCK is not set -CONFIG_SND_CA0106=m -CONFIG_SND_CMIPCI=m -CONFIG_SND_OXYGEN_LIB=m -CONFIG_SND_OXYGEN=m -CONFIG_SND_CS4281=m -CONFIG_SND_CS46XX=m -CONFIG_SND_CS46XX_NEW_DSP=y -CONFIG_SND_CS5530=m -CONFIG_SND_CS5535AUDIO=m -CONFIG_SND_CTXFI=m -CONFIG_SND_DARLA20=m -CONFIG_SND_GINA20=m -CONFIG_SND_LAYLA20=m -CONFIG_SND_DARLA24=m -CONFIG_SND_GINA24=m -CONFIG_SND_LAYLA24=m -CONFIG_SND_MONA=m -CONFIG_SND_MIA=m -CONFIG_SND_ECHO3G=m -CONFIG_SND_INDIGO=m -CONFIG_SND_INDIGOIO=m 
-CONFIG_SND_INDIGODJ=m -CONFIG_SND_INDIGOIOX=m -CONFIG_SND_INDIGODJX=m -CONFIG_SND_EMU10K1=m -CONFIG_SND_EMU10K1X=m -CONFIG_SND_ENS1370=m -CONFIG_SND_ENS1371=m -CONFIG_SND_ES1938=m -CONFIG_SND_ES1968=m -CONFIG_SND_ES1968_INPUT=y -CONFIG_SND_ES1968_RADIO=y -CONFIG_SND_FM801=m -CONFIG_SND_FM801_TEA575X_BOOL=y -CONFIG_SND_HDA_INTEL=m -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_HDA_HWDEP=y -CONFIG_SND_HDA_RECONFIG=y -CONFIG_SND_HDA_INPUT_BEEP=y -CONFIG_SND_HDA_INPUT_BEEP_MODE=1 -CONFIG_SND_HDA_INPUT_JACK=y -# CONFIG_SND_HDA_PATCH_LOADER is not set -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y -CONFIG_SND_HDA_CODEC_ANALOG=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_VIA=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CIRRUS=y -CONFIG_SND_HDA_CODEC_CONEXANT=y -CONFIG_SND_HDA_CODEC_CA0110=y -CONFIG_SND_HDA_CODEC_CA0132=y -CONFIG_SND_HDA_CODEC_CMEDIA=y -CONFIG_SND_HDA_CODEC_SI3054=y -CONFIG_SND_HDA_GENERIC=y -# CONFIG_SND_HDA_POWER_SAVE is not set -CONFIG_SND_HDSP=m -CONFIG_SND_HDSPM=m -CONFIG_SND_ICE1712=m -CONFIG_SND_ICE1724=m -CONFIG_SND_INTEL8X0=m -CONFIG_SND_INTEL8X0M=m -CONFIG_SND_KORG1212=m -CONFIG_SND_LOLA=m -CONFIG_SND_LX6464ES=m -CONFIG_SND_MAESTRO3=m -CONFIG_SND_MAESTRO3_INPUT=y -CONFIG_SND_MIXART=m -CONFIG_SND_NM256=m -CONFIG_SND_PCXHR=m -CONFIG_SND_RIPTIDE=m -CONFIG_SND_RME32=m -CONFIG_SND_RME96=m -CONFIG_SND_RME9652=m -CONFIG_SND_SIS7019=m -CONFIG_SND_SONICVIBES=m -CONFIG_SND_TRIDENT=m -CONFIG_SND_VIA82XX=m -CONFIG_SND_VIA82XX_MODEM=m -CONFIG_SND_VIRTUOSO=m -CONFIG_SND_VX222=m -CONFIG_SND_YMFPCI=m -CONFIG_SND_USB=y -CONFIG_SND_USB_AUDIO=m -CONFIG_SND_USB_UA101=m -CONFIG_SND_USB_USX2Y=m -CONFIG_SND_USB_CAIAQ=m -CONFIG_SND_USB_CAIAQ_INPUT=y -CONFIG_SND_USB_US122L=m -CONFIG_SND_USB_6FIRE=m -CONFIG_SND_FIREWIRE=y -CONFIG_SND_FIREWIRE_LIB=m -CONFIG_SND_FIREWIRE_SPEAKERS=m -# CONFIG_SND_ISIGHT is not set -# CONFIG_SND_SOC is not set -# CONFIG_SOUND_PRIME is not set -CONFIG_AC97_BUS=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HIDRAW=y - -# -# USB Input Devices -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=y -# CONFIG_HID_ACRUX is not set -CONFIG_HID_APPLE=y -CONFIG_HID_BELKIN=y -CONFIG_HID_CHERRY=y -CONFIG_HID_CHICONY=y -# CONFIG_HID_PRODIKEYS is not set -CONFIG_HID_CYPRESS=y -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -CONFIG_HID_EZKEY=y -# CONFIG_HID_HOLTEK is not set -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=y -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=y -CONFIG_HID_LCPOWER=m -CONFIG_HID_LOGITECH=y -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWII_FF is not set -# CONFIG_HID_MAGICMOUSE is not set -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MONTEREY=y -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTRIG is not set -CONFIG_HID_ORTEK=m -# CONFIG_HID_PANTHERLORD is not set -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -# CONFIG_HID_QUANTA is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_ROCCAT_COMMON=m -CONFIG_HID_ROCCAT_ARVO=m -CONFIG_HID_ROCCAT_KONE=m -CONFIG_HID_ROCCAT_KONEPLUS=m -CONFIG_HID_ROCCAT_KOVAPLUS=m -CONFIG_HID_ROCCAT_PYRA=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SONY is not set -CONFIG_HID_SPEEDLINK=m -CONFIG_HID_SUNPLUS=m -# CONFIG_HID_GREENASIA is not set -# 
CONFIG_HID_SMARTJOYPLUS is not set -CONFIG_HID_TOPSEED=m -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_ZEROPLUS is not set -CONFIG_HID_ZYDACRON=m -CONFIG_USB_SUPPORT=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y -CONFIG_USB_ARCH_HAS_EHCI=y -CONFIG_USB=y -# CONFIG_USB_DEBUG is not set -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -CONFIG_USB_SUSPEND=y -# CONFIG_USB_OTG is not set -CONFIG_USB_MON=m -CONFIG_USB_WUSB=m -CONFIG_USB_WUSB_CBAF=m -# CONFIG_USB_WUSB_CBAF_DEBUG is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=m -# CONFIG_USB_XHCI_HCD_DEBUGGING is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set -CONFIG_USB_ISP1362_HCD=m -CONFIG_USB_OHCI_HCD=y -# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set -# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_U132_HCD is not set -CONFIG_USB_SL811_HCD=m -CONFIG_USB_SL811_HCD_ISO=y -# CONFIG_USB_R8A66597_HCD is not set -CONFIG_USB_WHCI_HCD=m -CONFIG_USB_HWA_HCD=m - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m -# CONFIG_USB_LIBUSUAL is not set - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m - -# -# USB port drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_SERIAL=m -CONFIG_USB_EZUSB=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -# CONFIG_USB_SERIAL_EMPEG is not set -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_FUNSOFT=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_GARMIN is not set -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -# CONFIG_USB_SERIAL_KEYSPAN is not set -CONFIG_USB_SERIAL_KLSI=m -# CONFIG_USB_SERIAL_KOBIL_SCT is not set -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MOTOROLA=m -# CONFIG_USB_SERIAL_NAVMAN is not set -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -# CONFIG_USB_SERIAL_HP4X is not set -# CONFIG_USB_SERIAL_SAFE is not set -CONFIG_USB_SERIAL_SIEMENS_MPI=m -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -# 
CONFIG_USB_SERIAL_SYMBOL is not set -# CONFIG_USB_SERIAL_TI is not set -# CONFIG_USB_SERIAL_CYBERJACK is not set -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -# CONFIG_USB_SERIAL_OMNINET is not set -# CONFIG_USB_SERIAL_OPTICON is not set -# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set -# CONFIG_USB_SERIAL_ZIO is not set -CONFIG_USB_SERIAL_SSU100=m -# CONFIG_USB_SERIAL_DEBUG is not set - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -CONFIG_USB_LCD=m -CONFIG_USB_LED=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -CONFIG_USB_FTDI_ELAN=m -# CONFIG_USB_APPLEDISPLAY is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_ISIGHTFW is not set -CONFIG_USB_YUREX=m -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m -# CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -CONFIG_USB_OTG_UTILS=y -# CONFIG_USB_GPIO_VBUS is not set -CONFIG_NOP_USB_XCEIV=m -CONFIG_UWB=m -CONFIG_UWB_HWA=m -CONFIG_UWB_WHCI=m -CONFIG_UWB_I1480U=m -CONFIG_MMC=m -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_UNSAFE_RESUME is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_PLTFM=m -CONFIG_MMC_WBSD=m -CONFIG_MMC_TIFM_SD=m -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -CONFIG_LEDS_LM3530=m -CONFIG_LEDS_ALIX2=m -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -CONFIG_LEDS_LP5521=m -CONFIG_LEDS_LP5523=m -CONFIG_LEDS_CLEVO_MAIL=m -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_INTEL_SS4200=m -CONFIG_LEDS_LT3593=m -CONFIG_LEDS_DELL_NETBOOKS=m -CONFIG_LEDS_TRIGGERS=y - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_GPIO=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC=y - -# -# Reporting subsystems -# -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_MCE_INJ=m -CONFIG_EDAC_MM_EDAC=m -CONFIG_EDAC_MCE=y -CONFIG_EDAC_AMD76X=m -CONFIG_EDAC_E7XXX=m -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82875P=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I82860=m -CONFIG_EDAC_R82600=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# 
CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_DS1307=m -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -CONFIG_RTC_DRV_RV3029C2=m - -# -# SPI RTC drivers -# - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_BQ4802=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_V3020=m - -# -# on-CPU RTC drivers -# -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -# CONFIG_INTEL_MID_DMAC is not set -CONFIG_INTEL_IOATDMA=m -CONFIG_TIMB_DMA=m -CONFIG_PCH_DMA=m -CONFIG_DMA_ENGINE=y - -# -# DMA Clients -# -CONFIG_NET_DMA=y -CONFIG_ASYNC_TX_DMA=y -# CONFIG_DMATEST is not set -CONFIG_DCA=m -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=m -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -CONFIG_VIRTIO=m -CONFIG_VIRTIO_RING=m - -# -# Virtio drivers -# -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_STAGING=y -CONFIG_ET131X=m -# CONFIG_ET131X_DEBUG is not set -# CONFIG_SLICOSS is not set -# CONFIG_VIDEO_GO7007 is not set -# CONFIG_VIDEO_CX25821 is not set -# CONFIG_VIDEO_TM6000 is not set -# CONFIG_DVB_CXD2099 is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_W35UND is not set -# CONFIG_PRISM2_USB is not set -# CONFIG_ECHO is not set -# CONFIG_BRCMUTIL is not set -# CONFIG_BRCMSMAC is not set -# CONFIG_BRCMFMAC is not set -# CONFIG_COMEDI is not set -# CONFIG_ASUS_OLED is not set -# CONFIG_PANEL is not set -# CONFIG_R8187SE is not set -# CONFIG_RTL8192U is not set -# CONFIG_RTL8192E is not set -# CONFIG_R8712U is not set -# CONFIG_RTS_PSTOR is not set -# CONFIG_TRANZPORT is not set -# CONFIG_POHMELFS is not set -# CONFIG_IDE_PHISON is not set -# CONFIG_LINE6_USB is not set -CONFIG_DRM_VMWGFX=m -CONFIG_DRM_NOUVEAU=m -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -CONFIG_DRM_NOUVEAU_DEBUG=y - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_USB_SERIAL_QUATECH2 is not set -# CONFIG_USB_SERIAL_QUATECH_USB2 is not set -# CONFIG_VT6655 is not set -# CONFIG_VT6656 is not set -CONFIG_HYPERV=m -CONFIG_HYPERV_STORAGE=m -CONFIG_HYPERV_BLOCK=m -CONFIG_HYPERV_NET=m -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_MOUSE=m -# CONFIG_VME_BUS is not set -# CONFIG_DX_SEP is not set -# CONFIG_IIO is not set -# CONFIG_XVMALLOC is not set -# CONFIG_ZRAM is not set -# CONFIG_ZCACHE is not set -# CONFIG_FB_SM7XX is not set -# CONFIG_VIDEO_DT3155 is not set -# CONFIG_CRYSTALHD is not set -# CONFIG_FB_XGI is not set -# CONFIG_LIRC_STAGING is not set -# CONFIG_EASYCAP is not set -# CONFIG_SOLO6X10 is not set -# CONFIG_ACPI_QUICKSTART is not set -# 
CONFIG_ATH6K_LEGACY is not set -# CONFIG_USB_ENESTORAGE is not set -# CONFIG_BCM_WIMAX is not set -# CONFIG_FT1000 is not set - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set -# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set -# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set -# CONFIG_DRM_PSB is not set -# CONFIG_ALTERA_STAPL is not set -# CONFIG_INTEL_MEI is not set -CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_ACER_WMI=m -CONFIG_ACERHDF=m -CONFIG_ASUS_LAPTOP=m -CONFIG_DELL_LAPTOP=m -CONFIG_DELL_WMI=m -CONFIG_DELL_WMI_AIO=m -CONFIG_FUJITSU_LAPTOP=m -# CONFIG_FUJITSU_LAPTOP_DEBUG is not set -CONFIG_TC1100_WMI=m -CONFIG_HP_ACCEL=m -CONFIG_HP_WMI=m -CONFIG_MSI_LAPTOP=m -CONFIG_PANASONIC_LAPTOP=m -CONFIG_COMPAL_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -CONFIG_IDEAPAD_LAPTOP=m -CONFIG_THINKPAD_ACPI=m -CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -CONFIG_SENSORS_HDAPS=m -# CONFIG_INTEL_MENLOW is not set -CONFIG_EEEPC_LAPTOP=m -CONFIG_ASUS_WMI=m -CONFIG_ASUS_NB_WMI=m -CONFIG_EEEPC_WMI=m -CONFIG_ACPI_WMI=m -CONFIG_MSI_WMI=m -# CONFIG_ACPI_ASUS is not set -CONFIG_TOPSTAR_LAPTOP=m -CONFIG_ACPI_TOSHIBA=m -CONFIG_TOSHIBA_BT_RFKILL=m -CONFIG_ACPI_CMPC=m -CONFIG_INTEL_IPS=m -# CONFIG_IBM_RTL is not set -CONFIG_XO15_EBOOK=m -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_MXM_WMI=m -CONFIG_INTEL_OAKTRAIL=m -CONFIG_SAMSUNG_Q10=m -CONFIG_CLKSRC_I8253=y -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y -CONFIG_DMAR=y -CONFIG_DMAR_DEFAULT_ON=y -CONFIG_DMAR_FLOPPY_WA=y -CONFIG_VIRT_DRIVERS=y - -# -# Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_EFI_VARS=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_ISCSI_IBFT_FIND is not set -# CONFIG_SIGMA is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT23=y -CONFIG_EXT4_FS_XATTR=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -CONFIG_REISERFS_FS=m -# CONFIG_REISERFS_CHECK is not set -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_XFS_RT=y -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_NILFS2_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS4_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_GENERIC_ACL=y - -# -# Caches -# -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set 
- -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_CONFIGFS_FS=m -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EXOFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_OBJLAYOUT=m -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFS_USE_NEW_IDMAPPER is not set -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -# CONFIG_CEPH_FS is not set -CONFIG_CIFS=m -CONFIG_CIFS_STATS=y -# CONFIG_CIFS_STATS2 is not set -# CONFIG_CIFS_WEAK_PW_HASH is not set -# CONFIG_CIFS_UPCALL is not set -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -# CONFIG_CIFS_DEBUG2 is not set -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_FSCACHE=y -CONFIG_CIFS_ACL=y -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf-8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m 
-CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_PRINTK_TIME=y -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -# CONFIG_ENABLE_WARN_DEPRECATED is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_MAGIC_SYSRQ=y -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_UNUSED_SYMBOLS is not set -CONFIG_DEBUG_FS=y -CONFIG_HEADERS_CHECK=y -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_SHIRQ=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -# CONFIG_DETECT_HUNG_TASK is not set -CONFIG_SCHED_DEBUG=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_HIGHMEM is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_LIST=y -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -CONFIG_BOOT_PRINTK_DELAY=y -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -# CONFIG_LKDTM is not set -# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FTRACE_NMI_ENTER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_RING_BUFFER=y -CONFIG_FTRACE_NMI_ENTER=y -CONFIG_EVENT_TRACING=y -CONFIG_EVENT_POWER_TRACING_DEPRECATED=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_DYNAMIC_FTRACE=y 
-CONFIG_FUNCTION_PROFILER=y -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set -# CONFIG_BUILD_DOCSRC is not set -CONFIG_DYNAMIC_DEBUG=y -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_ATOMIC64_SELFTEST is not set -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_TEST_KSTRTOX is not set -CONFIG_STRICT_DEVMEM=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_EARLY_PRINTK_DBGP is not set -CONFIG_DEBUG_STACKOVERFLOW=y -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_NX_TEST=m -CONFIG_DOUBLEFAULT=y -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -# -# Security options -# - -# -# Grsecurity -# -CONFIG_GRKERNSEC=y -# CONFIG_GRKERNSEC_LOW is not set -# CONFIG_GRKERNSEC_MEDIUM is not set -CONFIG_GRKERNSEC_HIGH=y -# CONFIG_GRKERNSEC_CUSTOM is not set - -# -# Address Space Protection -# -CONFIG_GRKERNSEC_KMEM=y -CONFIG_GRKERNSEC_VM86=y -# CONFIG_GRKERNSEC_IO is not set -CONFIG_GRKERNSEC_PROC_MEMMAP=y -CONFIG_GRKERNSEC_BRUTE=y -CONFIG_GRKERNSEC_MODHARDEN=y -CONFIG_GRKERNSEC_HIDESYM=y -CONFIG_GRKERNSEC_KERN_LOCKOUT=y - -# -# Role Based Access Control Options -# -CONFIG_GRKERNSEC_NO_RBAC=y -# CONFIG_GRKERNSEC_ACL_HIDEKERN is not set -CONFIG_GRKERNSEC_ACL_MAXTRIES=3 -CONFIG_GRKERNSEC_ACL_TIMEOUT=30 - -# -# Filesystem Protections -# -CONFIG_GRKERNSEC_PROC=y -# CONFIG_GRKERNSEC_PROC_USER is not set -CONFIG_GRKERNSEC_PROC_USERGROUP=y -CONFIG_GRKERNSEC_PROC_GID=10 -CONFIG_GRKERNSEC_PROC_ADD=y -CONFIG_GRKERNSEC_LINK=y -CONFIG_GRKERNSEC_FIFO=y -CONFIG_GRKERNSEC_SYSFS_RESTRICT=y -# CONFIG_GRKERNSEC_ROFS is not set -CONFIG_GRKERNSEC_CHROOT=y -CONFIG_GRKERNSEC_CHROOT_MOUNT=y -CONFIG_GRKERNSEC_CHROOT_DOUBLE=y -CONFIG_GRKERNSEC_CHROOT_PIVOT=y -CONFIG_GRKERNSEC_CHROOT_CHDIR=y -CONFIG_GRKERNSEC_CHROOT_CHMOD=y -CONFIG_GRKERNSEC_CHROOT_FCHDIR=y -CONFIG_GRKERNSEC_CHROOT_MKNOD=y -CONFIG_GRKERNSEC_CHROOT_SHMAT=y -CONFIG_GRKERNSEC_CHROOT_UNIX=y -CONFIG_GRKERNSEC_CHROOT_FINDTASK=y -CONFIG_GRKERNSEC_CHROOT_NICE=y -CONFIG_GRKERNSEC_CHROOT_SYSCTL=y -CONFIG_GRKERNSEC_CHROOT_CAPS=y - -# -# Kernel Auditing -# -# CONFIG_GRKERNSEC_AUDIT_GROUP is not set -# CONFIG_GRKERNSEC_EXECLOG is not set -CONFIG_GRKERNSEC_RESLOG=y -# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set -# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set -# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set -CONFIG_GRKERNSEC_AUDIT_MOUNT=y -CONFIG_GRKERNSEC_SIGNAL=y -CONFIG_GRKERNSEC_FORKFAIL=y -CONFIG_GRKERNSEC_TIME=y -CONFIG_GRKERNSEC_PROC_IPADDR=y -CONFIG_GRKERNSEC_RWXMAP_LOG=y -CONFIG_GRKERNSEC_AUDIT_TEXTREL=y - -# -# Executable Protections -# -CONFIG_GRKERNSEC_DMESG=y -CONFIG_GRKERNSEC_HARDEN_PTRACE=y -# CONFIG_GRKERNSEC_TPE is not set - -# -# Network Protections -# -CONFIG_GRKERNSEC_RANDNET=y -CONFIG_GRKERNSEC_BLACKHOLE=y -# CONFIG_GRKERNSEC_SOCKET is not set - -# -# Sysctl support -# -CONFIG_GRKERNSEC_SYSCTL=y -CONFIG_GRKERNSEC_SYSCTL_ON=y - -# -# Logging Options -# 
-CONFIG_GRKERNSEC_FLOODTIME=10 -CONFIG_GRKERNSEC_FLOODBURST=6 - -# -# PaX -# -CONFIG_ARCH_TRACK_EXEC_LIMIT=y -CONFIG_PAX=y - -# -# PaX Control -# -# CONFIG_PAX_SOFTMODE is not set -CONFIG_PAX_EI_PAX=y -CONFIG_PAX_PT_PAX_FLAGS=y -# CONFIG_PAX_NO_ACL_FLAGS is not set -CONFIG_PAX_HAVE_ACL_FLAGS=y -# CONFIG_PAX_HOOK_ACL_FLAGS is not set - -# -# Non-executable pages -# -CONFIG_PAX_NOEXEC=y -CONFIG_PAX_PAGEEXEC=y -CONFIG_PAX_SEGMEXEC=y -CONFIG_PAX_EMUTRAMP=y -CONFIG_PAX_MPROTECT=y -# CONFIG_PAX_MPROTECT_COMPAT is not set -CONFIG_PAX_ELFRELOCS=y -CONFIG_PAX_KERNEXEC=y -CONFIG_PAX_KERNEXEC_MODULE_TEXT=4 - -# -# Address Space Layout Randomization -# -CONFIG_PAX_ASLR=y -CONFIG_PAX_RANDKSTACK=y -CONFIG_PAX_RANDUSTACK=y -CONFIG_PAX_RANDMMAP=y - -# -# Miscellaneous hardening features -# -CONFIG_PAX_MEMORY_SANITIZE=y -CONFIG_PAX_MEMORY_STACKLEAK=y -CONFIG_PAX_MEMORY_UDEREF=y -CONFIG_PAX_REFCOUNT=y -CONFIG_PAX_USERCOPY=y -CONFIG_KEYS=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_NETWORK_XFRM=y -# CONFIG_SECURITY_PATH is not set -CONFIG_INTEL_TXT=y -CONFIG_LSM_MMAP_MIN_ADDR=65536 -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_IMA is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y -CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=m -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=m -# CONFIG_CRYPTO_NULL is not set -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_SEQIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m - -# -# Hash modes -# -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=y -CONFIG_CRYPTO_GHASH=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=m -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_586=y -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m 
-CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SALSA20_586=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_586=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_ZLIB=m -CONFIG_CRYPTO_LZO=m - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -CONFIG_CRYPTO_DEV_GEODE=m -CONFIG_CRYPTO_DEV_HIFN_795X=m -CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_APIC_ARCHITECTURE=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_MMU_AUDIT=y -CONFIG_VHOST_NET=m -# CONFIG_LGUEST is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=m -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_AUDIT_GENERIC=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=m -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_NLATTR=y -CONFIG_AVERAGE=y -CONFIG_CORDIC=m -CONFIG_LLIST=y diff --git a/kernel/config.x86_64 b/kernel/config.x86_64 deleted file mode 100644 index 5114d27..0000000 --- a/kernel/config.x86_64 +++ /dev/null @@ -1,2932 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 3.1.5 Kernel Configuration -# -CONFIG_64BIT=y -# CONFIG_X86_32 is not set -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_ZONE_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_RWSEM_GENERIC_SPINLOCK is not set -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_DEFAULT_IDLE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ZONE_DMA32=y -CONFIG_ARCH_POPULATES_NODE_MAP=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_X86_HT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" -# CONFIG_KTIME_SCALAR is not set -CONFIG_ARCH_CPU_PROBE_RELEASE=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_HAVE_IRQ_WORK=y -CONFIG_IRQ_WORK=y - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_BSD_PROCESS_ACCT=y -# CONFIG_BSD_PROCESS_ACCT_V3 is not set -# CONFIG_FHANDLE is not set -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y -CONFIG_HAVE_GENERIC_HARDIRQS=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_HARDIRQS=y -CONFIG_HAVE_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_RCU_TRACE is not set -CONFIG_RCU_FANOUT=64 -# CONFIG_RCU_FANOUT_EXACT is not set -# CONFIG_RCU_FAST_NO_HZ is not set -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=18 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -# CONFIG_CGROUP_DEVICE is not set -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -# CONFIG_CGROUP_MEM_RES_CTLR is not set -# CONFIG_CGROUP_PERF is not set 
-CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -# CONFIG_BLK_CGROUP is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -# CONFIG_SCHED_AUTOGROUP is not set -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_PERF_COUNTERS is not set -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_PCI_QUIRKS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -# CONFIG_OPROFILE is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_KPROBES=y -# CONFIG_JUMP_LABEL is not set -CONFIG_OPTPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_USE_GENERIC_SMP_HELPERS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_STOP_MACHINE=y -CONFIG_BLOCK=y -CONFIG_BLK_DEV_BSG=y -# CONFIG_BLK_DEV_BSGLIB is not set -# CONFIG_BLK_DEV_INTEGRITY is not set -CONFIG_BLOCK_COMPAT=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -# CONFIG_INLINE_SPIN_TRYLOCK is not set -# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK is not set -# CONFIG_INLINE_SPIN_LOCK_BH is not set -# CONFIG_INLINE_SPIN_LOCK_IRQ is not set -# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set -CONFIG_INLINE_SPIN_UNLOCK=y -# CONFIG_INLINE_SPIN_UNLOCK_BH is not set -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_READ_TRYLOCK is not set -# CONFIG_INLINE_READ_LOCK is not set -# CONFIG_INLINE_READ_LOCK_BH is not set -# CONFIG_INLINE_READ_LOCK_IRQ is not set -# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set -CONFIG_INLINE_READ_UNLOCK=y -# CONFIG_INLINE_READ_UNLOCK_BH is not set -CONFIG_INLINE_READ_UNLOCK_IRQ=y -# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set -# CONFIG_INLINE_WRITE_TRYLOCK is not set -# CONFIG_INLINE_WRITE_LOCK 
is not set -# CONFIG_INLINE_WRITE_LOCK_BH is not set -# CONFIG_INLINE_WRITE_LOCK_IRQ is not set -# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set -CONFIG_INLINE_WRITE_UNLOCK=y -# CONFIG_INLINE_WRITE_UNLOCK_BH is not set -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_SMP=y -CONFIG_X86_MPPARSE=y -CONFIG_X86_EXTENDED_PLATFORM=y -# CONFIG_X86_VSMP is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -# CONFIG_PARAVIRT_GUEST is not set -CONFIG_NO_BOOTMEM=y -# CONFIG_MEMTEST is not set -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=7 -CONFIG_X86_CMPXCHG=y -CONFIG_CMPXCHG_LOCAL=y -CONFIG_CMPXCHG_DOUBLE=y -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_XADD=y -CONFIG_X86_WP_WORKS_OK=y -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -CONFIG_CALGARY_IOMMU=y -CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS=64 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -# CONFIG_IRQ_TIME_ACCOUNTING is not set -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y -# CONFIG_I8K is not set -CONFIG_MICROCODE=y -CONFIG_MICROCODE_INTEL=y -CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DIRECT_GBPAGES=y -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NODES_SPAN_OTHER_NODES=y -# CONFIG_NUMA_EMU is not set -CONFIG_NODES_SHIFT=6 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -# CONFIG_MEMORY_HOTPLUG is not set -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_COMPACTION is not set -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_MEMORY_FAILURE is not set -# CONFIG_TRANSPARENT_HUGEPAGE is not set -# CONFIG_CLEANCACHE is not set -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -# CONFIG_MTRR_SANITIZER is not set -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_EFI=y -CONFIG_SECCOMP=y -# CONFIG_CC_STACKPROTECTOR is not set -# CONFIG_HZ_100 is not set -# 
CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y -# CONFIG_KEXEC_JUMP is not set -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_HOTPLUG_CPU=y -# CONFIG_COMPAT_VDSO is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y - -# -# Power management and ACPI options -# -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_RUNTIME is not set -CONFIG_PM=y -CONFIG_PM_DEBUG=y -# CONFIG_PM_ADVANCED_DEBUG is not set -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_CAN_PM_TRACE=y -CONFIG_PM_TRACE=y -CONFIG_PM_TRACE_RTC=y -CONFIG_ACPI=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS=y -# CONFIG_ACPI_PROCFS_POWER is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_PROC_EVENT=y -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=y -CONFIG_ACPI_FAN=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set -CONFIG_ACPI_THERMAL=y -CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -CONFIG_ACPI_BLACKLIST_YEAR=0 -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_PCI_SLOT is not set -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -# CONFIG_ACPI_SBS is not set -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_APEI is not set -# CONFIG_SFI is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_TABLE=y -# CONFIG_CPU_FREQ_STAT is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set - -# -# x86 CPU frequency scaling drivers -# -# CONFIG_X86_PCC_CPUFREQ is not set -CONFIG_X86_ACPI_CPUFREQ=y -# CONFIG_X86_POWERNOW_K8 is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -# CONFIG_X86_P4_CLOCKMOD is not set - -# -# shared options -# -# CONFIG_X86_SPEEDSTEP_LIB is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_INTEL_IDLE is not set - -# -# Memory power savings -# -# CONFIG_I7300_IDLE is not set - -# -# Bus options (PCI etc.) 
-# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -# CONFIG_HOTPLUG_PCI_PCIE is not set -CONFIG_PCIEAER=y -# CONFIG_PCIE_ECRC is not set -# CONFIG_PCIEAER_INJECT is not set -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_ARCH_SUPPORTS_MSI=y -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_STUB is not set -CONFIG_HT_IRQ=y -CONFIG_PCI_IOV=y -CONFIG_PCI_IOAPIC=y -CONFIG_PCI_LABEL=y -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=y -CONFIG_PCMCIA=y -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=y -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -# CONFIG_PD6729 is not set -# CONFIG_I82092 is not set -CONFIG_PCCARD_NONSTATIC=y -CONFIG_HOTPLUG_PCI=y -# CONFIG_HOTPLUG_PCI_FAKE is not set -# CONFIG_HOTPLUG_PCI_ACPI is not set -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set -# CONFIG_RAPIDIO is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=y -CONFIG_IA32_EMULATION=y -# CONFIG_IA32_AOUT is not set -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_KEYS_COMPAT=y -CONFIG_HAVE_TEXT_POKE_SMP=y -CONFIG_NET=y -CONFIG_COMPAT_NETLINK_MESSAGES=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set -# CONFIG_NET_KEY is not set -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -# CONFIG_IP_FIB_TRIE_STATS is not set -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -CONFIG_IP_MROUTE=y -# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -# CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -CONFIG_INET_TUNNEL=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_LRO=y -# CONFIG_INET_DIAG is not set -CONFIG_TCP_CONG_ADVANCED=y -# CONFIG_TCP_CONG_BIC is not set -CONFIG_TCP_CONG_CUBIC=y -# CONFIG_TCP_CONG_WESTWOOD is not set -# CONFIG_TCP_CONG_HTCP is not set -# CONFIG_TCP_CONG_HSTCP is not set -# CONFIG_TCP_CONG_HYBLA is not set -# CONFIG_TCP_CONG_VEGAS is not set -# CONFIG_TCP_CONG_SCALABLE is not set -# CONFIG_TCP_CONG_LP is not set -# CONFIG_TCP_CONG_VENO is not set -# CONFIG_TCP_CONG_YEAH is not set -# CONFIG_TCP_CONG_ILLINOIS is not set -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -# CONFIG_IPV6_PRIVACY is not set -# CONFIG_IPV6_ROUTER_PREF is not set -# CONFIG_IPV6_OPTIMISTIC_DAD is not set -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -# CONFIG_INET6_IPCOMP is not set -# CONFIG_IPV6_MIP6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set -CONFIG_INET6_XFRM_MODE_TRANSPORT=y -CONFIG_INET6_XFRM_MODE_TUNNEL=y -CONFIG_INET6_XFRM_MODE_BEET=y -# 
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set -CONFIG_IPV6_SIT=y -# CONFIG_IPV6_SIT_6RD is not set -CONFIG_IPV6_NDISC_NODETYPE=y -# CONFIG_IPV6_TUNNEL is not set -# CONFIG_IPV6_MULTIPLE_TABLES is not set -# CONFIG_IPV6_MROUTE is not set -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -# CONFIG_NETFILTER_ADVANCED is not set - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -# CONFIG_IP_SET is not set -# CONFIG_IP_VS is not set - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_CONNTRACK_PROC_COMPAT=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_LOG=y -CONFIG_IP_NF_TARGET_ULOG=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -# CONFIG_NF_NAT_TFTP is not set -# CONFIG_NF_NAT_AMANDA is not set -# CONFIG_NF_NAT_PPTP is not set -# CONFIG_NF_NAT_H323 is not set -CONFIG_NF_NAT_SIP=y -CONFIG_IP_NF_MANGLE=y - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_TARGET_LOG=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -# CONFIG_BRIDGE is not set -# CONFIG_NET_DSA is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -# CONFIG_NET_SCH_CBQ is not set -# CONFIG_NET_SCH_HTB is not set -# CONFIG_NET_SCH_HFSC is not set -# CONFIG_NET_SCH_PRIO is not set -# CONFIG_NET_SCH_MULTIQ is not set -# CONFIG_NET_SCH_RED is not set -# CONFIG_NET_SCH_SFB is not set -# CONFIG_NET_SCH_SFQ is not set -# CONFIG_NET_SCH_TEQL is not set -# CONFIG_NET_SCH_TBF is not set -# CONFIG_NET_SCH_GRED is not set -# CONFIG_NET_SCH_DSMARK is not set -# CONFIG_NET_SCH_NETEM is not set -# CONFIG_NET_SCH_DRR is not set -# CONFIG_NET_SCH_MQPRIO is not set -# CONFIG_NET_SCH_CHOKE is not set -# CONFIG_NET_SCH_QFQ is not set -# CONFIG_NET_SCH_INGRESS is not set - -# -# Classification -# -CONFIG_NET_CLS=y -# CONFIG_NET_CLS_BASIC is not set -# CONFIG_NET_CLS_TCINDEX is not set -# CONFIG_NET_CLS_ROUTE4 is not set -# CONFIG_NET_CLS_FW is not set -# CONFIG_NET_CLS_U32 is not set -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set -# CONFIG_NET_CLS_FLOW is not set -# CONFIG_NET_CLS_CGROUP is not set -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 
-# CONFIG_NET_EMATCH_CMP is not set -# CONFIG_NET_EMATCH_NBYTE is not set -# CONFIG_NET_EMATCH_U32 is not set -# CONFIG_NET_EMATCH_META is not set -# CONFIG_NET_EMATCH_TEXT is not set -CONFIG_NET_CLS_ACT=y -# CONFIG_NET_ACT_POLICE is not set -# CONFIG_NET_ACT_GACT is not set -# CONFIG_NET_ACT_MIRRED is not set -# CONFIG_NET_ACT_IPT is not set -# CONFIG_NET_ACT_NAT is not set -# CONFIG_NET_ACT_PEDIT is not set -# CONFIG_NET_ACT_SIMP is not set -# CONFIG_NET_ACT_SKBEDIT is not set -# CONFIG_NET_ACT_CSUM is not set -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_HAVE_BPF_JIT=y -# CONFIG_BPF_JIT is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_TCPPROBE is not set -# CONFIG_NET_DROP_MONITOR is not set -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -# CONFIG_AX25 is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_CFG80211=y -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_REG_DEBUG is not set -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -# CONFIG_CFG80211_INTERNAL_REGDB is not set -CONFIG_CFG80211_WEXT=y -CONFIG_WIRELESS_EXT_SYSFS=y -# CONFIG_LIB80211 is not set -CONFIG_MAC80211=y -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_WIMAX is not set -CONFIG_RFKILL=y -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -# CONFIG_DEVTMPFS is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -CONFIG_FIRMWARE_IN_KERNEL=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_DEBUG_DRIVER is not set -CONFIG_DEBUG_DEVRES=y -# CONFIG_SYS_HYPERVISOR is not set -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_FD is not set -# CONFIG_BLK_CPQ_DA is not set -# CONFIG_BLK_CPQ_CISS_DA is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_SX8 is not set -# CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_MISC_DEVICES is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -# CONFIG_RAID_ATTRS is not set -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -# CONFIG_SCSI_TGT is not set -# CONFIG_SCSI_NETLINK is not set -CONFIG_SCSI_PROC_FS=y - -# -# 
SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=y -# CONFIG_CHR_DEV_SCH is not set -# CONFIG_SCSI_MULTI_LUN is not set -CONFIG_SCSI_CONSTANTS=y -# CONFIG_SCSI_LOGGING is not set -# CONFIG_SCSI_SCAN_ASYNC is not set -CONFIG_SCSI_WAIT_SCAN=m - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=y -# CONFIG_SCSI_FC_ATTRS is not set -# CONFIG_SCSI_ISCSI_ATTRS is not set -# CONFIG_SCSI_SAS_ATTRS is not set -# CONFIG_SCSI_SAS_LIBSAS is not set -# CONFIG_SCSI_SRP_ATTRS is not set -# CONFIG_SCSI_LOWLEVEL is not set -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set -# CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -# CONFIG_SATA_AHCI_PLATFORM is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -CONFIG_PATA_AMD=y -# CONFIG_PATA_ARASAN_CF is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CS5520 is not set -# CONFIG_PATA_CS5530 is not set -# CONFIG_PATA_CS5536 is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -CONFIG_PATA_OLDPIIX=y -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SC1200 is not set -CONFIG_PATA_SCH=y -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_PCMCIA is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -# CONFIG_ATA_GENERIC is not set -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -# CONFIG_MD_LINEAR is not set -# CONFIG_MD_RAID0 is not set -# CONFIG_MD_RAID1 is not set -# CONFIG_MD_RAID10 is not set -# CONFIG_MD_RAID456 is not set -# CONFIG_MD_MULTIPATH is not set -# CONFIG_MD_FAULTY is not set 
-CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -# CONFIG_DM_CRYPT is not set -# CONFIG_DM_SNAPSHOT is not set -CONFIG_DM_MIRROR=y -# CONFIG_DM_RAID is not set -# CONFIG_DM_LOG_USERSPACE is not set -CONFIG_DM_ZERO=y -# CONFIG_DM_MULTIPATH is not set -# CONFIG_DM_DELAY is not set -# CONFIG_DM_UEVENT is not set -# CONFIG_DM_FLAKEY is not set -# CONFIG_TARGET_CORE is not set -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# CONFIG_I2O is not set -CONFIG_MACINTOSH_DRIVERS=y -CONFIG_MAC_EMUMOUSEBTN=y -CONFIG_NETDEVICES=y -# CONFIG_IFB is not set -# CONFIG_DUMMY is not set -# CONFIG_BONDING is not set -# CONFIG_MACVLAN is not set -# CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set -# CONFIG_VETH is not set -# CONFIG_NET_SB1000 is not set -# CONFIG_ARCNET is not set -CONFIG_MII=y -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -# CONFIG_MARVELL_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_QSEMI_PHY is not set -# CONFIG_LXT_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_VITESSE_PHY is not set -# CONFIG_SMSC_PHY is not set -# CONFIG_BROADCOM_PHY is not set -# CONFIG_ICPLUS_PHY is not set -# CONFIG_REALTEK_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_MICREL_PHY is not set -# CONFIG_FIXED_PHY is not set -# CONFIG_MDIO_BITBANG is not set -CONFIG_NET_ETHERNET=y -# CONFIG_HAPPYMEAL is not set -# CONFIG_SUNGEM is not set -# CONFIG_CASSINI is not set -CONFIG_NET_VENDOR_3COM=y -# CONFIG_VORTEX is not set -# CONFIG_TYPHOON is not set -# CONFIG_ETHOC is not set -# CONFIG_DNET is not set -CONFIG_NET_TULIP=y -# CONFIG_DE2104X is not set -# CONFIG_TULIP is not set -# CONFIG_DE4X5 is not set -# CONFIG_WINBOND_840 is not set -# CONFIG_DM9102 is not set -# CONFIG_ULI526X is not set -# CONFIG_PCMCIA_XIRCOM is not set -# CONFIG_HP100 is not set -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set -# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set -# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set -CONFIG_NET_PCI=y -# CONFIG_PCNET32 is not set -# CONFIG_AMD8111_ETH is not set -# CONFIG_ADAPTEC_STARFIRE is not set -# CONFIG_KSZ884X_PCI is not set -# CONFIG_B44 is not set -CONFIG_FORCEDETH=y -CONFIG_E100=y -# CONFIG_FEALNX is not set -# CONFIG_NATSEMI is not set -# CONFIG_NE2K_PCI is not set -# CONFIG_8139CP is not set -CONFIG_8139TOO=y -CONFIG_8139TOO_PIO=y -# CONFIG_8139TOO_TUNE_TWISTER is not set -# CONFIG_8139TOO_8129 is not set -# CONFIG_8139_OLD_RX_RESET is not set -# CONFIG_R6040 is not set -# CONFIG_SIS900 is not set -# CONFIG_EPIC100 is not set -# CONFIG_SMSC9420 is not set -# CONFIG_SUNDANCE is not set -# CONFIG_TLAN is not set -# CONFIG_KS8851_MLL is not set -# CONFIG_VIA_RHINE is not set -# CONFIG_SC92031 is not set -# CONFIG_ATL2 is not set -CONFIG_NETDEV_1000=y -# CONFIG_ACENIC is not set -# CONFIG_DL2K is not set -CONFIG_E1000=y -# CONFIG_E1000E is not set -# CONFIG_IP1000 is not set -# CONFIG_IGB is not set -# CONFIG_IGBVF is not set -# CONFIG_NS83820 is not set -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -# CONFIG_R8169 is not set -# CONFIG_SIS190 is not set -# CONFIG_SKGE is not set -CONFIG_SKY2=y -# CONFIG_SKY2_DEBUG is not set -# CONFIG_VIA_VELOCITY is not set -CONFIG_TIGON3=y -# CONFIG_BNX2 is not set -# CONFIG_CNIC is not set -# 
CONFIG_QLA3XXX is not set -# CONFIG_ATL1 is not set -# CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set -# CONFIG_JME is not set -# CONFIG_STMMAC_ETH is not set -# CONFIG_PCH_GBE is not set -CONFIG_NETDEV_10000=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -# CONFIG_CHELSIO_T4 is not set -# CONFIG_CHELSIO_T4VF is not set -# CONFIG_ENIC is not set -# CONFIG_IXGBE is not set -# CONFIG_IXGBEVF is not set -# CONFIG_IXGB is not set -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -# CONFIG_MYRI10GE is not set -# CONFIG_NETXEN_NIC is not set -# CONFIG_NIU is not set -# CONFIG_MLX4_EN is not set -# CONFIG_MLX4_CORE is not set -# CONFIG_TEHUTI is not set -# CONFIG_BNX2X is not set -# CONFIG_QLCNIC is not set -# CONFIG_QLGE is not set -# CONFIG_BNA is not set -# CONFIG_SFC is not set -# CONFIG_BE2NET is not set -CONFIG_TR=y -# CONFIG_IBMOL is not set -# CONFIG_3C359 is not set -# CONFIG_TMS380TR is not set -CONFIG_WLAN=y -# CONFIG_PCMCIA_RAYCS is not set -# CONFIG_LIBERTAS_THINFIRM is not set -# CONFIG_AIRO is not set -# CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set -# CONFIG_AIRO_CS is not set -# CONFIG_PCMCIA_WL3501 is not set -# CONFIG_PRISM54 is not set -# CONFIG_USB_ZD1201 is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set -# CONFIG_ADM8211 is not set -# CONFIG_MAC80211_HWSIM is not set -# CONFIG_MWL8K is not set -# CONFIG_ATH_COMMON is not set -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -# CONFIG_HOSTAP is not set -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWLAGN is not set -# CONFIG_IWL4965 is not set -# CONFIG_IWL3945 is not set -# CONFIG_LIBERTAS is not set -# CONFIG_HERMES is not set -# CONFIG_P54_COMMON is not set -# CONFIG_RT2X00 is not set -# CONFIG_RTL8192CE is not set -# CONFIG_RTL8192SE is not set -# CONFIG_RTL8192DE is not set -# CONFIG_RTL8192CU is not set -# CONFIG_WL1251 is not set -# CONFIG_WL12XX_MENU is not set -# CONFIG_ZD1211RW is not set -# CONFIG_MWIFIEX is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# - -# -# USB Network Adapters -# -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -# CONFIG_USB_USBNET is not set -# CONFIG_USB_HSO is not set -# CONFIG_USB_IPHETH is not set -CONFIG_NET_PCMCIA=y -# CONFIG_PCMCIA_3C589 is not set -# CONFIG_PCMCIA_3C574 is not set -# CONFIG_PCMCIA_FMVJ18X is not set -# CONFIG_PCMCIA_PCNET is not set -# CONFIG_PCMCIA_NMCLAN is not set -# CONFIG_PCMCIA_SMC91C92 is not set -# CONFIG_PCMCIA_XIRC2PS is not set -# CONFIG_PCMCIA_AXNET is not set -# CONFIG_PCMCIA_IBMTR is not set -# CONFIG_WAN is not set - -# -# CAIF transport drivers -# -CONFIG_FDDI=y -# CONFIG_DEFXX is not set -# CONFIG_SKFP is not set -# CONFIG_HIPPI is not set -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -# CONFIG_NET_FC is not set -CONFIG_NETCONSOLE=y -CONFIG_NETPOLL=y -# CONFIG_NETPOLL_TRAP is not set -CONFIG_NET_POLL_CONTROLLER=y -# CONFIG_VMXNET3 is not set -# CONFIG_ISDN is not set -# CONFIG_PHONE is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_POLLDEV=y -CONFIG_INPUT_SPARSEKMAP=y - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# 
-CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -CONFIG_INPUT_JOYSTICK=y -# CONFIG_JOYSTICK_ANALOG is not set -# CONFIG_JOYSTICK_A3D is not set -# CONFIG_JOYSTICK_ADI is not set -# CONFIG_JOYSTICK_COBRA is not set -# CONFIG_JOYSTICK_GF2K is not set -# CONFIG_JOYSTICK_GRIP is not set -# CONFIG_JOYSTICK_GRIP_MP is not set -# CONFIG_JOYSTICK_GUILLEMOT is not set -# CONFIG_JOYSTICK_INTERACT is not set -# CONFIG_JOYSTICK_SIDEWINDER is not set -# CONFIG_JOYSTICK_TMDC is not set -# CONFIG_JOYSTICK_IFORCE is not set -# CONFIG_JOYSTICK_WARRIOR is not set -# CONFIG_JOYSTICK_MAGELLAN is not set -# CONFIG_JOYSTICK_SPACEORB is not set -# CONFIG_JOYSTICK_SPACEBALL is not set -# CONFIG_JOYSTICK_STINGER is not set -# CONFIG_JOYSTICK_TWIDJOY is not set -# CONFIG_JOYSTICK_ZHENHUA is not set -# CONFIG_JOYSTICK_AS5011 is not set -# CONFIG_JOYSTICK_JOYDUMP is not set -# CONFIG_JOYSTICK_XPAD is not set -CONFIG_INPUT_TABLET=y -# CONFIG_TABLET_USB_ACECAD is not set -# CONFIG_TABLET_USB_AIPTEK is not set -# CONFIG_TABLET_USB_GTCO is not set -# CONFIG_TABLET_USB_HANWANG is not set -# CONFIG_TABLET_USB_KBTAB is not set -# CONFIG_TABLET_USB_WACOM is not set -CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_ELO is not set -# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_MK712 is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_PCSPKR is not set -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_MPU3050 is not set -# CONFIG_INPUT_APANEL is not set -# CONFIG_INPUT_ATLAS_BTNS is not set -# CONFIG_INPUT_ATI_REMOTE is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is 
not set -# CONFIG_INPUT_POWERMATE is not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -# CONFIG_INPUT_UINPUT is not set -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_CMA3000 is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_ROCKETPORT is not set -# CONFIG_CYCLADES is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -# CONFIG_SYNCLINK is not set -# CONFIG_SYNCLINKMP is not set -# CONFIG_SYNCLINK_GT is not set -# CONFIG_NOZOMI is not set -# CONFIG_ISI is not set -# CONFIG_N_HDLC is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -# CONFIG_DEVKMEM is not set -# CONFIG_STALDRV is not set - -# -# Serial drivers -# -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_CS is not set -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_8250_DETECT_IRQ=y -CONFIG_SERIAL_8250_RSA=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MFD_HSU is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_TIMBERDALE is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_PCH_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -# CONFIG_HW_RANDOM_INTEL is not set -# CONFIG_HW_RANDOM_AMD is not set -CONFIG_HW_RANDOM_VIA=y -CONFIG_NVRAM=y -# CONFIG_R3964 is not set -# CONFIG_APPLICOM is not set - -# -# PCMCIA character devices -# -# CONFIG_SYNCLINK_CS is not set -# CONFIG_CARDMAN_4000 is not set -# CONFIG_CARDMAN_4040 is not set -# CONFIG_IPWIRELESS is not set -# CONFIG_MWAVE is not set -# CONFIG_RAW_DRIVER is not set -CONFIG_HPET=y -# CONFIG_HPET_MMAP is not set -# CONFIG_HANGCHECK_TIMER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_TELCLOCK is not set -CONFIG_DEVPORT=y -# CONFIG_RAMOOPS is not set -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -# CONFIG_I2C_CHARDEV is not set -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -CONFIG_I2C_I801=y -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# 
CONFIG_I2C_INTEL_MID is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set -# CONFIG_I2C_EG20T is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# - -# -# Enable Device Drivers -> PPS to see the PTP clock options. -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -# CONFIG_GPIOLIB is not set -# CONFIG_W1 is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_BQ20Z75 is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -CONFIG_HWMON=y -# CONFIG_HWMON_VID is not set -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_ABITUGURU is not set -# CONFIG_SENSORS_ABITUGURU3 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_K8TEMP is not set -# CONFIG_SENSORS_K10TEMP is not set -# CONFIG_SENSORS_FAM15H_POWER is not set -# CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_CORETEMP is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# 
CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SCH5627 is not set -# CONFIG_SENSORS_SCH5636 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA_CPUTEMP is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -# CONFIG_SENSORS_APPLESMC is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -# CONFIG_SENSORS_ATK0110 is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_WATCHDOG=y -# CONFIG_WATCHDOG_CORE is not set -# CONFIG_WATCHDOG_NOWAYOUT is not set - -# -# Watchdog Device Drivers -# -# CONFIG_SOFT_WATCHDOG is not set -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -# CONFIG_ALIM1535_WDT is not set -# CONFIG_ALIM7101_WDT is not set -# CONFIG_F71808E_WDT is not set -# CONFIG_SP5100_TCO is not set -# CONFIG_SC520_WDT is not set -# CONFIG_SBC_FITPC2_WATCHDOG is not set -# CONFIG_EUROTECH_WDT is not set -# CONFIG_IB700_WDT is not set -# CONFIG_IBMASR is not set -# CONFIG_WAFER_WDT is not set -# CONFIG_I6300ESB_WDT is not set -# CONFIG_ITCO_WDT is not set -# CONFIG_IT8712F_WDT is not set -# CONFIG_IT87_WDT is not set -# CONFIG_HP_WATCHDOG is not set -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -# CONFIG_NV_TCO is not set -# CONFIG_60XX_WDT is not set -# CONFIG_SBC8360_WDT is not set -# CONFIG_CPU5_WDT is not set -# CONFIG_SMSC_SCH311X_WDT is not set -# CONFIG_SMSC37B787_WDT is not set -# CONFIG_W83627HF_WDT is not set -# CONFIG_W83697HF_WDT is not set -# CONFIG_W83697UG_WDT is not set -# CONFIG_W83877F_WDT is not set -# CONFIG_W83977F_WDT is not set -# CONFIG_MACHZ_WDT is not set -# CONFIG_SBC_EPX_C3_WATCHDOG is not set - -# -# PCI-based Watchdog Cards -# -# CONFIG_PCIPCWATCHDOG is not set -# CONFIG_WDTPCI is not set - -# -# USB-based Watchdog Cards -# -# CONFIG_USBPCWATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -# CONFIG_BCMA is not set -CONFIG_MFD_SUPPORT=y -# CONFIG_MFD_CORE is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS6507X is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_MFD_STMPE is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_MAX8925 is not set -# 
CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_CS5535 is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_REGULATOR is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -CONFIG_AGP=y -CONFIG_AGP_AMD64=y -CONFIG_AGP_INTEL=y -# CONFIG_AGP_SIS is not set -# CONFIG_AGP_VIA is not set -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -# CONFIG_VGA_SWITCHEROO is not set -CONFIG_DRM=y -CONFIG_DRM_KMS_HELPER=y -# CONFIG_DRM_TDFX is not set -# CONFIG_DRM_R128 is not set -# CONFIG_DRM_RADEON is not set -# CONFIG_DRM_I810 is not set -CONFIG_DRM_I915=y -CONFIG_DRM_I915_KMS=y -# CONFIG_DRM_MGA is not set -# CONFIG_DRM_SIS is not set -# CONFIG_DRM_VIA is not set -# CONFIG_DRM_SAVAGE is not set -# CONFIG_STUB_POULSBO is not set -# CONFIG_VGASTATE is not set -CONFIG_VIDEO_OUTPUT_CONTROL=y -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -# CONFIG_FB_SYS_FOPS is not set -# CONFIG_FB_WMT_GE_ROPS is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -# CONFIG_FB_VESA is not set -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_GEODE is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -# CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_GENERIC=y -# CONFIG_BACKLIGHT_PROGEAR is not set -# CONFIG_BACKLIGHT_APPLE is not set -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 -CONFIG_DUMMY_CONSOLE=y 
-CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=y -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -CONFIG_SND=y -CONFIG_SND_TIMER=y -CONFIG_SND_PCM=y -CONFIG_SND_HWDEP=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_SEQ_DUMMY=y -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_PCM_OSS_PLUGINS=y -CONFIG_SND_SEQUENCER_OSS=y -CONFIG_SND_HRTIMER=y -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -# CONFIG_SND_DEBUG is not set -CONFIG_SND_VMASTER=y -CONFIG_SND_DMA_SGBUF=y -# CONFIG_SND_RAWMIDI_SEQ is not set -# CONFIG_SND_OPL3_LIB_SEQ is not set -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -# CONFIG_SND_EMU10K1_SEQ is not set -CONFIG_SND_DRIVERS=y -# CONFIG_SND_PCSP is not set -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_ALOOP is not set -# CONFIG_SND_VIRMIDI is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set -CONFIG_SND_PCI=y -# CONFIG_SND_AD1889 is not set -# CONFIG_SND_ALS300 is not set -# CONFIG_SND_ALS4000 is not set -# CONFIG_SND_ALI5451 is not set -# CONFIG_SND_ASIHPI is not set -# CONFIG_SND_ATIIXP is not set -# CONFIG_SND_ATIIXP_MODEM is not set -# CONFIG_SND_AU8810 is not set -# CONFIG_SND_AU8820 is not set -# CONFIG_SND_AU8830 is not set -# CONFIG_SND_AW2 is not set -# CONFIG_SND_AZT3328 is not set -# CONFIG_SND_BT87X is not set -# CONFIG_SND_CA0106 is not set -# CONFIG_SND_CMIPCI is not set -# CONFIG_SND_OXYGEN is not set -# CONFIG_SND_CS4281 is not set -# CONFIG_SND_CS46XX is not set -# CONFIG_SND_CS5530 is not set -# CONFIG_SND_CS5535AUDIO is not set -# CONFIG_SND_CTXFI is not set -# CONFIG_SND_DARLA20 is not set -# CONFIG_SND_GINA20 is not set -# CONFIG_SND_LAYLA20 is not set -# CONFIG_SND_DARLA24 is not set -# CONFIG_SND_GINA24 is not set -# CONFIG_SND_LAYLA24 is not set -# CONFIG_SND_MONA is not set -# CONFIG_SND_MIA is not set -# CONFIG_SND_ECHO3G is not set -# CONFIG_SND_INDIGO is not set -# CONFIG_SND_INDIGOIO is not set -# CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set -# CONFIG_SND_EMU10K1 is not set -# CONFIG_SND_EMU10K1X is not set -# CONFIG_SND_ENS1370 is not set -# CONFIG_SND_ENS1371 is not set -# CONFIG_SND_ES1938 is not set -# CONFIG_SND_ES1968 is not set -# CONFIG_SND_FM801 is not set -CONFIG_SND_HDA_INTEL=y -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_HDA_HWDEP=y -# CONFIG_SND_HDA_RECONFIG is not set -# CONFIG_SND_HDA_INPUT_BEEP is not set -# CONFIG_SND_HDA_INPUT_JACK is not set -# CONFIG_SND_HDA_PATCH_LOADER is not set -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y -CONFIG_SND_HDA_CODEC_ANALOG=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_VIA=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CIRRUS=y -CONFIG_SND_HDA_CODEC_CONEXANT=y -CONFIG_SND_HDA_CODEC_CA0110=y -CONFIG_SND_HDA_CODEC_CA0132=y -CONFIG_SND_HDA_CODEC_CMEDIA=y -CONFIG_SND_HDA_CODEC_SI3054=y -CONFIG_SND_HDA_GENERIC=y -# CONFIG_SND_HDA_POWER_SAVE is not set -# CONFIG_SND_HDSP is not set -# CONFIG_SND_HDSPM is not set -# CONFIG_SND_ICE1712 is not set -# CONFIG_SND_ICE1724 is not set -# CONFIG_SND_INTEL8X0 is not 
set -# CONFIG_SND_INTEL8X0M is not set -# CONFIG_SND_KORG1212 is not set -# CONFIG_SND_LOLA is not set -# CONFIG_SND_LX6464ES is not set -# CONFIG_SND_MAESTRO3 is not set -# CONFIG_SND_MIXART is not set -# CONFIG_SND_NM256 is not set -# CONFIG_SND_PCXHR is not set -# CONFIG_SND_RIPTIDE is not set -# CONFIG_SND_RME32 is not set -# CONFIG_SND_RME96 is not set -# CONFIG_SND_RME9652 is not set -# CONFIG_SND_SONICVIBES is not set -# CONFIG_SND_TRIDENT is not set -# CONFIG_SND_VIA82XX is not set -# CONFIG_SND_VIA82XX_MODEM is not set -# CONFIG_SND_VIRTUOSO is not set -# CONFIG_SND_VX222 is not set -# CONFIG_SND_YMFPCI is not set -CONFIG_SND_USB=y -# CONFIG_SND_USB_AUDIO is not set -# CONFIG_SND_USB_UA101 is not set -# CONFIG_SND_USB_USX2Y is not set -# CONFIG_SND_USB_CAIAQ is not set -# CONFIG_SND_USB_US122L is not set -# CONFIG_SND_USB_6FIRE is not set -CONFIG_SND_PCMCIA=y -# CONFIG_SND_VXPOCKET is not set -# CONFIG_SND_PDAUDIOCF is not set -# CONFIG_SND_SOC is not set -# CONFIG_SOUND_PRIME is not set -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HIDRAW=y - -# -# USB Input Devices -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=y -# CONFIG_HID_ACRUX is not set -CONFIG_HID_APPLE=y -CONFIG_HID_BELKIN=y -CONFIG_HID_CHERRY=y -CONFIG_HID_CHICONY=y -# CONFIG_HID_PRODIKEYS is not set -CONFIG_HID_CYPRESS=y -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_EZKEY=y -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_KEYTOUCH is not set -CONFIG_HID_KYE=y -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -CONFIG_HID_GYRATION=y -# CONFIG_HID_TWINHAN is not set -CONFIG_HID_KENSINGTON=y -# CONFIG_HID_LCPOWER is not set -CONFIG_HID_LOGITECH=y -CONFIG_LOGITECH_FF=y -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWII_FF is not set -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MONTEREY=y -# CONFIG_HID_MULTITOUCH is not set -CONFIG_HID_NTRIG=y -# CONFIG_HID_ORTEK is not set -CONFIG_HID_PANTHERLORD=y -CONFIG_PANTHERLORD_FF=y -CONFIG_HID_PETALYNX=y -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_QUANTA is not set -# CONFIG_HID_ROCCAT is not set -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y -# CONFIG_HID_SPEEDLINK is not set -CONFIG_HID_SUNPLUS=y -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -CONFIG_HID_TOPSEED=y -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -CONFIG_USB_SUPPORT=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y -CONFIG_USB_ARCH_HAS_EHCI=y -CONFIG_USB=y -CONFIG_USB_DEBUG=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -CONFIG_USB_MON=y -# CONFIG_USB_WUSB is not set -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -# CONFIG_USB_XHCI_HCD is not set -CONFIG_USB_EHCI_HCD=y -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -# CONFIG_USB_EHCI_TT_NEWSCHED is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -CONFIG_USB_OHCI_HCD=y -# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set -# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_WHCI_HCD is not set -# CONFIG_USB_HWA_HCD is not set - -# 
-# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -CONFIG_USB_PRINTER=y -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=y -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set -CONFIG_USB_LIBUSUAL=y - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set - -# -# USB port drivers -# -# CONFIG_USB_SERIAL is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_LED is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_UWB is not set -# CONFIG_MMC is not set -# CONFIG_MEMSTICK is not set -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -# CONFIG_LEDS_LM3530 is not set -# CONFIG_LEDS_ALIX2 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_LP3944 is not set -# CONFIG_LEDS_LP5521 is not set -# CONFIG_LEDS_LP5523 is not set -# CONFIG_LEDS_CLEVO_MAIL is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_INTEL_SS4200 is not set -CONFIG_LEDS_TRIGGERS=y - -# -# LED Triggers -# -# CONFIG_LEDS_TRIGGER_TIMER is not set -# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set -# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set -# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC=y - -# -# Reporting subsystems -# -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=y -# CONFIG_EDAC_MCE_INJ is not set -# CONFIG_EDAC_MM_EDAC is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -# CONFIG_RTC_HCTOSYS is not set -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_DS3232 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8563 is 
not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# SPI RTC drivers -# - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -# CONFIG_INTEL_MID_DMAC is not set -# CONFIG_INTEL_IOATDMA is not set -# CONFIG_TIMB_DMA is not set -# CONFIG_PCH_DMA is not set -# CONFIG_AUXDISPLAY is not set -# CONFIG_UIO is not set - -# -# Virtio drivers -# -# CONFIG_VIRTIO_PCI is not set -# CONFIG_VIRTIO_BALLOON is not set -# CONFIG_STAGING is not set -CONFIG_X86_PLATFORM_DEVICES=y -# CONFIG_ACERHDF is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_FUJITSU_LAPTOP is not set -# CONFIG_HP_ACCEL is not set -# CONFIG_MSI_LAPTOP is not set -# CONFIG_PANASONIC_LAPTOP is not set -# CONFIG_COMPAL_LAPTOP is not set -# CONFIG_SONY_LAPTOP is not set -# CONFIG_IDEAPAD_LAPTOP is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_SENSORS_HDAPS is not set -# CONFIG_INTEL_MENLOW is not set -CONFIG_EEEPC_LAPTOP=y -# CONFIG_ACPI_WMI is not set -# CONFIG_ACPI_ASUS is not set -# CONFIG_TOPSTAR_LAPTOP is not set -# CONFIG_ACPI_TOSHIBA is not set -# CONFIG_TOSHIBA_BT_RFKILL is not set -# CONFIG_ACPI_CMPC is not set -# CONFIG_INTEL_IPS is not set -# CONFIG_IBM_RTL is not set -# CONFIG_XO15_EBOOK is not set -# CONFIG_SAMSUNG_LAPTOP is not set -# CONFIG_INTEL_OAKTRAIL is not set -# CONFIG_SAMSUNG_Q10 is not set -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y -CONFIG_AMD_IOMMU=y -CONFIG_AMD_IOMMU_STATS=y -CONFIG_DMAR=y -# CONFIG_DMAR_DEFAULT_ON is not set -CONFIG_DMAR_FLOPPY_WA=y -# CONFIG_INTR_REMAP is not set -# CONFIG_VIRT_DRIVERS is not set - -# -# Firmware Drivers -# -# CONFIG_EDD is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_EFI_VARS=y -# CONFIG_DELL_RBU is not set -# CONFIG_DCDBAS is not set -CONFIG_DMIID=y -# CONFIG_DMI_SYSFS is not set -# CONFIG_ISCSI_IBFT_FIND is not set -# CONFIG_SIGMA is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_XATTR=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -# CONFIG_EXT4_FS is not set -CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_NILFS2_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -# CONFIG_FANOTIFY is not set -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y 
-CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS4_FS=y -# CONFIG_FUSE_FS is not set -CONFIG_GENERIC_ACL=y - -# -# Caches -# -# CONFIG_FSCACHE is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -# CONFIG_PROC_VMCORE is not set -CONFIG_PROC_SYSCTL=y -# CONFIG_PROC_PAGE_MONITOR is not set -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -# CONFIG_CONFIGFS_FS is not set -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -# CONFIG_NFS_V4_1 is not set -CONFIG_ROOT_NFS=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFS_USE_NEW_IDMAPPER is not set -# CONFIG_NFSD is not set -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -# CONFIG_ATARI_PARTITION is not set -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_LDM_PARTITION is not set -CONFIG_SGI_PARTITION=y -# CONFIG_ULTRIX_PARTITION is not set -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# 
CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -CONFIG_NLS_UTF8=y - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_PRINTK_TIME=y -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -# CONFIG_ENABLE_WARN_DEPRECATED is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_MAGIC_SYSRQ=y -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_UNUSED_SYMBOLS is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -# CONFIG_LOCKUP_DETECTOR is not set -# CONFIG_HARDLOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -# CONFIG_SCHED_DEBUG is not set -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y -CONFIG_DEBUG_STACK_USAGE=y -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_INFO is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -# CONFIG_DEBUG_WRITECOUNT is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -# CONFIG_LKDTM is not set -# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_EVENT_POWER_TRACING_DEPRECATED=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENT=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set 
-CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -# CONFIG_DYNAMIC_DEBUG is not set -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_KMEMCHECK is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_STRICT_DEVMEM is not set -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -CONFIG_DEBUG_STACKOVERFLOW=y -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_NX_TEST=m -# CONFIG_IOMMU_DEBUG is not set -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -# CONFIG_X86_DECODER_SELFTEST is not set -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -# -# Security options -# - -# -# Grsecurity -# -# CONFIG_GRKERNSEC is not set - -# -# PaX -# -CONFIG_TASK_SIZE_MAX_SHIFT=47 - -# -# Miscellaneous hardening features -# -# CONFIG_PAX_MEMORY_SANITIZE is not set -# CONFIG_PAX_MEMORY_STACKLEAK is not set -# CONFIG_PAX_MEMORY_UDEREF is not set -CONFIG_KEYS=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -# CONFIG_SECURITYFS is not set -CONFIG_SECURITY_NETWORK=y -# CONFIG_SECURITY_NETWORK_XFRM is not set -# CONFIG_SECURITY_PATH is not set -# CONFIG_INTEL_TXT is not set -CONFIG_LSM_MMAP_MIN_ADDR=65536 -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_IMA is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y -# CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set - -# -# Authenticated Encryption with Associated Data -# -# CONFIG_CRYPTO_CCM is not set -# CONFIG_CRYPTO_GCM is not set -# CONFIG_CRYPTO_SEQIV is not set - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_CTR is not set -# CONFIG_CRYPTO_CTS is not set -# CONFIG_CRYPTO_ECB is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_XTS is not set - -# -# Hash modes -# -CONFIG_CRYPTO_HMAC=y -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_VMAC is not set - -# -# Digest -# -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_CRC32C_INTEL is not set -# CONFIG_CRYPTO_GHASH is not set -# CONFIG_CRYPTO_MD4 is not set -CONFIG_CRYPTO_MD5=y -# 
CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_RMD128 is not set -# CONFIG_CRYPTO_RMD160 is not set -# CONFIG_CRYPTO_RMD256 is not set -# CONFIG_CRYPTO_RMD320 is not set -CONFIG_CRYPTO_SHA1=y -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_WP512 is not set -# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_X86_64 is not set -# CONFIG_CRYPTO_AES_NI_INTEL is not set -# CONFIG_CRYPTO_ANUBIS is not set -CONFIG_CRYPTO_ARC4=y -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SALSA20 is not set -# CONFIG_CRYPTO_SALSA20_X86_64 is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_TWOFISH_X86_64 is not set - -# -# Compression -# -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_ZLIB is not set -# CONFIG_CRYPTO_LZO is not set - -# -# Random Number Generation -# -# CONFIG_CRYPTO_ANSI_CPRNG is not set -# CONFIG_CRYPTO_USER_API_HASH is not set -# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_PADLOCK is not set -# CONFIG_CRYPTO_DEV_HIFN_795X is not set -CONFIG_HAVE_KVM=y -CONFIG_VIRTUALIZATION=y -# CONFIG_KVM is not set -# CONFIG_VHOST_NET is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -# CONFIG_CRC_CCITT is not set -# CONFIG_CRC16 is not set -CONFIG_CRC_T10DIF=y -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -# CONFIG_CRC8 is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_NLATTR=y -CONFIG_AVERAGE=y -# CONFIG_CORDIC is not set diff --git a/kernel/kernel.nm b/kernel/kernel.nm index e266e41..ed507e4 100644 --- a/kernel/kernel.nm +++ b/kernel/kernel.nm @@ -4,8 +4,8 @@ ###############################################################################
 name = kernel
-version = 3.1.5
-release = 4
+version = 3.2.12
+release = 1

 thisapp = linux-%{version}

 maintainer = Michael Tremer michael.tremer@ipfire.org
@@ -15,9 +15,9 @@
 license = GPLv2
 summary = The Linux kernel.

 description
-	The kernel package contains the Linux kernel (vmlinuz), the core of any \
-	Linux operating system. The kernel handles the basic functions \
-	of the operating system: memory allocation, process allocation, device \
+	The kernel package contains the Linux kernel (vmlinuz), the core of any
+	Linux operating system. The kernel handles the basic functions
+	of the operating system: memory allocation, process allocation, device
 	input and output, etc.
 end
@@ -40,13 +40,14 @@ build
 		elfutils-devel
 		gcc-plugin-devel
 		gettext
-		module-init-tools
 		ncurses-devel
 		net-tools
 		perl
 		python-devel
 		xmlto
 		xz-lzma-compat
+
+		/sbin/depmod
 	end

 	tools_cpupower_requires
@@ -81,8 +82,14 @@ build
 	end

 	if "%{DISTRO_ARCH}" == "armv5tel"
+		# ERROR Currently all kernel images are disabled, because they
+		# won't build with the grsecurity patch.
+
 		# Build versatile kernel.
-		build_kernel_versatile = 1
+		#build_kernel_versatile = 1
+
+		# Build a kernel for Marvell Kirkwood-based devices.
+		#build_kernel_kirkwood = 1

 		kernel_arch = arm
 		kernel_image = arch/%{kernel_arch}/boot/zImage
@@ -92,9 +99,11 @@ build
 	end

 	if "%{DISTRO_ARCH}" == "armv7hl"
-		# Currently build no kernel images.
+		# Build a kernel for TI OMAP SoCs.
+		build_kernel_omap = 1

 		kernel_arch = arm
+		kernel_image = arch/%{kernel_arch}/boot/zImage

 		# ARM does not support cpupower.
 		build_cpupower = 0
@@ -115,6 +124,16 @@ build
 		kernels += versatile
 	end

+	# Build ARM kirkwood kernel.
+	if "%{build_kernel_kirkwood}" == "1"
+		kernels += kirkwood
+	end
+
+	# Build ARM omap kernel.
+	if "%{build_kernel_omap}" == "1"
+		kernels += omap
+	end
+
 	# Add tools' build requirements if build is requested.
 	if "%{build_cpupower}" == "1"
 		requires += %{tools_cpupower_requires}
@@ -138,6 +157,8 @@ build
 		touch .scmversion

 		mkdir -pv configs
+
+		cd %{DIR_SOURCE}

 	configure_kernel() {
 		local flavour=${1}
 		local suffix
@@ -145,13 +166,8 @@ build
 			suffix="-${flavour}"
 		fi

-		# This is the place, where the configuration files
-		# should be dynamically generated.
-		# For now, we just copy pregenerated ones.
-		cp %{DIR_SOURCE}/config.%{arch}${suffix} .config
-
-		make ARCH=%{kernel_arch} oldnoconfig
-		mv .config configs/config.${flavour}
+		scripts/configure --kernel-dir=%{DIR_APP} \
+			merge %{DISTRO_ARCH} ${flavour} %{DIR_APP}/configs/config.${flavour}
 	}
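The configure_kernel() hunk above drops the old step of copying a pregenerated .config and instead calls kernel/scripts/configure, which merges the configuration for the requested architecture and flavour into configs/config.${flavour}. As a rough, hypothetical sketch of what such a merge step can look like (the actual script is not shown in this section; the override rule of later fragments winning, the file names and the command line below are assumptions for illustration only):

#!/usr/bin/env python
# Minimal sketch of merging layered kernel configuration fragments.
# NOT the actual kernel/scripts/configure implementation; the override
# rule (later fragments win) and the usage line below are assumptions.

import re
import sys
from collections import OrderedDict

SET_RE   = re.compile(r"^(CONFIG_\w+)=")               # CONFIG_FOO=y / =m / ="bar"
UNSET_RE = re.compile(r"^# (CONFIG_\w+) is not set")   # explicitly disabled option

def parse_fragment(path):
    """Map option name -> complete line, keeping first-seen order."""
    options = OrderedDict()
    with open(path) as f:
        for line in f:
            line = line.rstrip("\n")
            match = SET_RE.match(line) or UNSET_RE.match(line)
            if match:
                options[match.group(1)] = line
    return options

def merge_fragments(paths):
    """Merge fragments in the given order; later fragments override earlier ones."""
    merged = OrderedDict()
    for path in paths:
        merged.update(parse_fragment(path))
    return merged

if __name__ == "__main__":
    # Hypothetical usage:
    #   python merge-configs.py config-generic config-arm-generic config-armv7hl-omap > .config
    for line in merge_fragments(sys.argv[1:]).values():
        print(line)

A real implementation would also have to resolve dependencies between options (what "make oldconfig" normally does), which this sketch deliberately ignores.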

 	# Generate configuration files for all kernels we are going to
@@ -276,6 +292,10 @@ build
 	end

 	install
+		# When no kernels are configured, we create a default config
+		# which enables us to install at least the header files.
+		[ -n "%{kernels}" ] || make ARCH=%{kernel_arch} defconfig
+
 		# Install the header files
 		make ARCH=%{kernel_arch} INSTALL_HDR_PATH=dest headers_install
 		mkdir -pv %{BUILDROOT}/usr/include
@@ -317,9 +337,7 @@ packages
 		summary = The Linux kernel %{kernel_release}.

 		prerequires
-			dracut
-			grubby
-			module-init-tools
+			/sbin/depmod
 		end

 		provides
@@ -327,7 +345,8 @@ packages
 		end

 		requires
-			%{prerequires}
+			dracut
+			grubby
 			linux-firmware
 		end

@@ -428,13 +447,45 @@ packages
 	end

 		package kernel-versatile-devel
-			kernel KERNELDEVEL
+			template KERNELDEVEL

 			kernel_name = kernel-versatile
 			kernel_release = %{fullver}.versatile
 		end
 	end
+ if "%{build_kernel_kirkwood}" == "1" + package kernel-kirkwood + template KERNEL + + kernel_name = kernel-kirkwood + kernel_release = %{fullver}.kirkwood + end + + package kernel-kirkwood-devel + template KERNELDEVEL + + kernel_name = kernel-kirkwood + kernel_release = %{fullver}.kirkwood + end + end + + if "%{build_kernel_omap}" == "1" + package kernel-omap + template KERNEL + + kernel_name = kernel-omap + kernel_release = %{fullver}.omap + end + + package kernel-omap-devel + template KERNELDEVEL + + kernel_name = kernel-omap + kernel_release = %{fullver}.omap + end + end + package kernel-headers summary = Header files of the kernel release %{fullver}. desciption = %{summary} diff --git a/kernel/patches/arm-smsc-support-reading-mac-address-from-device-tree.patch b/kernel/patches/arm-smsc-support-reading-mac-address-from-device-tree.patch new file mode 100644 index 0000000..a36c683 --- /dev/null +++ b/kernel/patches/arm-smsc-support-reading-mac-address-from-device-tree.patch @@ -0,0 +1,92 @@ +From 0b608345e114681f66ca0a3cf9d9434728da62ce Mon Sep 17 00:00:00 2001 +From: Ken Cox ken@coxcampers.net +Date: Thu, 23 Jun 2011 10:36:43 -0500 +Subject: [PATCH] Support reading mac address from device tree. + +If CONFIG_OF is enabled, we will try to read the mac address from the device tree. This enables us the ability to have a "static" mac address on arm boards such as the pandaboard and beagleboard which generate random mac addresses. +--- + drivers/net/usb/smsc75xx.c | 17 +++++++++++++++++ + drivers/net/usb/smsc95xx.c | 18 +++++++++++++++++- + 2 files changed, 34 insertions(+), 1 deletions(-) + +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c +index 753ee6e..ac0a200 100644 +--- a/drivers/net/usb/smsc75xx.c ++++ b/drivers/net/usb/smsc75xx.c +@@ -29,6 +29,7 @@ + #include <linux/crc32.h> + #include <linux/usb/usbnet.h> + #include <linux/slab.h> ++#include <linux/of_device.h> + #include "smsc75xx.h" + + #define SMSC_CHIPNAME "smsc75xx" +@@ -658,6 +659,22 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) + + static void smsc75xx_init_mac_address(struct usbnet *dev) + { ++ void *address; ++#ifdef CONFIG_OF ++ struct device_node *np; ++ ++ /* try the device tree */ ++ np = of_find_node_by_name(NULL, "smsc75xx"); ++ if (np) { ++ address = of_get_property(np, "local-mac-address", NULL); ++ if (address) { ++ memcpy(dev->net->dev_addr, address, ETH_ALEN); ++ netif_dbg(dev, ifup, dev->net, "MAC address read from device tree\n"); ++ return; ++ } ++ } ++#endif ++ + /* try reading mac address from EEPROM */ + if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + dev->net->dev_addr) == 0) { +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c +index bc86f4b..c83942d 100644 +--- a/drivers/net/usb/smsc95xx.c ++++ b/drivers/net/usb/smsc95xx.c +@@ -29,6 +29,7 @@ + #include <linux/crc32.h> + #include <linux/usb/usbnet.h> + #include <linux/slab.h> ++#include <linux/of_device.h> + #include "smsc95xx.h" + + #define SMSC_CHIPNAME "smsc95xx" +@@ -639,6 +640,22 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) + + static void smsc95xx_init_mac_address(struct usbnet *dev) + { ++ void *address; ++#ifdef CONFIG_OF ++ struct device_node *np; ++ ++ /* try the device tree */ ++ np = of_find_node_by_name(NULL, "smsc95xx"); ++ if (np) { ++ address = of_get_property(np, "local-mac-address", NULL); ++ if (address) { ++ memcpy(dev->net->dev_addr, address, ETH_ALEN); ++ netif_dbg(dev, ifup, dev->net, "MAC address read from 
device tree\n"); ++ return; ++ } ++ } ++#endif ++ + /* try reading mac address from EEPROM */ + if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + dev->net->dev_addr) == 0) { +@@ -648,7 +665,6 @@ static void smsc95xx_init_mac_address(struct usbnet *dev) + return; + } + } +- + /* no eeprom, or eeprom values are invalid. generate random MAC */ + random_ether_addr(dev->net->dev_addr); + netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n"); +-- +1.7.2.3 + diff --git a/kernel/patches/grsecurity-2.2.2-3.1.5-201112101853.patch b/kernel/patches/grsecurity-2.2.2-3.1.5-201112101853.patch deleted file mode 100644 index 67dea05..0000000 --- a/kernel/patches/grsecurity-2.2.2-3.1.5-201112101853.patch +++ /dev/null @@ -1,81446 +0,0 @@ -diff --git a/Documentation/dontdiff b/Documentation/dontdiff -index dfa6fc6..0095943 100644 ---- a/Documentation/dontdiff -+++ b/Documentation/dontdiff -@@ -5,6 +5,7 @@ - *.cis - *.cpio - *.csp -+*.dbg - *.dsp - *.dvi - *.elf -@@ -14,6 +15,7 @@ - *.gcov - *.gen.S - *.gif -+*.gmo - *.grep - *.grp - *.gz -@@ -48,9 +50,11 @@ - *.tab.h - *.tex - *.ver -+*.vim - *.xml - *.xz - *_MODULES -+*_reg_safe.h - *_vga16.c - *~ - #*# -@@ -70,6 +74,7 @@ Kerntypes - Module.markers - Module.symvers - PENDING -+PERF* - SCCS - System.map* - TAGS -@@ -93,19 +98,24 @@ bounds.h - bsetup - btfixupprep - build -+builtin-policy.h - bvmlinux - bzImage* - capability_names.h - capflags.c - classlist.h* -+clut_vga16.c -+common-cmds.h - comp*.log - compile.h* - conf - config - config-* - config_data.h* -+config.c - config.mak - config.mak.autogen -+config.tmp - conmakehash - consolemap_deftbl.c* - cpustr.h -@@ -119,6 +129,7 @@ dslm - elf2ecoff - elfconfig.h* - evergreen_reg_safe.h -+exception_policy.conf - fixdep - flask.h - fore200e_mkfirm -@@ -126,12 +137,15 @@ fore200e_pca_fw.c* - gconf - gconf.glade.h - gen-devlist -+gen-kdb_cmds.c - gen_crc32table - gen_init_cpio - generated - genheaders - genksyms - *_gray256.c -+hash -+hid-example - hpet_example - hugepage-mmap - hugepage-shm -@@ -146,7 +160,7 @@ int32.c - int4.c - int8.c - kallsyms --kconfig -+kern_constants.h - keywords.c - ksym.c* - ksym.h* -@@ -154,7 +168,6 @@ kxgettext - lkc_defs.h - lex.c - lex.*.c --linux - logo_*.c - logo_*_clut224.c - logo_*_mono.c -@@ -166,14 +179,15 @@ machtypes.h - map - map_hugetlb - maui_boot.h --media - mconf -+mdp - miboot* - mk_elfconfig - mkboot - mkbugboot - mkcpustr - mkdep -+mkpiggy - mkprep - mkregtable - mktables -@@ -209,6 +223,7 @@ r300_reg_safe.h - r420_reg_safe.h - r600_reg_safe.h - recordmcount -+regdb.c - relocs - rlim_names.h - rn50_reg_safe.h -@@ -219,6 +234,7 @@ setup - setup.bin - setup.elf - sImage -+slabinfo - sm_tbl* - split-include - syscalltab.h -@@ -229,6 +245,7 @@ tftpboot.img - timeconst.h - times.h* - trix_boot.h -+user_constants.h - utsrelease.h* - vdso-syms.lds - vdso.lds -@@ -246,7 +263,9 @@ vmlinux - vmlinux-* - vmlinux.aout - vmlinux.bin.all -+vmlinux.bin.bz2 - vmlinux.lds -+vmlinux.relocs - vmlinuz - voffset.h - vsyscall.lds -@@ -254,9 +273,11 @@ vsyscall_32.lds - wanxlfw.inc - uImage - unifdef -+utsrelease.h - wakeup.bin - wakeup.elf - wakeup.lds - zImage* - zconf.hash.c -+zconf.lex.c - zoffset.h -diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt -index d6e6724..a024ce8 100644 ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. - the specified number of seconds. 
This is to be used if - your oopses keep scrolling off the screen. - -+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain -+ virtualization environments that don't cope well with the -+ expand down segment used by UDEREF on X86-32 or the frequent -+ page table updates on X86-64. -+ -+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already. -+ - pcbit= [HW,ISDN] - - pcd. [PARIDE] -diff --git a/Makefile b/Makefile -index 94ab2ad..1e4a6e8 100644 ---- a/Makefile -+++ b/Makefile -@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ - - HOSTCC = gcc - HOSTCXX = g++ --HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer --HOSTCXXFLAGS = -O2 -+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks -+HOSTCLFAGS += $(call cc-option, -Wno-empty-body) -+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks - - # Decide whether to build built-in, modular, or both. - # Normally, just do built-in. -@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc - # Rules shared between *config targets and build targets - - # Basic helpers built in scripts/ --PHONY += scripts_basic --scripts_basic: -+PHONY += scripts_basic gcc-plugins -+scripts_basic: gcc-plugins - $(Q)$(MAKE) $(build)=scripts/basic - $(Q)rm -f .tmp_quiet_recordmcount - -@@ -564,6 +565,42 @@ else - KBUILD_CFLAGS += -O2 - endif - -+ifndef DISABLE_PAX_PLUGINS -+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y) -+ifndef DISABLE_PAX_CONSTIFY_PLUGIN -+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN -+endif -+ifdef CONFIG_PAX_MEMORY_STACKLEAK -+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN -+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100 -+endif -+ifdef CONFIG_KALLOCSTAT_PLUGIN -+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so -+endif -+ifdef CONFIG_PAX_KERNEXEC_PLUGIN -+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so -+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -+endif -+ifdef CONFIG_CHECKER_PLUGIN -+ifeq ($(call cc-ifversion, -ge, 0406, y), y) -+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN -+endif -+endif -+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN) -+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN -+gcc-plugins: -+ $(Q)$(MAKE) $(build)=tools/gcc -+else -+gcc-plugins: -+ifeq ($(call cc-ifversion, -ge, 0405, y), y) -+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. 
If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)) -+else -+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least" -+endif -+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure" -+endif -+endif -+ - include $(srctree)/arch/$(SRCARCH)/Makefile - - ifneq ($(CONFIG_FRAME_WARN),0) -@@ -708,7 +745,7 @@ export mod_strip_cmd - - - ifeq ($(KBUILD_EXTMOD),) --core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ -+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ - - vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ - $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ -@@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE - - # The actual objects are generated when descending, - # make sure no implicit rule kicks in -+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS) - $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; - - # Handle descending into subdirectories listed in $(vmlinux-dirs) -@@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; - # Error messages still appears in the original language - - PHONY += $(vmlinux-dirs) --$(vmlinux-dirs): prepare scripts -+$(vmlinux-dirs): gcc-plugins prepare scripts - $(Q)$(MAKE) $(build)=$@ - - # Store (new) KERNELRELASE string in include/config/kernel.release -@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE - $(Q)$(MAKE) $(build)=. missing-syscalls - - # All the preparing.. -+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS)) - prepare: prepare0 - - # Generate some files -@@ -1087,6 +1126,7 @@ all: modules - # using awk while concatenating to the final file. 
- - PHONY += modules -+modules: KBUILD_CFLAGS += $(GCC_PLUGINS) - modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin - $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order - @$(kecho) ' Building modules, stage 2.'; -@@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) - - # Target to prepare building external modules - PHONY += modules_prepare --modules_prepare: prepare scripts -+modules_prepare: gcc-plugins prepare scripts - - # Target to install modules - PHONY += modules_install -@@ -1198,7 +1238,7 @@ distclean: mrproper - @find $(srctree) $(RCS_FIND_IGNORE) \ - ( -name '*.orig' -o -name '*.rej' -o -name '*~' \ - -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ -- -o -name '.*.rej' -o -size 0 \ -+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \ - -o -name '*%' -o -name '.*.cmd' -o -name 'core' ) \ - -type f -print | xargs rm -f - -@@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules - $(module-dirs): crmodverdir $(objtree)/Module.symvers - $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) - -+modules: KBUILD_CFLAGS += $(GCC_PLUGINS) - modules: $(module-dirs) - @$(kecho) ' Building modules, stage 2.'; - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost -@@ -1486,17 +1527,19 @@ else - target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) - endif - --%.s: %.c prepare scripts FORCE -+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS) -+%.s: %.c gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.i: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.o: %.c prepare scripts FORCE -+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS) -+%.o: %.c gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.lst: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.s: %.S prepare scripts FORCE -+%.s: %.S gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.o: %.S prepare scripts FORCE -+%.o: %.S gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.symtypes: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -@@ -1506,11 +1549,13 @@ endif - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) --%/: prepare scripts FORCE -+%/: KBUILD_CFLAGS += $(GCC_PLUGINS) -+%/: gcc-plugins prepare scripts FORCE - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) --%.ko: prepare scripts FORCE -+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS) -+%.ko: gcc-plugins prepare scripts FORCE - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) $(@:.ko=.o) -diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h -index da5449e..7418343 100644 ---- a/arch/alpha/include/asm/elf.h -+++ b/arch/alpha/include/asm/elf.h -@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) -+ -+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) -+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 
14 : 19) -+#endif -+ - /* $0 is set by ld.so to a pointer to a function which might be - registered using atexit. This provides a mean for the dynamic - linker to call DT_FINI functions for shared libraries that have -diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h -index de98a73..bd4f1f8 100644 ---- a/arch/alpha/include/asm/pgtable.h -+++ b/arch/alpha/include/asm/pgtable.h -@@ -101,6 +101,17 @@ struct vm_area_struct; - #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) - #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) - #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) -+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) -+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_COPY_NOEXEC PAGE_COPY -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+#endif -+ - #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) - - #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) -diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c -index 2fd00b7..cfd5069 100644 ---- a/arch/alpha/kernel/module.c -+++ b/arch/alpha/kernel/module.c -@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, - - /* The small sections were sorted to the end of the segment. - The following should definitely cover them. */ -- gp = (u64)me->module_core + me->core_size - 0x8000; -+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; - got = sechdrs[me->arch.gotsecindex].sh_addr; - - for (i = 0; i < n; i++) { -diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c -index 01e8715..be0e80f 100644 ---- a/arch/alpha/kernel/osf_sys.c -+++ b/arch/alpha/kernel/osf_sys.c -@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, - /* At this point: (!vma || addr < vma->vm_end). */ - if (limit - len < addr) - return -ENOMEM; -- if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - return addr; - addr = vma->vm_end; - vma = vma->vm_next; -@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - merely specific addresses, but regions of memory -- perhaps - this feature should be incorporated into all ports? */ - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); - if (addr != (unsigned long) -ENOMEM) -@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - } - - /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ -- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), -- len, limit); -+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit); -+ - if (addr != (unsigned long) -ENOMEM) - return addr; - -diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c -index fadd5f8..904e73a 100644 ---- a/arch/alpha/mm/fault.c -+++ b/arch/alpha/mm/fault.c -@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm) - __reload_thread(pcb); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+/* -+ * PaX: decide what to do with offenders (regs->pc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int ldah, ldq, jmp; -+ -+ err = get_user(ldah, (unsigned int *)regs->pc); -+ err |= get_user(ldq, (unsigned int *)(regs->pc+4)); -+ err |= get_user(jmp, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((ldah & 0xFFFF0000U) == 0x277B0000U && -+ (ldq & 0xFFFF0000U) == 0xA77B0000U && -+ jmp == 0x6BFB0000U) -+ { -+ unsigned long r27, addr; -+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; -+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; -+ -+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); -+ err = get_user(r27, (unsigned long *)addr); -+ if (err) -+ break; -+ -+ regs->r27 = r27; -+ regs->pc = r27; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #2 */ -+ unsigned int ldah, lda, br; -+ -+ err = get_user(ldah, (unsigned int *)regs->pc); -+ err |= get_user(lda, (unsigned int *)(regs->pc+4)); -+ err |= get_user(br, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((ldah & 0xFFFF0000U) == 0x277B0000U && -+ (lda & 0xFFFF0000U) == 0xA77B0000U && -+ (br & 0xFFE00000U) == 0xC3E00000U) -+ { -+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL; -+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; -+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; -+ -+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); -+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation */ -+ unsigned int br; -+ -+ err = get_user(br, (unsigned int *)regs->pc); -+ -+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) { -+ unsigned int br2, ldq, nop, jmp; -+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; -+ -+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); -+ err = get_user(br2, (unsigned int *)addr); -+ err |= get_user(ldq, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ err |= get_user(jmp, (unsigned int *)(addr+12)); -+ err |= get_user(resolver, (unsigned long *)(addr+16)); -+ -+ if (err) -+ break; -+ -+ if (br2 == 0xC3600000U && -+ ldq == 0xA77B000CU && -+ nop == 0x47FF041FU && -+ jmp == 0x6B7B0000U) -+ { -+ regs->r28 = regs->pc+4; -+ regs->r27 = addr+16; -+ regs->pc = resolver; -+ return 3; -+ } -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? 
"); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif - - /* - * This routine handles page faults. It determines the address, -@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr, - good_area: - si_code = SEGV_ACCERR; - if (cause < 0) { -- if (!(vma->vm_flags & VM_EXEC)) -+ if (!(vma->vm_flags & VM_EXEC)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) -+ goto bad_area; -+ -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); -+ do_group_exit(SIGKILL); -+#else - goto bad_area; -+#endif -+ -+ } - } else if (!cause) { - /* Allow reads even for write-only mappings */ - if (!(vma->vm_flags & (VM_READ | VM_WRITE))) -diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h -index 86976d0..8a57797 100644 ---- a/arch/arm/include/asm/atomic.h -+++ b/arch/arm/include/asm/atomic.h -@@ -239,6 +239,14 @@ typedef struct { - u64 __aligned(8) counter; - } atomic64_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ u64 __aligned(8) counter; -+} atomic64_unchecked_t; -+#else -+typedef atomic64_t atomic64_unchecked_t; -+#endif -+ - #define ATOMIC64_INIT(i) { (i) } - - static inline u64 atomic64_read(atomic64_t *v) -diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h -index 0e9ce8d..6ef1e03 100644 ---- a/arch/arm/include/asm/elf.h -+++ b/arch/arm/include/asm/elf.h -@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) -+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) -+ -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x00008000UL -+ -+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) -+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) -+#endif - - /* When the program starts, a1 contains a pointer to a function to be - registered with atexit, as per the SVR4 ABI. 
A value of 0 means we -@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); - extern void elf_set_personality(const struct elf32_hdr *); - #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) - --struct mm_struct; --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - extern int vectors_user_mapping(void); - #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES -diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h -index e51b1e8..32a3113 100644 ---- a/arch/arm/include/asm/kmap_types.h -+++ b/arch/arm/include/asm/kmap_types.h -@@ -21,6 +21,7 @@ enum km_type { - KM_L1_CACHE, - KM_L2_CACHE, - KM_KDB, -+ KM_CLEARPAGE, - KM_TYPE_NR - }; - -diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h -index b293616..96310e5 100644 ---- a/arch/arm/include/asm/uaccess.h -+++ b/arch/arm/include/asm/uaccess.h -@@ -22,6 +22,8 @@ - #define VERIFY_READ 0 - #define VERIFY_WRITE 1 - -+extern void check_object_size(const void *ptr, unsigned long n, bool to); -+ - /* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is -@@ -387,8 +389,23 @@ do { \ - - - #ifdef CONFIG_MMU --extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); --extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); -+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); -+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); -+ -+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) -+{ -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ return ___copy_from_user(to, from, n); -+} -+ -+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ return ___copy_to_user(to, from, n); -+} -+ - extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); - extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); - extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); -@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n); - - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else /* security hole - plug it */ -@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u - - static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; -diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c -index aeef960..2966009 100644 ---- a/arch/arm/kernel/armksyms.c -+++ b/arch/arm/kernel/armksyms.c -@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user); - #ifdef CONFIG_MMU - EXPORT_SYMBOL(copy_page); - 
--EXPORT_SYMBOL(__copy_from_user); --EXPORT_SYMBOL(__copy_to_user); -+EXPORT_SYMBOL(___copy_from_user); -+EXPORT_SYMBOL(___copy_to_user); - EXPORT_SYMBOL(__clear_user); - - EXPORT_SYMBOL(__get_user_1); -diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c -index c9d11ea..5078081 100644 ---- a/arch/arm/kernel/process.c -+++ b/arch/arm/kernel/process.c -@@ -28,7 +28,6 @@ - #include <linux/tick.h> - #include <linux/utsname.h> - #include <linux/uaccess.h> --#include <linux/random.h> - #include <linux/hw_breakpoint.h> - #include <linux/cpuidle.h> - -@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p) - return 0; - } - --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long range_end = mm->brk + 0x02000000; -- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; --} -- - #ifdef CONFIG_MMU - /* - * The vectors page is always readable from user space for the -diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c -index bc9f9da..c75d826 100644 ---- a/arch/arm/kernel/traps.c -+++ b/arch/arm/kernel/traps.c -@@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt - - static DEFINE_SPINLOCK(die_lock); - -+extern void gr_handle_kernel_exploit(void); -+ - /* - * This function is protected against re-entrancy. - */ -@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); -+ -+ gr_handle_kernel_exploit(); -+ - if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); - } -diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S -index 66a477a..bee61d3 100644 ---- a/arch/arm/lib/copy_from_user.S -+++ b/arch/arm/lib/copy_from_user.S -@@ -16,7 +16,7 @@ - /* - * Prototype: - * -- * size_t __copy_from_user(void *to, const void *from, size_t n) -+ * size_t ___copy_from_user(void *to, const void *from, size_t n) - * - * Purpose: - * -@@ -84,11 +84,11 @@ - - .text - --ENTRY(__copy_from_user) -+ENTRY(___copy_from_user) - - #include "copy_template.S" - --ENDPROC(__copy_from_user) -+ENDPROC(___copy_from_user) - - .pushsection .fixup,"ax" - .align 0 -diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S -index d066df6..df28194 100644 ---- a/arch/arm/lib/copy_to_user.S -+++ b/arch/arm/lib/copy_to_user.S -@@ -16,7 +16,7 @@ - /* - * Prototype: - * -- * size_t __copy_to_user(void *to, const void *from, size_t n) -+ * size_t ___copy_to_user(void *to, const void *from, size_t n) - * - * Purpose: - * -@@ -88,11 +88,11 @@ - .text - - ENTRY(__copy_to_user_std) --WEAK(__copy_to_user) -+WEAK(___copy_to_user) - - #include "copy_template.S" - --ENDPROC(__copy_to_user) -+ENDPROC(___copy_to_user) - ENDPROC(__copy_to_user_std) - - .pushsection .fixup,"ax" -diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S -index d0ece2a..5ae2f39 100644 ---- a/arch/arm/lib/uaccess.S -+++ b/arch/arm/lib/uaccess.S -@@ -20,7 +20,7 @@ - - #define PAGE_SHIFT 12 - --/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) -+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n) - * Purpose : copy a block to user memory from kernel memory - * Params : to - user memory - * : from - kernel memory -@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault - sub r2, r2, ip - b .Lc2u_dest_aligned - --ENTRY(__copy_to_user) -+ENTRY(___copy_to_user) - stmfd sp!, {r2, r4 - r7, lr} - cmp r2, #4 - blt .Lc2u_not_enough -@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault - 
ldrgtb r3, [r1], #0 - USER( T(strgtb) r3, [r0], #1) @ May fault - b .Lc2u_finished --ENDPROC(__copy_to_user) -+ENDPROC(___copy_to_user) - - .pushsection .fixup,"ax" - .align 0 - 9001: ldmfd sp!, {r0, r4 - r7, pc} - .popsection - --/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); -+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n); - * Purpose : copy a block from user memory to kernel memory - * Params : to - kernel memory - * : from - user memory -@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault - sub r2, r2, ip - b .Lcfu_dest_aligned - --ENTRY(__copy_from_user) -+ENTRY(___copy_from_user) - stmfd sp!, {r0, r2, r4 - r7, lr} - cmp r2, #4 - blt .Lcfu_not_enough -@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault - USER( T(ldrgtb) r3, [r1], #1) @ May fault - strgtb r3, [r0], #1 - b .Lcfu_finished --ENDPROC(__copy_from_user) -+ENDPROC(___copy_from_user) - - .pushsection .fixup,"ax" - .align 0 -diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c -index 8b9b136..70d5100 100644 ---- a/arch/arm/lib/uaccess_with_memcpy.c -+++ b/arch/arm/lib/uaccess_with_memcpy.c -@@ -103,7 +103,7 @@ out: - } - - unsigned long --__copy_to_user(void __user *to, const void *from, unsigned long n) -+___copy_to_user(void __user *to, const void *from, unsigned long n) - { - /* - * This test is stubbed out of the main function above to keep -diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c -index 2b2d51c..0127490 100644 ---- a/arch/arm/mach-ux500/mbox-db5500.c -+++ b/arch/arm/mach-ux500/mbox-db5500.c -@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev, - return sprintf(buf, "0x%X\n", mbox_value); - } - --static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo); -+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo); - - static int mbox_show(struct seq_file *s, void *data) - { -diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index 3b5ea68..42fc9af 100644 ---- a/arch/arm/mm/fault.c -+++ b/arch/arm/mm/fault.c -@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, - } - #endif - -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (fsr & FSR_LNX_PF) { -+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - tsk->thread.address = addr; - tsk->thread.error_code = fsr; - tsk->thread.trap_no = 14; -@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - } - #endif /* CONFIG_MMU */ - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 20; i++) { -+ unsigned char c; -+ if (get_user(c, (__force unsigned char __user *)pc+i)) -+ printk(KERN_CONT "?? "); -+ else -+ printk(KERN_CONT "%02x ", c); -+ } -+ printk("\n"); -+ -+ printk(KERN_ERR "PAX: bytes at SP-4: "); -+ for (i = -1; i < 20; i++) { -+ unsigned long c; -+ if (get_user(c, (__force unsigned long __user *)sp+i)) -+ printk(KERN_CONT "???????? 
"); -+ else -+ printk(KERN_CONT "%08lx ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * First Level Translation Fault Handler - * -diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c -index 74be05f..f605b8c 100644 ---- a/arch/arm/mm/mmap.c -+++ b/arch/arm/mm/mmap.c -@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - if (len > TASK_SIZE) - return -ENOMEM; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_align) - addr = COLOUR_ALIGN(addr, pgoff); -@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - if (len > mm->cached_hole_size) { -- start_addr = addr = mm->free_area_cache; -+ start_addr = addr = mm->free_area_cache; - } else { -- start_addr = addr = TASK_UNMAPPED_BASE; -- mm->cached_hole_size = 0; -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; - } - /* 8 bits of randomness in 20 address space bits */ - if ((current->flags & PF_RANDOMIZE) && -@@ -100,14 +103,14 @@ full_search: - * Start a new search - just in case we missed - * some holes. - */ -- if (start_addr != TASK_UNMAPPED_BASE) { -- start_addr = addr = TASK_UNMAPPED_BASE; -+ if (start_addr != mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* - * Remember the place where we stopped the search: - */ -diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h -index 3b3159b..425ea94 100644 ---- a/arch/avr32/include/asm/elf.h -+++ b/arch/avr32/include/asm/elf.h -@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) -+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x00001000UL -+ -+#define PAX_DELTA_MMAP_LEN 15 -+#define PAX_DELTA_STACK_LEN 15 -+#endif - - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This could be done in user space, -diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h -index b7f5c68..556135c 100644 ---- a/arch/avr32/include/asm/kmap_types.h -+++ b/arch/avr32/include/asm/kmap_types.h -@@ -22,7 +22,8 @@ D(10) KM_IRQ0, - D(11) KM_IRQ1, - D(12) KM_SOFTIRQ0, - D(13) KM_SOFTIRQ1, --D(14) KM_TYPE_NR -+D(14) KM_CLEARPAGE, -+D(15) KM_TYPE_NR - }; - - #undef D -diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c -index f7040a1..db9f300 100644 ---- a/arch/avr32/mm/fault.c -+++ b/arch/avr32/mm/fault.c -@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) - - int exception_trace = 1; - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 20; i++) { -+ unsigned char c; -+ if (get_user(c, (unsigned char *)pc+i)) -+ printk(KERN_CONT "???????? 
"); -+ else -+ printk(KERN_CONT "%02x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * This routine handles page faults. It determines the address and the - * problem, and then passes it off to one of the appropriate routines. -@@ -156,6 +173,16 @@ bad_area: - up_read(&mm->mmap_sem); - - if (user_mode(regs)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (mm->pax_flags & MF_PAX_PAGEEXEC) { -+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { -+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); -+ do_group_exit(SIGKILL); -+ } -+ } -+#endif -+ - if (exception_trace && printk_ratelimit()) - printk("%s%s[%d]: segfault at %08lx pc %08lx " - "sp %08lx ecr %lu\n", -diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h -index f8e16b2..c73ff79 100644 ---- a/arch/frv/include/asm/kmap_types.h -+++ b/arch/frv/include/asm/kmap_types.h -@@ -23,6 +23,7 @@ enum km_type { - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, -+ KM_CLEARPAGE, - KM_TYPE_NR - }; - -diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c -index 385fd30..6c3d97e 100644 ---- a/arch/frv/mm/elf-fdpic.c -+++ b/arch/frv/mm/elf-fdpic.c -@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(current->mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) - goto success; - } - -@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - for (; vma; vma = vma->vm_next) { - if (addr > limit) - break; -- if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - goto success; - addr = vma->vm_end; - } -@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - for (; vma; vma = vma->vm_next) { - if (addr > limit) - break; -- if (addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - goto success; - addr = vma->vm_end; - } -diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h -index b5298eb..67c6e62 100644 ---- a/arch/ia64/include/asm/elf.h -+++ b/arch/ia64/include/asm/elf.h -@@ -42,6 +42,13 @@ - */ - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) -+ -+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) -+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) -+#endif -+ - #define PT_IA_64_UNWIND 0x70000001 - - /* IA-64 relocations: */ -diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h -index 1a97af3..7529d31 100644 ---- a/arch/ia64/include/asm/pgtable.h -+++ b/arch/ia64/include/asm/pgtable.h -@@ -12,7 +12,7 @@ - * David Mosberger-Tang davidm@hpl.hp.com - */ - -- -+#include <linux/const.h> - #include <asm/mman.h> - #include <asm/page.h> - #include <asm/processor.h> -@@ -143,6 +143,17 @@ - #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) - #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) - #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) -+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) -+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+# define PAGE_COPY_NOEXEC PAGE_COPY -+#endif -+ - #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) - #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) - #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) -diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h -index b77768d..e0795eb 100644 ---- a/arch/ia64/include/asm/spinlock.h -+++ b/arch/ia64/include/asm/spinlock.h -@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) - unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; - - asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); -- ACCESS_ONCE(*p) = (tmp + 2) & ~1; -+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; - } - - static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) -diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h -index 449c8c0..432a3d2 100644 ---- a/arch/ia64/include/asm/uaccess.h -+++ b/arch/ia64/include/asm/uaccess.h -@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) - const void *__cu_from = (from); \ - long __cu_len = (n); \ - \ -- if (__access_ok(__cu_to, __cu_len, get_fs())) \ -+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \ - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ - __cu_len; \ - }) -@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) - long __cu_len = (n); \ - \ - __chk_user_ptr(__cu_from); \ -- if (__access_ok(__cu_from, __cu_len, get_fs())) \ -+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \ - __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ - __cu_len; \ - }) -diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c -index 24603be..948052d 100644 ---- a/arch/ia64/kernel/module.c -+++ b/arch/ia64/kernel/module.c -@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) - void - module_free (struct module *mod, void *module_region) - { -- if (mod && mod->arch.init_unw_table && -- module_region == mod->module_init) { -+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { - unw_remove_unwind_table(mod->arch.init_unw_table); - mod->arch.init_unw_table = NULL; - } -@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char 
*secstrings, - } - - static inline int -+in_init_rx (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; -+} -+ -+static inline int -+in_init_rw (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; -+} -+ -+static inline int - in_init (const struct module *mod, uint64_t addr) - { -- return addr - (uint64_t) mod->module_init < mod->init_size; -+ return in_init_rx(mod, addr) || in_init_rw(mod, addr); -+} -+ -+static inline int -+in_core_rx (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; -+} -+ -+static inline int -+in_core_rw (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; - } - - static inline int - in_core (const struct module *mod, uint64_t addr) - { -- return addr - (uint64_t) mod->module_core < mod->core_size; -+ return in_core_rx(mod, addr) || in_core_rw(mod, addr); - } - - static inline int -@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, - break; - - case RV_BDREL: -- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); -+ if (in_init_rx(mod, val)) -+ val -= (uint64_t) mod->module_init_rx; -+ else if (in_init_rw(mod, val)) -+ val -= (uint64_t) mod->module_init_rw; -+ else if (in_core_rx(mod, val)) -+ val -= (uint64_t) mod->module_core_rx; -+ else if (in_core_rw(mod, val)) -+ val -= (uint64_t) mod->module_core_rw; - break; - - case RV_LTV: -@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind - * addresses have been selected... - */ - uint64_t gp; -- if (mod->core_size > MAX_LTOFF) -+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) - /* - * This takes advantage of fact that SHF_ARCH_SMALL gets allocated - * at the end of the module. - */ -- gp = mod->core_size - MAX_LTOFF / 2; -+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; - else -- gp = mod->core_size / 2; -- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); -+ gp = (mod->core_size_rx + mod->core_size_rw) / 2; -+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); - mod->arch.gp = gp; - DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); - } -diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c -index 609d500..7dde2a8 100644 ---- a/arch/ia64/kernel/sys_ia64.c -+++ b/arch/ia64/kernel/sys_ia64.c -@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len - if (REGION_NUMBER(addr) == RGN_HPAGE) - addr = 0; - #endif -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ addr = mm->free_area_cache; -+ else -+#endif -+ - if (!addr) - addr = mm->free_area_cache; - -@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { - /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { -- if (start_addr != TASK_UNMAPPED_BASE) { -+ if (start_addr != mm->mmap_base) { - /* Start a new search --- just in case we missed some holes. 
*/ -- addr = TASK_UNMAPPED_BASE; -+ addr = mm->mmap_base; - goto full_search; - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* Remember the address where we stopped this search: */ - mm->free_area_cache = addr + len; - return addr; -diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S -index 53c0ba0..2accdde 100644 ---- a/arch/ia64/kernel/vmlinux.lds.S -+++ b/arch/ia64/kernel/vmlinux.lds.S -@@ -199,7 +199,7 @@ SECTIONS { - /* Per-cpu data: */ - . = ALIGN(PERCPU_PAGE_SIZE); - PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) -- __phys_per_cpu_start = __per_cpu_load; -+ __phys_per_cpu_start = per_cpu_load; - /* - * ensure percpu data fits - * into percpu page size -diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c -index 20b3593..1ce77f0 100644 ---- a/arch/ia64/mm/fault.c -+++ b/arch/ia64/mm/fault.c -@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address) - return pte_present(pte); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - void __kprobes - ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) - { -@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re - mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) - | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); - -- if ((vma->vm_flags & mask) != mask) -+ if ((vma->vm_flags & mask) != mask) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { -+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) -+ goto bad_area; -+ -+ up_read(&mm->mmap_sem); -+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - goto bad_area; - -+ } -+ - /* - * If for any reason at all we couldn't handle the fault, make - * sure we exit gracefully rather than endlessly redo the -diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c -index 5ca674b..e0e1b70 100644 ---- a/arch/ia64/mm/hugetlbpage.c -+++ b/arch/ia64/mm/hugetlbpage.c -@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u - /* At this point: (!vmm || addr < vmm->vm_end). 
*/ - if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) - return -ENOMEM; -- if (!vmm || (addr + len) <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len)) - return addr; - addr = ALIGN(vmm->vm_end, HPAGE_SIZE); - } -diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c -index 00cb0e2..2ad8024 100644 ---- a/arch/ia64/mm/init.c -+++ b/arch/ia64/mm/init.c -@@ -120,6 +120,19 @@ ia64_init_addr_space (void) - vma->vm_start = current->thread.rbs_bot & PAGE_MASK; - vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { -+ vma->vm_flags &= ~VM_EXEC; -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (current->mm->pax_flags & MF_PAX_MPROTECT) -+ vma->vm_flags &= ~VM_MAYEXEC; -+#endif -+ -+ } -+#endif -+ - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - down_write(¤t->mm->mmap_sem); - if (insert_vm_struct(current->mm, vma)) { -diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c -index 82abd15..d95ae5d 100644 ---- a/arch/m32r/lib/usercopy.c -+++ b/arch/m32r/lib/usercopy.c -@@ -14,6 +14,9 @@ - unsigned long - __generic_copy_to_user(void __user *to, const void *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - prefetch(from); - if (access_ok(VERIFY_WRITE, to, n)) - __copy_user(to,from,n); -@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) - unsigned long - __generic_copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - prefetchw(to); - if (access_ok(VERIFY_READ, from, n)) - __copy_user_zeroing(to,from,n); -diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h -index 455c0ac..ad65fbe 100644 ---- a/arch/mips/include/asm/elf.h -+++ b/arch/mips/include/asm/elf.h -@@ -372,13 +372,16 @@ extern const char *__elf_platform; - #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) - #endif - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 - struct linux_binprm; - extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int uses_interp); - --struct mm_struct; --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #endif /* _ASM_ELF_H */ -diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h -index e59cd1a..8e329d6 100644 ---- a/arch/mips/include/asm/page.h -+++ b/arch/mips/include/asm/page.h -@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, - #ifdef CONFIG_CPU_MIPS32 - typedef struct { unsigned long pte_low, pte_high; } pte_t; - #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) -- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) -+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) - #else - typedef struct { unsigned long long pte; } pte_t; - #define pte_val(x) ((x).pte) -diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h -index 6018c80..7c37203 100644 ---- a/arch/mips/include/asm/system.h -+++ b/arch/mips/include/asm/system.h -@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void); - */ - #define __ARCH_WANT_UNLOCKED_CTXSW - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - #endif /* _ASM_SYSTEM_H */ -diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c -index 9fdd8bc..4bd7f1a 100644 ---- a/arch/mips/kernel/binfmt_elfn32.c -+++ b/arch/mips/kernel/binfmt_elfn32.c -@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - #undef ELF_ET_DYN_BASE - #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #include <asm/processor.h> - #include <linux/module.h> - #include <linux/elfcore.h> -diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c -index ff44823..97f8906 100644 ---- a/arch/mips/kernel/binfmt_elfo32.c -+++ b/arch/mips/kernel/binfmt_elfo32.c -@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - #undef ELF_ET_DYN_BASE - #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #include <asm/processor.h> - - /* -diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c -index b30cb25..454c0a9 100644 ---- a/arch/mips/kernel/process.c -+++ b/arch/mips/kernel/process.c -@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task) - out: - return pc; - } -- --/* -- * Don't forget that the stack pointer must be aligned on a 8 bytes -- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. 
-- */ --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() & ~PAGE_MASK; -- -- return sp & ALMASK; --} -diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c -index 937cf33..adb39bb 100644 ---- a/arch/mips/mm/fault.c -+++ b/arch/mips/mm/fault.c -@@ -28,6 +28,23 @@ - #include <asm/highmem.h> /* For VMALLOC_END */ - #include <linux/kdebug.h> - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * This routine handles page faults. It determines the address, - * and the problem, and then passes it off to one of the appropriate -diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c -index 302d779..7d35bf8 100644 ---- a/arch/mips/mm/mmap.c -+++ b/arch/mips/mm/mmap.c -@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - do_color_align = 1; - - /* requesting a specific address */ -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); -@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len)) - return addr; - } - -@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - /* At this point: (!vma || addr < vma->vm_end). 
*/ - if (TASK_SIZE - len < addr) - return -ENOMEM; -- if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len)) - return addr; - addr = vma->vm_end; - if (do_color_align) -@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - /* make sure it can fit in the remaining address space */ - if (likely(addr > len)) { - vma = find_vma(mm, addr - len); -- if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vmm, addr - len, len)) - /* cache the address as a hint for next time */ - return mm->free_area_cache = addr - len; - } -@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - * return with success: - */ - vma = find_vma(mm, addr); -- if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (check_heap_stack_gap(vmm, addr, len)) { - /* cache the address as a hint for next time */ - return mm->free_area_cache = addr; - } -@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - mm->unmap_area = arch_unmap_area_topdown; - } - } -- --static inline unsigned long brk_rnd(void) --{ -- unsigned long rnd = get_random_int(); -- -- rnd = rnd << PAGE_SHIFT; -- /* 8MB for 32bit, 256MB for 64bit */ -- if (TASK_IS_32BIT_ADDR) -- rnd = rnd & 0x7ffffful; -- else -- rnd = rnd & 0xffffffful; -- -- return rnd; --} -- --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long base = mm->brk; -- unsigned long ret; -- -- ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < mm->brk) -- return mm->brk; -- -- return ret; --} -diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h -index 19f6cb1..6c78cf2 100644 ---- a/arch/parisc/include/asm/elf.h -+++ b/arch/parisc/include/asm/elf.h -@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ - - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x10000UL -+ -+#define PAX_DELTA_MMAP_LEN 16 -+#define PAX_DELTA_STACK_LEN 16 -+#endif -+ - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This could be done in user space, - but it's not easy, and we've already done it here. 
*/ -diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h -index 22dadeb..f6c2be4 100644 ---- a/arch/parisc/include/asm/pgtable.h -+++ b/arch/parisc/include/asm/pgtable.h -@@ -210,6 +210,17 @@ struct vm_area_struct; - #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) - #define PAGE_COPY PAGE_EXECREAD - #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) -+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) -+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_COPY_NOEXEC PAGE_COPY -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+#endif -+ - #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) - #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) - #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) -diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c -index 5e34ccf..672bc9c 100644 ---- a/arch/parisc/kernel/module.c -+++ b/arch/parisc/kernel/module.c -@@ -98,16 +98,38 @@ - - /* three functions to determine where in the module core - * or init pieces the location is */ -+static inline int in_init_rx(struct module *me, void *loc) -+{ -+ return (loc >= me->module_init_rx && -+ loc < (me->module_init_rx + me->init_size_rx)); -+} -+ -+static inline int in_init_rw(struct module *me, void *loc) -+{ -+ return (loc >= me->module_init_rw && -+ loc < (me->module_init_rw + me->init_size_rw)); -+} -+ - static inline int in_init(struct module *me, void *loc) - { -- return (loc >= me->module_init && -- loc <= (me->module_init + me->init_size)); -+ return in_init_rx(me, loc) || in_init_rw(me, loc); -+} -+ -+static inline int in_core_rx(struct module *me, void *loc) -+{ -+ return (loc >= me->module_core_rx && -+ loc < (me->module_core_rx + me->core_size_rx)); -+} -+ -+static inline int in_core_rw(struct module *me, void *loc) -+{ -+ return (loc >= me->module_core_rw && -+ loc < (me->module_core_rw + me->core_size_rw)); - } - - static inline int in_core(struct module *me, void *loc) - { -- return (loc >= me->module_core && -- loc <= (me->module_core + me->core_size)); -+ return in_core_rx(me, loc) || in_core_rw(me, loc); - } - - static inline int in_local(struct module *me, void *loc) -@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, - } - - /* align things a bit */ -- me->core_size = ALIGN(me->core_size, 16); -- me->arch.got_offset = me->core_size; -- me->core_size += gots * sizeof(struct got_entry); -+ me->core_size_rw = ALIGN(me->core_size_rw, 16); -+ me->arch.got_offset = me->core_size_rw; -+ me->core_size_rw += gots * sizeof(struct got_entry); - -- me->core_size = ALIGN(me->core_size, 16); -- me->arch.fdesc_offset = me->core_size; -- me->core_size += fdescs * sizeof(Elf_Fdesc); -+ me->core_size_rw = ALIGN(me->core_size_rw, 16); -+ me->arch.fdesc_offset = me->core_size_rw; -+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); - - me->arch.got_max = gots; - me->arch.fdesc_max = fdescs; -@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) - - BUG_ON(value == 0); - -- got = me->module_core + me->arch.got_offset; -+ got = me->module_core_rw + me->arch.got_offset; - for (i = 0; got[i].addr; i++) - if 
(got[i].addr == value) - goto out; -@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) - #ifdef CONFIG_64BIT - static Elf_Addr get_fdesc(struct module *me, unsigned long value) - { -- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; -+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; - - if (!value) { - printk(KERN_ERR "%s: zero OPD requested!\n", me->name); -@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) - - /* Create new one */ - fdesc->addr = value; -- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; -+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; - return (Elf_Addr)fdesc; - } - #endif /* CONFIG_64BIT */ -@@ -845,7 +867,7 @@ register_unwind_table(struct module *me, - - table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; - end = table + sechdrs[me->arch.unwind_section].sh_size; -- gp = (Elf_Addr)me->module_core + me->arch.got_offset; -+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; - - DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", - me->arch.unwind_section, table, end, gp); -diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index c9b9322..02d8940 100644 ---- a/arch/parisc/kernel/sys_parisc.c -+++ b/arch/parisc/kernel/sys_parisc.c -@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len) - /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr) - return -ENOMEM; -- if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - return addr; - addr = vma->vm_end; - } -@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping, - /* At this point: (!vma || addr < vma->vm_end). 
*/ - if (TASK_SIZE - len < addr) - return -ENOMEM; -- if (!vma || addr + len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - return addr; - addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; - if (addr < vma->vm_end) /* handle wraparound */ -@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - if (flags & MAP_FIXED) - return addr; - if (!addr) -- addr = TASK_UNMAPPED_BASE; -+ addr = current->mm->mmap_base; - - if (filp) { - addr = get_shared_area(filp->f_mapping, addr, len, pgoff); -diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c -index f19e660..414fe24 100644 ---- a/arch/parisc/kernel/traps.c -+++ b/arch/parisc/kernel/traps.c -@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm,regs->iaoq[0]); -- if (vma && (regs->iaoq[0] >= vma->vm_start) -- && (vma->vm_flags & VM_EXEC)) { -- -+ if (vma && (regs->iaoq[0] >= vma->vm_start)) { - fault_address = regs->iaoq[0]; - fault_space = regs->iasq[0]; - -diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c -index 18162ce..94de376 100644 ---- a/arch/parisc/mm/fault.c -+++ b/arch/parisc/mm/fault.c -@@ -15,6 +15,7 @@ - #include <linux/sched.h> - #include <linux/interrupt.h> - #include <linux/module.h> -+#include <linux/unistd.h> - - #include <asm/uaccess.h> - #include <asm/traps.h> -@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data); - static unsigned long - parisc_acctyp(unsigned long code, unsigned int inst) - { -- if (code == 6 || code == 16) -+ if (code == 6 || code == 7 || code == 16) - return VM_EXEC; - - switch (inst & 0xf0000000) { -@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) - } - #endif - -+#ifdef CONFIG_PAX_PAGEEXEC -+/* -+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when rt_sigreturn trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: unpatched PLT emulation */ -+ unsigned int bl, depwi; -+ -+ err = get_user(bl, (unsigned int *)instruction_pointer(regs)); -+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); -+ -+ if (err) -+ break; -+ -+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { -+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; -+ -+ err = get_user(ldw, (unsigned int *)addr); -+ err |= get_user(bv, (unsigned int *)(addr+4)); -+ err |= get_user(ldw2, (unsigned int *)(addr+8)); -+ -+ if (err) -+ break; -+ -+ if (ldw == 0x0E801096U && -+ bv == 0xEAC0C000U && -+ ldw2 == 0x0E881095U) -+ { -+ unsigned int resolver, map; -+ -+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); -+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); -+ if (err) -+ break; -+ -+ regs->gr[20] = instruction_pointer(regs)+8; -+ regs->gr[21] = map; -+ regs->gr[22] = resolver; -+ regs->iaoq[0] = resolver | 3UL; -+ regs->iaoq[1] = regs->iaoq[0] + 4; -+ return 3; -+ } -+ } -+ } while (0); -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ -+#ifndef CONFIG_PAX_EMUSIGRT -+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) -+ return 1; -+#endif -+ -+ do { /* PaX: rt_sigreturn emulation */ -+ unsigned int ldi1, ldi2, bel, nop; -+ -+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); -+ err |= get_user(ldi2, (unsigned 
int *)(instruction_pointer(regs)+4)); -+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); -+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); -+ -+ if (err) -+ break; -+ -+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && -+ ldi2 == 0x3414015AU && -+ bel == 0xE4008200U && -+ nop == 0x08000240U) -+ { -+ regs->gr[25] = (ldi1 & 2) >> 1; -+ regs->gr[20] = __NR_rt_sigreturn; -+ regs->gr[31] = regs->iaoq[1] + 16; -+ regs->sr[0] = regs->iasq[1]; -+ regs->iaoq[0] = 0x100UL; -+ regs->iaoq[1] = regs->iaoq[0] + 4; -+ regs->iasq[0] = regs->sr[2]; -+ regs->iasq[1] = regs->sr[2]; -+ return 2; -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - int fixup_exception(struct pt_regs *regs) - { - const struct exception_table_entry *fix; -@@ -192,8 +303,33 @@ good_area: - - acc_type = parisc_acctyp(code,regs->iir); - -- if ((vma->vm_flags & acc_type) != acc_type) -+ if ((vma->vm_flags & acc_type) != acc_type) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && -+ (address & ~3UL) == instruction_pointer(regs)) -+ { -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 3: -+ return; -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ case 2: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - goto bad_area; -+ } - - /* - * If for any reason at all we couldn't handle the fault, make -diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h -index 3bf9cca..e7457d0 100644 ---- a/arch/powerpc/include/asm/elf.h -+++ b/arch/powerpc/include/asm/elf.h -@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --extern unsigned long randomize_et_dyn(unsigned long base); --#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) -+#define ELF_ET_DYN_BASE (0x20000000) -+ -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (0x10000000UL) -+ -+#ifdef __powerpc64__ -+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) -+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 
16 : 28) -+#else -+#define PAX_DELTA_MMAP_LEN 15 -+#define PAX_DELTA_STACK_LEN 15 -+#endif -+#endif - - /* - * Our registers are always unsigned longs, whether we're a 32 bit -@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, - (0x7ff >> (PAGE_SHIFT - 12)) : \ - (0x3ffff >> (PAGE_SHIFT - 12))) - --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #endif /* __KERNEL__ */ - - /* -diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h -index bca8fdc..61e9580 100644 ---- a/arch/powerpc/include/asm/kmap_types.h -+++ b/arch/powerpc/include/asm/kmap_types.h -@@ -27,6 +27,7 @@ enum km_type { - KM_PPC_SYNC_PAGE, - KM_PPC_SYNC_ICACHE, - KM_KDB, -+ KM_CLEARPAGE, - KM_TYPE_NR - }; - -diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h -index d4a7f64..451de1c 100644 ---- a/arch/powerpc/include/asm/mman.h -+++ b/arch/powerpc/include/asm/mman.h -@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) - } - #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) - --static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) -+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) - { - return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); - } -diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h -index 2cd664e..1d2e8a7 100644 ---- a/arch/powerpc/include/asm/page.h -+++ b/arch/powerpc/include/asm/page.h -@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr; - * and needs to be executable. This means the whole heap ends - * up being executable. - */ --#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ -- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -+#define VM_DATA_DEFAULT_FLAGS32 \ -+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ -+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr; - #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) - #endif - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - #ifndef __ASSEMBLY__ - - #undef STRICT_MM_TYPECHECKS -diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h -index 9356262..ea96148 100644 ---- a/arch/powerpc/include/asm/page_64.h -+++ b/arch/powerpc/include/asm/page_64.h -@@ -155,15 +155,18 @@ do { \ - * stack by default, so in the absence of a PT_GNU_STACK program header - * we turn execute permission off. - */ --#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ -- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -+#define VM_STACK_DEFAULT_FLAGS32 \ -+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ -+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -+#ifndef CONFIG_PAX_PAGEEXEC - #define VM_STACK_DEFAULT_FLAGS \ - (is_32bit_task() ? 
\ - VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) -+#endif - - #include <asm-generic/getorder.h> - -diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h -index 88b0bd9..e32bc67 100644 ---- a/arch/powerpc/include/asm/pgtable.h -+++ b/arch/powerpc/include/asm/pgtable.h -@@ -2,6 +2,7 @@ - #define _ASM_POWERPC_PGTABLE_H - #ifdef __KERNEL__ - -+#include <linux/const.h> - #ifndef __ASSEMBLY__ - #include <asm/processor.h> /* For TASK_SIZE */ - #include <asm/mmu.h> -diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h -index 4aad413..85d86bf 100644 ---- a/arch/powerpc/include/asm/pte-hash32.h -+++ b/arch/powerpc/include/asm/pte-hash32.h -@@ -21,6 +21,7 @@ - #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ - #define _PAGE_USER 0x004 /* usermode access allowed */ - #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ -+#define _PAGE_EXEC _PAGE_GUARDED - #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ - #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ - #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ -diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h -index 559da19..7e5835c 100644 ---- a/arch/powerpc/include/asm/reg.h -+++ b/arch/powerpc/include/asm/reg.h -@@ -212,6 +212,7 @@ - #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ - #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ - #define DSISR_NOHPTE 0x40000000 /* no translation found */ -+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ - #define DSISR_PROTFAULT 0x08000000 /* protection fault */ - #define DSISR_ISSTORE 0x02000000 /* access was a store */ - #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ -diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h -index e30a13d..2b7d994 100644 ---- a/arch/powerpc/include/asm/system.h -+++ b/arch/powerpc/include/asm/system.h -@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - #endif - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - /* Used in very early kernel initialization. */ - extern unsigned long reloc_offset(void); -diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h -index bd0fb84..a42a14b 100644 ---- a/arch/powerpc/include/asm/uaccess.h -+++ b/arch/powerpc/include/asm/uaccess.h -@@ -13,6 +13,8 @@ - #define VERIFY_READ 0 - #define VERIFY_WRITE 1 - -+extern void check_object_size(const void *ptr, unsigned long n, bool to); -+ - /* - * The fs value determines whether argument validity checking should be - * performed or not. 
If get_fs() == USER_DS, checking is performed, with -@@ -327,52 +329,6 @@ do { \ - extern unsigned long __copy_tofrom_user(void __user *to, - const void __user *from, unsigned long size); - --#ifndef __powerpc64__ -- --static inline unsigned long copy_from_user(void *to, -- const void __user *from, unsigned long n) --{ -- unsigned long over; -- -- if (access_ok(VERIFY_READ, from, n)) -- return __copy_tofrom_user((__force void __user *)to, from, n); -- if ((unsigned long)from < TASK_SIZE) { -- over = (unsigned long)from + n - TASK_SIZE; -- return __copy_tofrom_user((__force void __user *)to, from, -- n - over) + over; -- } -- return n; --} -- --static inline unsigned long copy_to_user(void __user *to, -- const void *from, unsigned long n) --{ -- unsigned long over; -- -- if (access_ok(VERIFY_WRITE, to, n)) -- return __copy_tofrom_user(to, (__force void __user *)from, n); -- if ((unsigned long)to < TASK_SIZE) { -- over = (unsigned long)to + n - TASK_SIZE; -- return __copy_tofrom_user(to, (__force void __user *)from, -- n - over) + over; -- } -- return n; --} -- --#else /* __powerpc64__ */ -- --#define __copy_in_user(to, from, size) \ -- __copy_tofrom_user((to), (from), (size)) -- --extern unsigned long copy_from_user(void *to, const void __user *from, -- unsigned long n); --extern unsigned long copy_to_user(void __user *to, const void *from, -- unsigned long n); --extern unsigned long copy_in_user(void __user *to, const void __user *from, -- unsigned long n); -- --#endif /* __powerpc64__ */ -- - static inline unsigned long __copy_from_user_inatomic(void *to, - const void __user *from, unsigned long n) - { -@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, - if (ret == 0) - return 0; - } -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ - return __copy_tofrom_user((__force void __user *)to, from, n); - } - -@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, - if (ret == 0) - return 0; - } -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ - return __copy_tofrom_user(to, (__force const void __user *)from, n); - } - -@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to, - return __copy_to_user_inatomic(to, from, size); - } - -+#ifndef __powerpc64__ -+ -+static inline unsigned long __must_check copy_from_user(void *to, -+ const void __user *from, unsigned long n) -+{ -+ unsigned long over; -+ -+ if ((long)n < 0) -+ return n; -+ -+ if (access_ok(VERIFY_READ, from, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ return __copy_tofrom_user((__force void __user *)to, from, n); -+ } -+ if ((unsigned long)from < TASK_SIZE) { -+ over = (unsigned long)from + n - TASK_SIZE; -+ if (!__builtin_constant_p(n - over)) -+ check_object_size(to, n - over, false); -+ return __copy_tofrom_user((__force void __user *)to, from, -+ n - over) + over; -+ } -+ return n; -+} -+ -+static inline unsigned long __must_check copy_to_user(void __user *to, -+ const void *from, unsigned long n) -+{ -+ unsigned long over; -+ -+ if ((long)n < 0) -+ return n; -+ -+ if (access_ok(VERIFY_WRITE, to, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ return __copy_tofrom_user(to, (__force void __user *)from, n); -+ } -+ if ((unsigned long)to < TASK_SIZE) { -+ over = (unsigned long)to + n - TASK_SIZE; -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n - over, true); -+ return __copy_tofrom_user(to, (__force void __user 
*)from, -+ n - over) + over; -+ } -+ return n; -+} -+ -+#else /* __powerpc64__ */ -+ -+#define __copy_in_user(to, from, size) \ -+ __copy_tofrom_user((to), (from), (size)) -+ -+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) -+{ -+ if ((long)n < 0 || n > INT_MAX) -+ return n; -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ -+ if (likely(access_ok(VERIFY_READ, from, n))) -+ n = __copy_from_user(to, from, n); -+ else -+ memset(to, 0, n); -+ return n; -+} -+ -+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ if ((long)n < 0 || n > INT_MAX) -+ return n; -+ -+ if (likely(access_ok(VERIFY_WRITE, to, n))) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ n = __copy_to_user(to, from, n); -+ } -+ return n; -+} -+ -+extern unsigned long copy_in_user(void __user *to, const void __user *from, -+ unsigned long n); -+ -+#endif /* __powerpc64__ */ -+ - extern unsigned long __clear_user(void __user *addr, unsigned long size); - - static inline unsigned long clear_user(void __user *addr, unsigned long size) -diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S -index 429983c..7af363b 100644 ---- a/arch/powerpc/kernel/exceptions-64e.S -+++ b/arch/powerpc/kernel/exceptions-64e.S -@@ -587,6 +587,7 @@ storage_fault_common: - std r14,_DAR(r1) - std r15,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+ bl .save_nvgprs - mr r4,r14 - mr r5,r15 - ld r14,PACA_EXGEN+EX_R14(r13) -@@ -596,8 +597,7 @@ storage_fault_common: - cmpdi r3,0 - bne- 1f - b .ret_from_except_lite --1: bl .save_nvgprs -- mr r5,r3 -+1: mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl .bad_page_fault -diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S -index 41b02c7..05e76fb 100644 ---- a/arch/powerpc/kernel/exceptions-64s.S -+++ b/arch/powerpc/kernel/exceptions-64s.S -@@ -1014,10 +1014,10 @@ handle_page_fault: - 11: ld r4,_DAR(r1) - ld r5,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+ bl .save_nvgprs - bl .do_page_fault - cmpdi r3,0 - beq+ 13f -- bl .save_nvgprs - mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) -diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c -index 0b6d796..d760ddb 100644 ---- a/arch/powerpc/kernel/module_32.c -+++ b/arch/powerpc/kernel/module_32.c -@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, - me->arch.core_plt_section = i; - } - if (!me->arch.core_plt_section || !me->arch.init_plt_section) { -- printk("Module doesn't contain .plt or .init.plt sections.\n"); -+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); - return -ENOEXEC; - } - -@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location, - - DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); - /* Init, or core PLT? 
*/ -- if (location >= mod->module_core -- && location < mod->module_core + mod->core_size) -+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || -+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) - entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; -- else -+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || -+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) - entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; -+ else { -+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); -+ return ~0UL; -+ } - - /* Find this entry, or if that fails, the next avail. entry */ - while (entry->jump[0]) { -diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c -index 8f53954..a704ad6 100644 ---- a/arch/powerpc/kernel/process.c -+++ b/arch/powerpc/kernel/process.c -@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs) - * Lookup NIP late so we have the best change of getting the - * above info out without failing - */ -- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); -- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); -+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); -+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); - #endif - show_stack(current, (unsigned long *) regs->gpr[1]); - if (!user_mode(regs)) -@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) - newsp = stack[0]; - ip = stack[STACK_FRAME_LR_SAVE]; - if (!firstframe || ip != lr) { -- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); -+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); - #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if ((ip == rth || ip == mrth) && curr_frame >= 0) { -- printk(" (%pS)", -+ printk(" (%pA)", - (void *)current->ret_stack[curr_frame].ret); - curr_frame--; - } -@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) - struct pt_regs *regs = (struct pt_regs *) - (sp + STACK_FRAME_OVERHEAD); - lr = regs->link; -- printk("--- Exception: %lx at %pS\n LR = %pS\n", -+ printk("--- Exception: %lx at %pA\n LR = %pA\n", - regs->trap, (void *)regs->nip, (void *)lr); - firstframe = 1; - } -@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void) - } - - #endif /* THREAD_SHIFT < PAGE_SHIFT */ -- --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() & ~PAGE_MASK; -- return sp & ~0xf; --} -- --static inline unsigned long brk_rnd(void) --{ -- unsigned long rnd = 0; -- -- /* 8MB for 32bit, 1GB for 64bit */ -- if (is_32bit_task()) -- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); -- else -- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); -- -- return rnd << PAGE_SHIFT; --} -- --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long base = mm->brk; -- unsigned long ret; -- --#ifdef CONFIG_PPC_STD_MMU_64 -- /* -- * If we are using 1TB segments and we are allowed to randomise -- * the heap, we can put it above 1TB so it is backed by a 1TB -- * segment. Otherwise the heap will be in the bottom 1TB -- * which always uses 256MB segments and this may result in a -- * performance penalty. 
-- */ -- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) -- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); --#endif -- -- ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < mm->brk) -- return mm->brk; -- -- return ret; --} -- --unsigned long randomize_et_dyn(unsigned long base) --{ -- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < base) -- return base; -- -- return ret; --} -diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c -index 78b76dc..7f232ef 100644 ---- a/arch/powerpc/kernel/signal_32.c -+++ b/arch/powerpc/kernel/signal_32.c -@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, - /* Save user registers on the stack */ - frame = &rt_sf->uc.uc_mcontext; - addr = frame; -- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { -+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { - if (save_user_regs(regs, frame, 0, 1)) - goto badframe; - regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; -diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c -index e91c736..742ec06 100644 ---- a/arch/powerpc/kernel/signal_64.c -+++ b/arch/powerpc/kernel/signal_64.c -@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, - current->thread.fpscr.val = 0; - - /* Set up to return from userspace. */ -- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { -+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { - regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; - } else { - err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); -diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c -index f19d977..8ac286e 100644 ---- a/arch/powerpc/kernel/traps.c -+++ b/arch/powerpc/kernel/traps.c -@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void) - static inline void pmac_backlight_unblank(void) { } - #endif - -+extern void gr_handle_kernel_exploit(void); -+ - int die(const char *str, struct pt_regs *regs, long err) - { - static struct { -@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err) - if (panic_on_oops) - panic("Fatal exception"); - -+ gr_handle_kernel_exploit(); -+ - oops_exit(); - do_exit(err); - -diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c -index 142ab10..236e61a 100644 ---- a/arch/powerpc/kernel/vdso.c -+++ b/arch/powerpc/kernel/vdso.c -@@ -36,6 +36,7 @@ - #include <asm/firmware.h> - #include <asm/vdso.h> - #include <asm/vdso_datapage.h> -+#include <asm/mman.h> - - #include "setup.h" - -@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - vdso_base = VDSO32_MBASE; - #endif - -- current->mm->context.vdso_base = 0; -+ current->mm->context.vdso_base = ~0UL; - - /* vDSO has a problem and was disabled, just don't "enable" it for the - * process -@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - vdso_base = get_unmapped_area(NULL, vdso_base, - (vdso_pages << PAGE_SHIFT) + - ((VDSO_ALIGNMENT - 1) & PAGE_MASK), -- 0, 0); -+ 0, MAP_PRIVATE | MAP_EXECUTABLE); - if (IS_ERR_VALUE(vdso_base)) { - rc = vdso_base; - goto fail_mmapsem; -diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c -index 5eea6f3..5d10396 100644 ---- a/arch/powerpc/lib/usercopy_64.c -+++ b/arch/powerpc/lib/usercopy_64.c -@@ -9,22 +9,6 @@ - #include <linux/module.h> - #include <asm/uaccess.h> - --unsigned long 
copy_from_user(void *to, const void __user *from, unsigned long n) --{ -- if (likely(access_ok(VERIFY_READ, from, n))) -- n = __copy_from_user(to, from, n); -- else -- memset(to, 0, n); -- return n; --} -- --unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) --{ -- if (likely(access_ok(VERIFY_WRITE, to, n))) -- n = __copy_to_user(to, from, n); -- return n; --} -- - unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n) - { -@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, - return n; - } - --EXPORT_SYMBOL(copy_from_user); --EXPORT_SYMBOL(copy_to_user); - EXPORT_SYMBOL(copy_in_user); - -diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c -index 5efe8c9..db9ceef 100644 ---- a/arch/powerpc/mm/fault.c -+++ b/arch/powerpc/mm/fault.c -@@ -32,6 +32,10 @@ - #include <linux/perf_event.h> - #include <linux/magic.h> - #include <linux/ratelimit.h> -+#include <linux/slab.h> -+#include <linux/pagemap.h> -+#include <linux/compiler.h> -+#include <linux/unistd.h> - - #include <asm/firmware.h> - #include <asm/page.h> -@@ -43,6 +47,7 @@ - #include <asm/tlbflush.h> - #include <asm/siginfo.h> - #include <mm/mmu_decl.h> -+#include <asm/ptrace.h> - - #ifdef CONFIG_KPROBES - static inline int notify_page_fault(struct pt_regs *regs) -@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs) - } - #endif - -+#ifdef CONFIG_PAX_PAGEEXEC -+/* -+ * PaX: decide what to do with offenders (regs->nip = fault address) -+ * -+ * returns 1 when task should be killed -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int __user *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * Check whether the instruction at regs->nip is a store using - * an update addressing form which will update r1. -@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, - * indicate errors in DSISR but can validly be set in SRR1. - */ - if (trap == 0x400) -- error_code &= 0x48200000; -+ error_code &= 0x58200000; - else - is_write = error_code & DSISR_ISSTORE; - #else -@@ -259,7 +291,7 @@ good_area: - * "undefined". Of those that can be set, this is the only - * one which seems bad. - */ -- if (error_code & 0x10000000) -+ if (error_code & DSISR_GUARDED) - /* Guarded storage error. */ - goto bad_area; - #endif /* CONFIG_8xx */ -@@ -274,7 +306,7 @@ good_area: - * processors use the same I/D cache coherency mechanism - * as embedded. 
- */ -- if (error_code & DSISR_PROTFAULT) -+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) - goto bad_area; - #endif /* CONFIG_PPC_STD_MMU */ - -@@ -343,6 +375,23 @@ bad_area: - bad_area_nosemaphore: - /* User mode accesses cause a SIGSEGV */ - if (user_mode(regs)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (mm->pax_flags & MF_PAX_PAGEEXEC) { -+#ifdef CONFIG_PPC_STD_MMU -+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { -+#else -+ if (is_exec && regs->nip == address) { -+#endif -+ switch (pax_handle_fetch_fault(regs)) { -+ } -+ -+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); -+ do_group_exit(SIGKILL); -+ } -+ } -+#endif -+ - _exception(SIGSEGV, regs, code, address); - return 0; - } -diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c -index 5a783d8..c23e14b 100644 ---- a/arch/powerpc/mm/mmap_64.c -+++ b/arch/powerpc/mm/mmap_64.c -@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - */ - if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } else { - mm->mmap_base = mmap_base(); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; - } -diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c -index ba51948..23009d9 100644 ---- a/arch/powerpc/mm/slice.c -+++ b/arch/powerpc/mm/slice.c -@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, - if ((mm->task_size - len) < addr) - return 0; - vma = find_vma(mm, addr); -- return (!vma || (addr + len) <= vma->vm_start); -+ return check_heap_stack_gap(vma, addr, len); - } - - static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) -@@ -256,7 +256,7 @@ full_search: - addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); - continue; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* - * Remember the place where we stopped the search: - */ -@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, - } - } - -- addr = mm->mmap_base; -- while (addr > len) { -+ if (mm->mmap_base < len) -+ addr = -ENOMEM; -+ else -+ addr = mm->mmap_base - len; -+ -+ while (!IS_ERR_VALUE(addr)) { - /* Go down by chunk size */ -- addr = _ALIGN_DOWN(addr - len, 1ul << pshift); -+ addr = _ALIGN_DOWN(addr, 1ul << pshift); - - /* Check for hit with different page size */ - mask = slice_range_to_mask(addr, len); -@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || (addr + len) <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* remember the address as a hint for next time */ - if (use_cache) - mm->free_area_cache = addr; -@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start; -+ addr = skip_heap_stack_gap(vma, len); - } - - /* -@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, - if (fixed && addr > (mm->task_size - len)) - return -EINVAL; - 
-+#ifdef CONFIG_PAX_RANDMMAP -+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) -+ addr = 0; -+#endif -+ - /* If hint, make sure it matches our alignment restrictions */ - if (!fixed && addr) { - addr = _ALIGN_UP(addr, 1ul << pshift); -diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h -index 547f1a6..3fff354 100644 ---- a/arch/s390/include/asm/elf.h -+++ b/arch/s390/include/asm/elf.h -@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled; - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --extern unsigned long randomize_et_dyn(unsigned long base); --#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) -+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) -+ -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) -+ -+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) -+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) -+#endif - - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. */ -@@ -211,7 +217,4 @@ struct linux_binprm; - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 - int arch_setup_additional_pages(struct linux_binprm *, int); - --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #endif -diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h -index 6582f69..b69906f 100644 ---- a/arch/s390/include/asm/system.h -+++ b/arch/s390/include/asm/system.h -@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command); - extern void (*_machine_halt)(void); - extern void (*_machine_power_off)(void); - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - static inline int tprot(unsigned long addr) - { -diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h -index 2b23885..e136e31 100644 ---- a/arch/s390/include/asm/uaccess.h -+++ b/arch/s390/include/asm/uaccess.h -@@ -235,6 +235,10 @@ static inline unsigned long __must_check - copy_to_user(void __user *to, const void *from, unsigned long n) - { - might_fault(); -+ -+ if ((long)n < 0) -+ return n; -+ - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; -@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n) - static inline unsigned long __must_check - __copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - if (__builtin_constant_p(n) && (n <= 256)) - return uaccess.copy_from_user_small(n, from, to); - else -@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) - unsigned int sz = __compiletime_object_size(to); - - might_fault(); -+ -+ if ((long)n < 0) -+ return n; -+ - if (unlikely(sz != -1 && sz < n)) { - copy_from_user_overflow(); - return n; -diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c -index dfcb343..eda788a 100644 ---- a/arch/s390/kernel/module.c -+++ b/arch/s390/kernel/module.c -@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - - /* Increase core size by size of got & plt and set start - offsets for got and plt. 
*/ -- me->core_size = ALIGN(me->core_size, 4); -- me->arch.got_offset = me->core_size; -- me->core_size += me->arch.got_size; -- me->arch.plt_offset = me->core_size; -- me->core_size += me->arch.plt_size; -+ me->core_size_rw = ALIGN(me->core_size_rw, 4); -+ me->arch.got_offset = me->core_size_rw; -+ me->core_size_rw += me->arch.got_size; -+ me->arch.plt_offset = me->core_size_rx; -+ me->core_size_rx += me->arch.plt_size; - return 0; - } - -@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - if (info->got_initialized == 0) { - Elf_Addr *gotent; - -- gotent = me->module_core + me->arch.got_offset + -+ gotent = me->module_core_rw + me->arch.got_offset + - info->got_offset; - *gotent = val; - info->got_initialized = 1; -@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - else if (r_type == R_390_GOTENT || - r_type == R_390_GOTPLTENT) - *(unsigned int *) loc = -- (val + (Elf_Addr) me->module_core - loc) >> 1; -+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1; - else if (r_type == R_390_GOT64 || - r_type == R_390_GOTPLT64) - *(unsigned long *) loc = val; -@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ - if (info->plt_initialized == 0) { - unsigned int *ip; -- ip = me->module_core + me->arch.plt_offset + -+ ip = me->module_core_rx + me->arch.plt_offset + - info->plt_offset; - #ifndef CONFIG_64BIT - ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ -@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - val - loc + 0xffffUL < 0x1ffffeUL) || - (r_type == R_390_PLT32DBL && - val - loc + 0xffffffffULL < 0x1fffffffeULL))) -- val = (Elf_Addr) me->module_core + -+ val = (Elf_Addr) me->module_core_rx + - me->arch.plt_offset + - info->plt_offset; - val += rela->r_addend - loc; -@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - case R_390_GOTOFF32: /* 32 bit offset to GOT. */ - case R_390_GOTOFF64: /* 64 bit offset to GOT. */ - val = val + rela->r_addend - -- ((Elf_Addr) me->module_core + me->arch.got_offset); -+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset); - if (r_type == R_390_GOTOFF16) - *(unsigned short *) loc = val; - else if (r_type == R_390_GOTOFF32) -@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - break; - case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ - case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ -- val = (Elf_Addr) me->module_core + me->arch.got_offset + -+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + - rela->r_addend - loc; - if (r_type == R_390_GOTPC) - *(unsigned int *) loc = val; -diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c -index 541a750..8739853 100644 ---- a/arch/s390/kernel/process.c -+++ b/arch/s390/kernel/process.c -@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p) - } - return 0; - } -- --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() & ~PAGE_MASK; -- return sp & ~0xf; --} -- --static inline unsigned long brk_rnd(void) --{ -- /* 8MB for 32bit, 1GB for 64bit */ -- if (is_32bit_task()) -- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; -- else -- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; --} -- --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); -- -- if (ret < mm->brk) -- return mm->brk; -- return ret; --} -- --unsigned long randomize_et_dyn(unsigned long base) --{ -- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (!(current->flags & PF_RANDOMIZE)) -- return base; -- if (ret < base) -- return base; -- return ret; --} -diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c -index 7b371c3..ad06cf1 100644 ---- a/arch/s390/kernel/setup.c -+++ b/arch/s390/kernel/setup.c -@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p) - } - early_param("mem", early_parse_mem); - --unsigned int user_mode = HOME_SPACE_MODE; -+unsigned int user_mode = SECONDARY_SPACE_MODE; - EXPORT_SYMBOL_GPL(user_mode); - - static int set_amode_and_uaccess(unsigned long user_amode, -diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c -index c9a9f7f..60d0315 100644 ---- a/arch/s390/mm/mmap.c -+++ b/arch/s390/mm/mmap.c -@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - */ - if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } else { - mm->mmap_base = mmap_base(); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; - } -@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - */ - if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = s390_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } else { - mm->mmap_base = mmap_base(); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = s390_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; - } -diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h -index 589d5c7..669e274 100644 ---- a/arch/score/include/asm/system.h -+++ b/arch/score/include/asm/system.h -@@ -17,7 +17,7 @@ do { \ - #define finish_arch_switch(prev) do {} while (0) - - typedef void (*vi_handler_t)(void); --extern unsigned long arch_align_stack(unsigned long sp); -+#define 
arch_align_stack(x) (x) - - #define mb() barrier() - #define rmb() barrier() -diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c -index 25d0803..d6c8e36 100644 ---- a/arch/score/kernel/process.c -+++ b/arch/score/kernel/process.c -@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task) - - return task_pt_regs(task)->cp0_epc; - } -- --unsigned long arch_align_stack(unsigned long sp) --{ -- return sp; --} -diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c -index afeb710..d1d1289 100644 ---- a/arch/sh/mm/mmap.c -+++ b/arch/sh/mm/mmap.c -@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - -@@ -106,7 +105,7 @@ full_search: - } - return -ENOMEM; - } -- if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* - * Remember the place where we stopped the search: - */ -@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - -@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* make sure it can fit in the remaining address space */ - if (likely(addr > len)) { - vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len)) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } -@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - if (unlikely(mm->mmap_base < len)) - goto bottomup; - -- addr = mm->mmap_base-len; -- if (do_colour_align) -- addr = COLOUR_ALIGN_DOWN(addr, pgoff); -+ addr = mm->mmap_base - len; - - do { -+ if (do_colour_align) -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff); - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); -- if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr); - } -@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -- if (do_colour_align) -- addr = COLOUR_ALIGN_DOWN(addr, pgoff); -- } while (likely(len < vma->vm_start)); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - bottomup: - /* -diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile -index ad1fb5d..fc5315b 100644 ---- a/arch/sparc/Makefile -+++ b/arch/sparc/Makefile -@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/ - # Export what is needed by arch/sparc/boot/Makefile - export VMLINUX_INIT VMLINUX_MAIN - VMLINUX_INIT := $(head-y) $(init-y) --VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ -+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ - VMLINUX_MAIN += $(patsubst %/, 
%/lib.a, $(libs-y)) $(libs-y) - VMLINUX_MAIN += $(drivers-y) $(net-y) - -diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h -index 9f421df..b81fc12 100644 ---- a/arch/sparc/include/asm/atomic_64.h -+++ b/arch/sparc/include/asm/atomic_64.h -@@ -14,18 +14,40 @@ - #define ATOMIC64_INIT(i) { (i) } - - #define atomic_read(v) (*(volatile int *)&(v)->counter) -+static inline int atomic_read_unchecked(const atomic_unchecked_t *v) -+{ -+ return v->counter; -+} - #define atomic64_read(v) (*(volatile long *)&(v)->counter) -+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ return v->counter; -+} - - #define atomic_set(v, i) (((v)->counter) = i) -+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) -+{ -+ v->counter = i; -+} - #define atomic64_set(v, i) (((v)->counter) = i) -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) -+{ -+ v->counter = i; -+} - - extern void atomic_add(int, atomic_t *); -+extern void atomic_add_unchecked(int, atomic_unchecked_t *); - extern void atomic64_add(long, atomic64_t *); -+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); - extern void atomic_sub(int, atomic_t *); -+extern void atomic_sub_unchecked(int, atomic_unchecked_t *); - extern void atomic64_sub(long, atomic64_t *); -+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); - - extern int atomic_add_ret(int, atomic_t *); -+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); - extern long atomic64_add_ret(long, atomic64_t *); -+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); - extern int atomic_sub_ret(int, atomic_t *); - extern long atomic64_sub_ret(long, atomic64_t *); - -@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *); - #define atomic64_dec_return(v) atomic64_sub_ret(1, v) - - #define atomic_inc_return(v) atomic_add_ret(1, v) -+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_ret_unchecked(1, v); -+} - #define atomic64_inc_return(v) atomic64_add_ret(1, v) -+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) -+{ -+ return atomic64_add_ret_unchecked(1, v); -+} - - #define atomic_sub_return(i, v) atomic_sub_ret(i, v) - #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) - - #define atomic_add_return(i, v) atomic_add_ret(i, v) -+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) -+{ -+ return atomic_add_ret_unchecked(i, v); -+} - #define atomic64_add_return(i, v) atomic64_add_ret(i, v) -+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) -+{ -+ return atomic64_add_ret_unchecked(i, v); -+} - - /* - * atomic_inc_and_test - increment and test -@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *); - * other cases. 
- */ - #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_inc_return_unchecked(v) == 0; -+} - #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) - - #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) -@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *); - #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) - - #define atomic_inc(v) atomic_add(1, v) -+static inline void atomic_inc_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_add_unchecked(1, v); -+} - #define atomic64_inc(v) atomic64_add(1, v) -+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) -+{ -+ atomic64_add_unchecked(1, v); -+} - - #define atomic_dec(v) atomic_sub(1, v) -+static inline void atomic_dec_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_sub_unchecked(1, v); -+} - #define atomic64_dec(v) atomic64_sub(1, v) -+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) -+{ -+ atomic64_sub_unchecked(1, v); -+} - - #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) - #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) - - #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) -+{ -+ return xchg(&v->counter, new); -+} - - static inline int __atomic_add_unless(atomic_t *v, int a, int u) - { -- int c, old; -+ int c, old, new; - c = atomic_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("addcc %2, %0, %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "tvs %%icc, 6\n" -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a) -+ : "cc"); -+ -+ old = atomic_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; -@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) - #define atomic64_cmpxchg(v, o, n) \ - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) - #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) -+{ -+ return xchg(&v->counter, new); -+} - - static inline long atomic64_add_unless(atomic64_t *v, long a, long u) - { -- long c, old; -+ long c, old, new; - c = atomic64_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic64_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("addcc %2, %0, %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "tvs %%xcc, 6\n" -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a) -+ : "cc"); -+ -+ old = atomic64_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; - } -- return c != (u); -+ return c != u; - } - - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h -index 69358b5..17b4745 100644 ---- a/arch/sparc/include/asm/cache.h -+++ b/arch/sparc/include/asm/cache.h -@@ -10,7 +10,7 @@ - #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) - - #define L1_CACHE_SHIFT 5 --#define L1_CACHE_BYTES 32 -+#define L1_CACHE_BYTES 32UL - - #ifdef CONFIG_SPARC32 - #define SMP_CACHE_BYTES_SHIFT 5 -diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h -index 
4269ca6..e3da77f 100644 ---- a/arch/sparc/include/asm/elf_32.h -+++ b/arch/sparc/include/asm/elf_32.h -@@ -114,6 +114,13 @@ typedef struct { - - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x10000UL -+ -+#define PAX_DELTA_MMAP_LEN 16 -+#define PAX_DELTA_STACK_LEN 16 -+#endif -+ - /* This yields a mask that user programs can use to figure out what - instruction set this cpu supports. This can NOT be done in userspace - on Sparc. */ -diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h -index 7df8b7f..4946269 100644 ---- a/arch/sparc/include/asm/elf_64.h -+++ b/arch/sparc/include/asm/elf_64.h -@@ -180,6 +180,13 @@ typedef struct { - #define ELF_ET_DYN_BASE 0x0000010000000000UL - #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) -+ -+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) -+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29) -+#endif -+ - extern unsigned long sparc64_elf_hwcap; - #define ELF_HWCAP sparc64_elf_hwcap - -diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h -index 5b31a8e..1d92567 100644 ---- a/arch/sparc/include/asm/pgtable_32.h -+++ b/arch/sparc/include/asm/pgtable_32.h -@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd) - BTFIXUPDEF_INT(page_none) - BTFIXUPDEF_INT(page_copy) - BTFIXUPDEF_INT(page_readonly) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+BTFIXUPDEF_INT(page_shared_noexec) -+BTFIXUPDEF_INT(page_copy_noexec) -+BTFIXUPDEF_INT(page_readonly_noexec) -+#endif -+ - BTFIXUPDEF_INT(page_kernel) - - #define PMD_SHIFT SUN4C_PMD_SHIFT -@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED; - #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) - #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) - -+#ifdef CONFIG_PAX_PAGEEXEC -+extern pgprot_t PAGE_SHARED_NOEXEC; -+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) -+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_COPY_NOEXEC PAGE_COPY -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+#endif -+ - extern unsigned long page_kernel; - - #ifdef MODULE -diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h -index f6ae2b2..b03ffc7 100644 ---- a/arch/sparc/include/asm/pgtsrmmu.h -+++ b/arch/sparc/include/asm/pgtsrmmu.h -@@ -115,6 +115,13 @@ - SRMMU_EXEC | SRMMU_REF) - #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ - SRMMU_EXEC | SRMMU_REF) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) -+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) -+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) -+#endif -+ - #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ - SRMMU_DIRTY | SRMMU_REF) - -diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h -index 9689176..63c18ea 100644 ---- a/arch/sparc/include/asm/spinlock_64.h -+++ b/arch/sparc/include/asm/spinlock_64.h -@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla - - /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ - --static void inline arch_read_lock(arch_rwlock_t *lock) -+static inline void arch_read_lock(arch_rwlock_t *lock) - { - unsigned long tmp1, tmp2; - - __asm__ __volatile__ ( - "1: ldsw [%2], %0\n" - " brlz,pn %0, 2f\n" --"4: add %0, 1, %1\n" -+"4: addcc %0, 1, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" tvs %%icc, 6\n" -+#endif -+ - " cas [%2], %0, %1\n" - " cmp %0, %1\n" - " bne,pn %%icc, 1b\n" -@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock) - " .previous" - : "=&r" (tmp1), "=&r" (tmp2) - : "r" (lock) -- : "memory"); -+ : "memory", "cc"); - } - --static int inline arch_read_trylock(arch_rwlock_t *lock) -+static inline int arch_read_trylock(arch_rwlock_t *lock) - { - int tmp1, tmp2; - -@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) - "1: ldsw [%2], %0\n" - " brlz,a,pn %0, 2f\n" - " mov 0, %0\n" --" add %0, 1, %1\n" -+" addcc %0, 1, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" tvs %%icc, 6\n" -+#endif -+ - " cas [%2], %0, %1\n" - " cmp %0, %1\n" - " bne,pn %%icc, 1b\n" -@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) - return tmp1; - } - --static void inline arch_read_unlock(arch_rwlock_t *lock) -+static inline void arch_read_unlock(arch_rwlock_t *lock) - { - unsigned long tmp1, tmp2; - - __asm__ __volatile__( - "1: lduw [%2], %0\n" --" sub %0, 1, %1\n" -+" subcc %0, 1, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" tvs %%icc, 6\n" -+#endif -+ - " cas [%2], %0, %1\n" - " cmp %0, %1\n" - " bne,pn %%xcc, 1b\n" -@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) - : "memory"); - } - --static void inline arch_write_lock(arch_rwlock_t *lock) -+static inline void arch_write_lock(arch_rwlock_t *lock) - { - unsigned long mask, tmp1, tmp2; - -@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) - : "memory"); - } - --static void inline arch_write_unlock(arch_rwlock_t *lock) -+static inline void arch_write_unlock(arch_rwlock_t *lock) - { - __asm__ __volatile__( - " stw %%g0, [%0]" -@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) - : "memory"); - } - --static int inline arch_write_trylock(arch_rwlock_t *lock) -+static inline int arch_write_trylock(arch_rwlock_t *lock) - { - unsigned long mask, tmp1, tmp2, result; - -diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h -index fa57532..e1a4c53 100644 ---- a/arch/sparc/include/asm/thread_info_32.h -+++ b/arch/sparc/include/asm/thread_info_32.h -@@ -50,6 +50,8 @@ struct thread_info { - unsigned long w_saved; - - struct restart_block restart_block; -+ -+ unsigned long lowest_stack; - }; - - /* -diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h -index 60d86be..952dea1 100644 ---- a/arch/sparc/include/asm/thread_info_64.h -+++ b/arch/sparc/include/asm/thread_info_64.h -@@ -63,6 +63,8 @@ struct thread_info { - struct pt_regs *kern_una_regs; - unsigned int kern_una_insn; - -+ unsigned long lowest_stack; -+ - unsigned long fpregs[0] __attribute__ ((aligned(64))); - }; - -diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h -index e88fbe5..96b0ce5 100644 ---- a/arch/sparc/include/asm/uaccess.h -+++ b/arch/sparc/include/asm/uaccess.h -@@ -1,5 +1,13 @@ - #ifndef ___ASM_SPARC_UACCESS_H - #define ___ASM_SPARC_UACCESS_H -+ -+#ifdef __KERNEL__ -+#ifndef __ASSEMBLY__ -+#include <linux/types.h> -+extern void check_object_size(const void *ptr, unsigned long n, bool to); -+#endif -+#endif -+ - #if 
defined(__sparc__) && defined(__arch64__) - #include <asm/uaccess_64.h> - #else -diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h -index 8303ac4..07f333d 100644 ---- a/arch/sparc/include/asm/uaccess_32.h -+++ b/arch/sparc/include/asm/uaccess_32.h -@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig - - static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) - { -- if (n && __access_ok((unsigned long) to, n)) -+ if ((long)n < 0) -+ return n; -+ -+ if (n && __access_ok((unsigned long) to, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); - return __copy_user(to, (__force void __user *) from, n); -- else -+ } else - return n; - } - - static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ - return __copy_user(to, (__force void __user *) from, n); - } - - static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) - { -- if (n && __access_ok((unsigned long) from, n)) -+ if ((long)n < 0) -+ return n; -+ -+ if (n && __access_ok((unsigned long) from, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); - return __copy_user((__force void __user *) to, from, n); -- else -+ } else - return n; - } - - static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - return __copy_user((__force void __user *) to, from, n); - } - -diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h -index 3e1449f..5293a0e 100644 ---- a/arch/sparc/include/asm/uaccess_64.h -+++ b/arch/sparc/include/asm/uaccess_64.h -@@ -10,6 +10,7 @@ - #include <linux/compiler.h> - #include <linux/string.h> - #include <linux/thread_info.h> -+#include <linux/kernel.h> - #include <asm/asi.h> - #include <asm/system.h> - #include <asm/spitfire.h> -@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, - static inline unsigned long __must_check - copy_from_user(void *to, const void __user *from, unsigned long size) - { -- unsigned long ret = ___copy_from_user(to, from, size); -+ unsigned long ret; - -+ if ((long)size < 0 || size > INT_MAX) -+ return size; -+ -+ if (!__builtin_constant_p(size)) -+ check_object_size(to, size, false); -+ -+ ret = ___copy_from_user(to, from, size); - if (unlikely(ret)) - ret = copy_from_user_fixup(to, from, size); - -@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from, - static inline unsigned long __must_check - copy_to_user(void __user *to, const void *from, unsigned long size) - { -- unsigned long ret = ___copy_to_user(to, from, size); -+ unsigned long ret; - -+ if ((long)size < 0 || size > INT_MAX) -+ return size; -+ -+ if (!__builtin_constant_p(size)) -+ check_object_size(from, size, true); -+ -+ ret = ___copy_to_user(to, from, size); - if (unlikely(ret)) - ret = copy_to_user_fixup(to, from, size); - return ret; -diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile -index cb85458..e063f17 100644 ---- a/arch/sparc/kernel/Makefile -+++ b/arch/sparc/kernel/Makefile -@@ -3,7 +3,7 @@ - # - - asflags-y := -ansi --ccflags-y := -Werror -+#ccflags-y := -Werror - - extra-y := head_$(BITS).o - extra-y += init_task.o -diff --git 
a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c -index f793742..4d880af 100644 ---- a/arch/sparc/kernel/process_32.c -+++ b/arch/sparc/kernel/process_32.c -@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp) - rw->ins[4], rw->ins[5], - rw->ins[6], - rw->ins[7]); -- printk("%pS\n", (void *) rw->ins[7]); -+ printk("%pA\n", (void *) rw->ins[7]); - rw = (struct reg_window32 *) rw->ins[6]; - } - spin_unlock_irqrestore(&sparc_backtrace_lock, flags); -@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r) - - printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", - r->psr, r->pc, r->npc, r->y, print_tainted()); -- printk("PC: <%pS>\n", (void *) r->pc); -+ printk("PC: <%pA>\n", (void *) r->pc); - printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], - r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); - printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], - r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); -- printk("RPC: <%pS>\n", (void *) r->u_regs[15]); -+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]); - - printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], -@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) - rw = (struct reg_window32 *) fp; - pc = rw->ins[7]; - printk("[%08lx : ", pc); -- printk("%pS ] ", (void *) pc); -+ printk("%pA ] ", (void *) pc); - fp = rw->ins[6]; - } while (++count < 16); - printk("\n"); -diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c -index d959cd0..7b42812 100644 ---- a/arch/sparc/kernel/process_64.c -+++ b/arch/sparc/kernel/process_64.c -@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs) - printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", - rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); - if (regs->tstate & TSTATE_PRIV) -- printk("I7: <%pS>\n", (void *) rwk->ins[7]); -+ printk("I7: <%pA>\n", (void *) rwk->ins[7]); - } - - void show_regs(struct pt_regs *regs) - { - printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, - regs->tpc, regs->tnpc, regs->y, print_tainted()); -- printk("TPC: <%pS>\n", (void *) regs->tpc); -+ printk("TPC: <%pA>\n", (void *) regs->tpc); - printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", - regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], - regs->u_regs[3]); -@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs) - printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", - regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], - regs->u_regs[15]); -- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); -+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); - show_regwindow(regs); - show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); - } -@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void) - ((tp && tp->task) ? 
tp->task->pid : -1)); - - if (gp->tstate & TSTATE_PRIV) { -- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", -+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", - (void *) gp->tpc, - (void *) gp->o7, - (void *) gp->i7, -diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c -index 42b282f..28ce9f2 100644 ---- a/arch/sparc/kernel/sys_sparc_32.c -+++ b/arch/sparc/kernel/sys_sparc_32.c -@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - if (ARCH_SUN4C && len > 0x20000000) - return -ENOMEM; - if (!addr) -- addr = TASK_UNMAPPED_BASE; -+ addr = current->mm->mmap_base; - - if (flags & MAP_SHARED) - addr = COLOUR_ALIGN(addr); -@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - } - if (TASK_SIZE - PAGE_SIZE - len < addr) - return -ENOMEM; -- if (!vmm || addr + len <= vmm->vm_start) -+ if (check_heap_stack_gap(vmm, addr, len)) - return addr; - addr = vmm->vm_end; - if (flags & MAP_SHARED) -diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c -index 908b47a..aa9e584 100644 ---- a/arch/sparc/kernel/sys_sparc_64.c -+++ b/arch/sparc/kernel/sys_sparc_64.c -@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - /* We do not accept a shared mapping if it would violate - * cache aliasing constraints. - */ -- if ((flags & MAP_SHARED) && -+ if ((filp || (flags & MAP_SHARED)) && - ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) - return -EINVAL; - return addr; -@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - if (filp || (flags & MAP_SHARED)) - do_color_align = 1; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); -@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (task_size - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - - if (len > mm->cached_hole_size) { -- start_addr = addr = mm->free_area_cache; -+ start_addr = addr = mm->free_area_cache; - } else { -- start_addr = addr = TASK_UNMAPPED_BASE; -+ start_addr = addr = mm->mmap_base; - mm->cached_hole_size = 0; - } - -@@ -174,14 +177,14 @@ full_search: - vma = find_vma(mm, VA_EXCLUDE_END); - } - if (unlikely(task_size < addr)) { -- if (start_addr != TASK_UNMAPPED_BASE) { -- start_addr = addr = TASK_UNMAPPED_BASE; -+ if (start_addr != mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } -- if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* - * Remember the place where we stopped the search: - */ -@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* We do not accept a shared mapping if it would violate - * cache aliasing constraints. 
- */ -- if ((flags & MAP_SHARED) && -+ if ((filp || (flags & MAP_SHARED)) && - ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) - return -EINVAL; - return addr; -@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (task_size - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - -@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* make sure it can fit in the remaining address space */ - if (likely(addr > len)) { - vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len)) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } -@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - if (unlikely(mm->mmap_base < len)) - goto bottomup; - -- addr = mm->mmap_base-len; -- if (do_color_align) -- addr = COLOUR_ALIGN_DOWN(addr, pgoff); -+ addr = mm->mmap_base - len; - - do { -+ if (do_color_align) -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff); - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); -- if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr); - } -@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -- if (do_color_align) -- addr = COLOUR_ALIGN_DOWN(addr, pgoff); -- } while (likely(len < vma->vm_start)); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - bottomup: - /* -@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - gap == RLIM_INFINITY || - sysctl_legacy_va_layout) { - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } else { -@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - gap = (task_size / 6 * 5); - - mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; - } -diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c -index c0490c7..84959d1 100644 ---- a/arch/sparc/kernel/traps_32.c -+++ b/arch/sparc/kernel/traps_32.c -@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc) - #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") - #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") - -+extern void gr_handle_kernel_exploit(void); -+ - void die_if_kernel(char *str, struct pt_regs *regs) - { - static int die_counter; -@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs) - count++ < 30 && - (((unsigned long) rw) >= PAGE_OFFSET) && - !(((unsigned long) rw) & 0x7)) { -- 
printk("Caller[%08lx]: %pS\n", rw->ins[7], -+ printk("Caller[%08lx]: %pA\n", rw->ins[7], - (void *) rw->ins[7]); - rw = (struct reg_window32 *)rw->ins[6]; - } - } - printk("Instruction DUMP:"); - instruction_dump ((unsigned long *) regs->pc); -- if(regs->psr & PSR_PS) -+ if(regs->psr & PSR_PS) { -+ gr_handle_kernel_exploit(); - do_exit(SIGKILL); -+ } - do_exit(SIGSEGV); - } - -diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c -index 0cbdaa4..438e4c9 100644 ---- a/arch/sparc/kernel/traps_64.c -+++ b/arch/sparc/kernel/traps_64.c -@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) - i + 1, - p->trapstack[i].tstate, p->trapstack[i].tpc, - p->trapstack[i].tnpc, p->trapstack[i].tt); -- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); -+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); - } - } - -@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl) - - lvl -= 0x100; - if (regs->tstate & TSTATE_PRIV) { -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ if (lvl == 6) -+ pax_report_refcount_overflow(regs); -+#endif -+ - sprintf(buffer, "Kernel bad sw trap %lx", lvl); - die_if_kernel(buffer, regs); - } -@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl) - void bad_trap_tl1(struct pt_regs *regs, long lvl) - { - char buffer[32]; -- -+ - if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, - 0, lvl, SIGTRAP) == NOTIFY_STOP) - return; - -+#ifdef CONFIG_PAX_REFCOUNT -+ if (lvl == 6) -+ pax_report_refcount_overflow(regs); -+#endif -+ - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - - sprintf (buffer, "Bad trap %lx at tl>0", lvl); -@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in - regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); - printk("%s" "ERROR(%d): ", - (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); -- printk("TPC<%pS>\n", (void *) regs->tpc); -+ printk("TPC<%pA>\n", (void *) regs->tpc); - printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", - (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), - (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, -@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) - smp_processor_id(), - (type & 0x1) ? 'I' : 'D', - regs->tpc); -- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); -+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); - panic("Irrecoverable Cheetah+ parity error."); - } - -@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) - smp_processor_id(), - (type & 0x1) ? 
'I' : 'D', - regs->tpc); -- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); -+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); - } - - struct sun4v_error_entry { -@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) - - printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", - regs->tpc, tl); -- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); -+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); - printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); -- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", -+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", - (void *) regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " - "pte[%lx] error[%lx]\n", -@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) - - printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", - regs->tpc, tl); -- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); -+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); - printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); -- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", -+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", - (void *) regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " - "pte[%lx] error[%lx]\n", -@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) - fp = (unsigned long)sf->fp + STACK_BIAS; - } - -- printk(" [%016lx] %pS\n", pc, (void *) pc); -+ printk(" [%016lx] %pA\n", pc, (void *) pc); - #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if ((pc + 8UL) == (unsigned long) &return_to_handler) { - int index = tsk->curr_ret_stack; - if (tsk->ret_stack && index >= graph) { - pc = tsk->ret_stack[index - graph].ret; -- printk(" [%016lx] %pS\n", pc, (void *) pc); -+ printk(" [%016lx] %pA\n", pc, (void *) pc); - graph++; - } - } -@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) - return (struct reg_window *) (fp + STACK_BIAS); - } - -+extern void gr_handle_kernel_exploit(void); -+ - void die_if_kernel(char *str, struct pt_regs *regs) - { - static int die_counter; -@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) - while (rw && - count++ < 30 && - kstack_valid(tp, (unsigned long) rw)) { -- printk("Caller[%016lx]: %pS\n", rw->ins[7], -+ printk("Caller[%016lx]: %pA\n", rw->ins[7], - (void *) rw->ins[7]); - - rw = kernel_stack_up(rw); -@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs) - } - user_instruction_dump ((unsigned int __user *) regs->tpc); - } -- if (regs->tstate & TSTATE_PRIV) -+ if (regs->tstate & TSTATE_PRIV) { -+ gr_handle_kernel_exploit(); - do_exit(SIGKILL); -+ } - do_exit(SIGSEGV); - } - EXPORT_SYMBOL(die_if_kernel); -diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c -index 76e4ac1..78f8bb1 100644 ---- a/arch/sparc/kernel/unaligned_64.c -+++ b/arch/sparc/kernel/unaligned_64.c -@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs) - static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); - - if (__ratelimit(&ratelimit)) { -- printk("Kernel unaligned access at TPC[%lx] %pS\n", -+ printk("Kernel unaligned access at TPC[%lx] %pA\n", - regs->tpc, (void *) regs->tpc); - } - } -diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile -index a3fc437..fea9957 100644 ---- a/arch/sparc/lib/Makefile -+++ b/arch/sparc/lib/Makefile -@@ -2,7 +2,7 @@ - # - - asflags-y := -ansi -DST_DIV0=0x02 --ccflags-y := -Werror 
-+#ccflags-y := -Werror - - lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o - lib-$(CONFIG_SPARC32) += memcpy.o memset.o -diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S -index 59186e0..f747d7a 100644 ---- a/arch/sparc/lib/atomic_64.S -+++ b/arch/sparc/lib/atomic_64.S -@@ -18,7 +18,12 @@ - atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: lduw [%o1], %g1 -- add %g1, %o0, %g7 -+ addcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %icc, 6 -+#endif -+ - cas [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) -@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic_add, .-atomic_add - -+ .globl atomic_add_unchecked -+ .type atomic_add_unchecked,#function -+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: lduw [%o1], %g1 -+ add %g1, %o0, %g7 -+ cas [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %icc, 2f -+ nop -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic_add_unchecked, .-atomic_add_unchecked -+ - .globl atomic_sub - .type atomic_sub,#function - atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: lduw [%o1], %g1 -- sub %g1, %o0, %g7 -+ subcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %icc, 6 -+#endif -+ - cas [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) -@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic_sub, .-atomic_sub - -+ .globl atomic_sub_unchecked -+ .type atomic_sub_unchecked,#function -+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: lduw [%o1], %g1 -+ sub %g1, %o0, %g7 -+ cas [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %icc, 2f -+ nop -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic_sub_unchecked, .-atomic_sub_unchecked -+ - .globl atomic_add_ret - .type atomic_add_ret,#function - atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: lduw [%o1], %g1 -- add %g1, %o0, %g7 -+ addcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %icc, 6 -+#endif -+ - cas [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) -@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic_add_ret, .-atomic_add_ret - -+ .globl atomic_add_ret_unchecked -+ .type atomic_add_ret_unchecked,#function -+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: lduw [%o1], %g1 -+ addcc %g1, %o0, %g7 -+ cas [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %icc, 2f -+ add %g7, %o0, %g7 -+ sra %g7, 0, %o0 -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked -+ - .globl atomic_sub_ret - .type atomic_sub_ret,#function - atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: lduw [%o1], %g1 -- sub %g1, %o0, %g7 -+ subcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %icc, 6 -+#endif -+ - cas [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) -@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ - atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: ldx [%o1], %g1 -- add %g1, %o0, %g7 -+ addcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %xcc, 6 -+#endif -+ - casx [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) -@@ -88,12 
+159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic64_add, .-atomic64_add - -+ .globl atomic64_add_unchecked -+ .type atomic64_add_unchecked,#function -+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: ldx [%o1], %g1 -+ addcc %g1, %o0, %g7 -+ casx [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %xcc, 2f -+ nop -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic64_add_unchecked, .-atomic64_add_unchecked -+ - .globl atomic64_sub - .type atomic64_sub,#function - atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: ldx [%o1], %g1 -- sub %g1, %o0, %g7 -+ subcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %xcc, 6 -+#endif -+ - casx [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) -@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic64_sub, .-atomic64_sub - -+ .globl atomic64_sub_unchecked -+ .type atomic64_sub_unchecked,#function -+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: ldx [%o1], %g1 -+ subcc %g1, %o0, %g7 -+ casx [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %xcc, 2f -+ nop -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked -+ - .globl atomic64_add_ret - .type atomic64_add_ret,#function - atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: ldx [%o1], %g1 -- add %g1, %o0, %g7 -+ addcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %xcc, 6 -+#endif -+ - casx [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) -@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ - 2: BACKOFF_SPIN(%o2, %o3, 1b) - .size atomic64_add_ret, .-atomic64_add_ret - -+ .globl atomic64_add_ret_unchecked -+ .type atomic64_add_ret_unchecked,#function -+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ -+ BACKOFF_SETUP(%o2) -+1: ldx [%o1], %g1 -+ addcc %g1, %o0, %g7 -+ casx [%o1], %g1, %g7 -+ cmp %g1, %g7 -+ bne,pn %xcc, 2f -+ add %g7, %o0, %g7 -+ mov %g7, %o0 -+ retl -+ nop -+2: BACKOFF_SPIN(%o2, %o3, 1b) -+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked -+ - .globl atomic64_sub_ret - .type atomic64_sub_ret,#function - atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ - BACKOFF_SETUP(%o2) - 1: ldx [%o1], %g1 -- sub %g1, %o0, %g7 -+ subcc %g1, %o0, %g7 -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ tvs %xcc, 6 -+#endif -+ - casx [%o1], %g1, %g7 - cmp %g1, %g7 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) -diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c -index 1b30bb3..b4a16c7 100644 ---- a/arch/sparc/lib/ksyms.c -+++ b/arch/sparc/lib/ksyms.c -@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write); - - /* Atomic counter implementation. */ - EXPORT_SYMBOL(atomic_add); -+EXPORT_SYMBOL(atomic_add_unchecked); - EXPORT_SYMBOL(atomic_add_ret); -+EXPORT_SYMBOL(atomic_add_ret_unchecked); - EXPORT_SYMBOL(atomic_sub); -+EXPORT_SYMBOL(atomic_sub_unchecked); - EXPORT_SYMBOL(atomic_sub_ret); - EXPORT_SYMBOL(atomic64_add); -+EXPORT_SYMBOL(atomic64_add_unchecked); - EXPORT_SYMBOL(atomic64_add_ret); -+EXPORT_SYMBOL(atomic64_add_ret_unchecked); - EXPORT_SYMBOL(atomic64_sub); -+EXPORT_SYMBOL(atomic64_sub_unchecked); - EXPORT_SYMBOL(atomic64_sub_ret); - - /* Atomic bit operations. 
*/ -diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile -index e3cda21..a68e4cb 100644 ---- a/arch/sparc/mm/Makefile -+++ b/arch/sparc/mm/Makefile -@@ -2,7 +2,7 @@ - # - - asflags-y := -ansi --ccflags-y := -Werror -+#ccflags-y := -Werror - - obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o - obj-y += fault_$(BITS).o -diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c -index aa1c1b1..f93e28f 100644 ---- a/arch/sparc/mm/fault_32.c -+++ b/arch/sparc/mm/fault_32.c -@@ -22,6 +22,9 @@ - #include <linux/interrupt.h> - #include <linux/module.h> - #include <linux/kdebug.h> -+#include <linux/slab.h> -+#include <linux/pagemap.h> -+#include <linux/compiler.h> - - #include <asm/system.h> - #include <asm/page.h> -@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) - return safe_compute_effective_address(regs, insn); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+#ifdef CONFIG_PAX_DLRESOLVE -+static void pax_emuplt_close(struct vm_area_struct *vma) -+{ -+ vma->vm_mm->call_dl_resolve = 0UL; -+} -+ -+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ unsigned int *kaddr; -+ -+ vmf->page = alloc_page(GFP_HIGHUSER); -+ if (!vmf->page) -+ return VM_FAULT_OOM; -+ -+ kaddr = kmap(vmf->page); -+ memset(kaddr, 0, PAGE_SIZE); -+ kaddr[0] = 0x9DE3BFA8U; /* save */ -+ flush_dcache_page(vmf->page); -+ kunmap(vmf->page); -+ return VM_FAULT_MAJOR; -+} -+ -+static const struct vm_operations_struct pax_vm_ops = { -+ .close = pax_emuplt_close, -+ .fault = pax_emuplt_fault -+}; -+ -+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) -+{ -+ int ret; -+ -+ INIT_LIST_HEAD(&vma->anon_vma_chain); -+ vma->vm_mm = current->mm; -+ vma->vm_start = addr; -+ vma->vm_end = addr + PAGE_SIZE; -+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; -+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -+ vma->vm_ops = &pax_vm_ops; -+ -+ ret = insert_vm_struct(current->mm, vma); -+ if (ret) -+ return ret; -+ -+ ++current->mm->total_vm; -+ return 0; -+} -+#endif -+ -+/* -+ * PaX: decide what to do with offenders (regs->pc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int sethi1, sethi2, jmpl; -+ -+ err = get_user(sethi1, (unsigned int *)regs->pc); -+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U) -+ { -+ unsigned int addr; -+ -+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; -+ addr = regs->u_regs[UREG_G1]; -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ { /* PaX: patched PLT emulation #2 */ -+ unsigned int ba; -+ -+ err = get_user(ba, (unsigned int *)regs->pc); -+ -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { -+ unsigned int addr; -+ -+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } -+ -+ do { /* PaX: patched PLT emulation #3 */ -+ unsigned int sethi, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int 
*)regs->pc); -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U && -+ nop == 0x01000000U) -+ { -+ unsigned int addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 1 */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->pc); -+ err |= get_user(ba, (unsigned int *)(regs->pc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && -+ nop == 0x01000000U) -+ { -+ unsigned int addr, save, call; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); -+ else -+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); -+ -+ err = get_user(save, (unsigned int *)addr); -+ err |= get_user(call, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ if (err) -+ break; -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ struct vm_area_struct *vma; -+ unsigned long call_dl_resolve; -+ -+ down_read(&current->mm->mmap_sem); -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_read(&current->mm->mmap_sem); -+ if (likely(call_dl_resolve)) -+ goto emulate; -+ -+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ -+ down_write(&current->mm->mmap_sem); -+ if (current->mm->call_dl_resolve) { -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ goto emulate; -+ } -+ -+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); -+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ if (pax_insert_vma(vma, call_dl_resolve)) { -+ up_write(&current->mm->mmap_sem); -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ current->mm->call_dl_resolve = call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ -+emulate: -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->pc = call_dl_resolve; -+ regs->npc = addr+4; -+ return 3; -+ } -+#endif -+ -+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ -+ if ((save & 0xFFC00000U) == 0x05000000U && -+ (call & 0xFFFFE000U) == 0x85C0A000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G2] = addr + 4; -+ addr = (save & 0x003FFFFFU) << 10; -+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 3; -+ } -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 2 */ -+ unsigned int save, call, nop; -+ -+ err = get_user(save, (unsigned int *)(regs->pc-4)); -+ err |= get_user(call, (unsigned int *)regs->pc); -+ err |= get_user(nop, (unsigned int *)(regs->pc+4)); -+ if (err) -+ break; -+ -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) +
0x20000000U) << 2); -+ -+ regs->u_regs[UREG_RETPC] = regs->pc; -+ regs->pc = dl_resolve; -+ regs->npc = dl_resolve+4; -+ return 3; -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, - int text_fault) - { -@@ -281,6 +546,24 @@ good_area: - if(!(vma->vm_flags & VM_WRITE)) - goto bad_area; - } else { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - /* Allow reads even for write-only mappings */ - if(!(vma->vm_flags & (VM_READ | VM_EXEC))) - goto bad_area; -diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c -index 504c062..6fcb9c6 100644 ---- a/arch/sparc/mm/fault_64.c -+++ b/arch/sparc/mm/fault_64.c -@@ -21,6 +21,9 @@ - #include <linux/kprobes.h> - #include <linux/kdebug.h> - #include <linux/percpu.h> -+#include <linux/slab.h> -+#include <linux/pagemap.h> -+#include <linux/compiler.h> - - #include <asm/page.h> - #include <asm/pgtable.h> -@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) - printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", - regs->tpc); - printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); -- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); -+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); - printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); - dump_stack(); - unhandled_fault(regs->tpc, current, regs); -@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, - show_regs(regs); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+#ifdef CONFIG_PAX_DLRESOLVE -+static void pax_emuplt_close(struct vm_area_struct *vma) -+{ -+ vma->vm_mm->call_dl_resolve = 0UL; -+} -+ -+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ unsigned int *kaddr; -+ -+ vmf->page = alloc_page(GFP_HIGHUSER); -+ if (!vmf->page) -+ return VM_FAULT_OOM; -+ -+ kaddr = kmap(vmf->page); -+ memset(kaddr, 0, PAGE_SIZE); -+ kaddr[0] = 0x9DE3BFA8U; /* save */ -+ flush_dcache_page(vmf->page); -+ kunmap(vmf->page); -+ return VM_FAULT_MAJOR; -+} -+ -+static const struct vm_operations_struct pax_vm_ops = { -+ .close = pax_emuplt_close, -+ .fault = pax_emuplt_fault -+}; -+ -+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) -+{ -+ int ret; -+ -+ INIT_LIST_HEAD(&vma->anon_vma_chain); -+ vma->vm_mm = current->mm; -+ vma->vm_start = addr; -+ vma->vm_end = addr + PAGE_SIZE; -+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; -+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -+ vma->vm_ops = &pax_vm_ops; -+ -+ ret = insert_vm_struct(current->mm, vma); -+ if (ret) -+ return ret; -+ -+ ++current->mm->total_vm; -+ return 0; -+} -+#endif -+ -+/* -+ * PaX: decide what to do with offenders (regs->tpc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 
when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int sethi1, sethi2, jmpl; -+ -+ err = get_user(sethi1, (unsigned int *)regs->tpc); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; -+ addr = regs->u_regs[UREG_G1]; -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ { /* PaX: patched PLT emulation #2 */ -+ unsigned int ba; -+ -+ err = get_user(ba, (unsigned int *)regs->tpc); -+ -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { -+ unsigned long addr; -+ -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } -+ -+ do { /* PaX: patched PLT emulation #3 */ -+ unsigned int sethi, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #4 */ -+ unsigned int sethi, mov1, call, mov2; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(call, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ mov1 == 0x8210000FU && -+ (call & 0xC0000000U) == 0x40000000U && -+ mov2 == 0x9E100001U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; -+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #5 */ -+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); -+ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); -+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 
0x03000000U && -+ (sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ (or1 & 0xFFFFE000U) == 0x82106000U && -+ (or2 & 0xFFFFE000U) == 0x8A116000U && -+ sllx == 0x83287020U && -+ jmpl == 0x81C04005U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); -+ regs->u_regs[UREG_G1] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); -+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #6 */ -+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); -+ err |= get_user(or, (unsigned int *)(regs->tpc+16)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ sllx == 0x83287020U && -+ (or & 0xFFFFE000U) == 0x8A116000U && -+ jmpl == 0x81C04005U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); -+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 1 */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ unsigned int save, call; -+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); -+ else -+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ err = get_user(save, (unsigned int *)addr); -+ err |= get_user(call, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ if (err) -+ break; -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ struct vm_area_struct *vma; -+ unsigned long call_dl_resolve; -+ -+ down_read(&current->mm->mmap_sem); -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_read(&current->mm->mmap_sem); -+ if (likely(call_dl_resolve)) -+ goto emulate; -+ -+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ -+ down_write(&current->mm->mmap_sem); -+ if (current->mm->call_dl_resolve) { -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ goto emulate; -+ } -+ -+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); -+ if (!vma || (call_dl_resolve & ~PAGE_MASK))
{ -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ if (pax_insert_vma(vma, call_dl_resolve)) { -+ up_write(&current->mm->mmap_sem); -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ current->mm->call_dl_resolve = call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ -+emulate: -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->tpc = call_dl_resolve; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+#endif -+ -+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ -+ if ((save & 0xFFC00000U) == 0x05000000U && -+ (call & 0xFFFFE000U) == 0x85C0A000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G2] = addr + 4; -+ addr = (save & 0x003FFFFFU) << 10; -+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+ -+ /* PaX: 64-bit PLT stub */ -+ err = get_user(sethi1, (unsigned int *)addr); -+ err |= get_user(sethi2, (unsigned int *)(addr+4)); -+ err |= get_user(or1, (unsigned int *)(addr+8)); -+ err |= get_user(or2, (unsigned int *)(addr+12)); -+ err |= get_user(sllx, (unsigned int *)(addr+16)); -+ err |= get_user(add, (unsigned int *)(addr+20)); -+ err |= get_user(jmpl, (unsigned int *)(addr+24)); -+ err |= get_user(nop, (unsigned int *)(addr+28)); -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x09000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ (or1 & 0xFFFFE000U) == 0x88112000U && -+ (or2 & 0xFFFFE000U) == 0x8A116000U && -+ sllx == 0x89293020U && -+ add == 0x8A010005U && -+ jmpl == 0x89C14000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); -+ regs->u_regs[UREG_G4] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); -+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; -+ regs->u_regs[UREG_G4] = addr + 24; -+ addr = regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+ } -+ } while (0); -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ do { /* PaX: unpatched PLT emulation step 2 */ -+ unsigned int save, call, nop; -+ -+ err = get_user(save, (unsigned int *)(regs->tpc-4)); -+ err |= get_user(call, (unsigned int *)regs->tpc); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); -+ if (err) -+ break; -+ -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ dl_resolve &= 0xFFFFFFFFUL; -+ -+ regs->u_regs[UREG_RETPC] = regs->tpc; -+ regs->tpc = dl_resolve; -+ regs->tnpc = dl_resolve+4; -+ return 3; -+ } -+ } while (0); -+#endif -+ -+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (ba & 0xFFF00000U) == 0x30600000U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) +
0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) - { - struct mm_struct *mm = current->mm; -@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) - if (!vma) - goto bad_area; - -+#ifdef CONFIG_PAX_PAGEEXEC -+ /* PaX: detect ITLB misses on non-exec pages */ -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && -+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) -+ { -+ if (address != regs->tpc) -+ goto good_area; -+ -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - /* Pure DTLB misses do not tell us whether the fault causing - * load/store/atomic was a write or not, it only says that there - * was no match. So in such a case we (carefully) read the -diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c -index f4e9764..5682724 100644 ---- a/arch/sparc/mm/hugetlbpage.c -+++ b/arch/sparc/mm/hugetlbpage.c -@@ -68,7 +68,7 @@ full_search: - } - return -ENOMEM; - } -- if (likely(!vma || addr + len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* - * Remember the place where we stopped the search: - */ -@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* make sure it can fit in the remaining address space */ - if (likely(addr > len)) { - vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr - len, len)) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } -@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - if (unlikely(mm->mmap_base < len)) - goto bottomup; - -- addr = (mm->mmap_base-len) & HPAGE_MASK; -+ addr = mm->mmap_base - len; - - do { -+ addr &= HPAGE_MASK; - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); -- if (likely(!vma || addr+len <= vma->vm_start)) { -+ if (likely(check_heap_stack_gap(vma, addr, len))) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr); - } -@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = (vma->vm_start-len) & HPAGE_MASK; -- } while (likely(len < vma->vm_start)); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - bottomup: - /* -@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - if (addr) { - addr = ALIGN(addr, HPAGE_SIZE); - vma = find_vma(mm, addr); -- if (task_size - len >= addr && -- (!vma || 
addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - if (mm->get_unmapped_area == arch_get_unmapped_area) -diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c -index 7b00de6..78239f4 100644 ---- a/arch/sparc/mm/init_32.c -+++ b/arch/sparc/mm/init_32.c -@@ -316,6 +316,9 @@ extern void device_scan(void); - pgprot_t PAGE_SHARED __read_mostly; - EXPORT_SYMBOL(PAGE_SHARED); - -+pgprot_t PAGE_SHARED_NOEXEC __read_mostly; -+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC); -+ - void __init paging_init(void) - { - switch(sparc_cpu_model) { -@@ -344,17 +347,17 @@ void __init paging_init(void) - - /* Initialize the protection map with non-constant, MMU dependent values. */ - protection_map[0] = PAGE_NONE; -- protection_map[1] = PAGE_READONLY; -- protection_map[2] = PAGE_COPY; -- protection_map[3] = PAGE_COPY; -+ protection_map[1] = PAGE_READONLY_NOEXEC; -+ protection_map[2] = PAGE_COPY_NOEXEC; -+ protection_map[3] = PAGE_COPY_NOEXEC; - protection_map[4] = PAGE_READONLY; - protection_map[5] = PAGE_READONLY; - protection_map[6] = PAGE_COPY; - protection_map[7] = PAGE_COPY; - protection_map[8] = PAGE_NONE; -- protection_map[9] = PAGE_READONLY; -- protection_map[10] = PAGE_SHARED; -- protection_map[11] = PAGE_SHARED; -+ protection_map[9] = PAGE_READONLY_NOEXEC; -+ protection_map[10] = PAGE_SHARED_NOEXEC; -+ protection_map[11] = PAGE_SHARED_NOEXEC; - protection_map[12] = PAGE_READONLY; - protection_map[13] = PAGE_READONLY; - protection_map[14] = PAGE_SHARED; -diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c -index cbef74e..c38fead 100644 ---- a/arch/sparc/mm/srmmu.c -+++ b/arch/sparc/mm/srmmu.c -@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void) - PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); - BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); - BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC); -+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); -+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); -+#endif -+ - BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); - page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); - -diff --git a/arch/um/Makefile b/arch/um/Makefile -index c0f712c..3a5c4c9 100644 ---- a/arch/um/Makefile -+++ b/arch/um/Makefile -@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\ - $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \ - $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 - -+ifdef CONSTIFY_PLUGIN -+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify -+endif -+ - include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH) - - #This will adjust *FLAGS accordingly to the platform. 
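
The arch/sparc/mm/hugetlbpage.c hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(); that helper is defined elsewhere in the grsecurity patch and is not part of this excerpt. The self-contained C sketch below only illustrates what such a gap check does; the struct, helper name and guard size are hypothetical stand-ins, not the patch's actual code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's vm_area_struct (illustration only). */
struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_end;
        bool grows_down;        /* e.g. a stack mapping */
};

/* Illustrative guard size only; the real helper's gap policy is not shown in this excerpt. */
#define GUARD_GAP (64UL * 1024)

/*
 * Idea only: a candidate range [addr, addr+len) is acceptable when it ends
 * below the next mapping, and when that mapping is a downward-growing stack
 * an extra guard gap is kept so the new mapping cannot sit flush against it.
 */
static bool gap_ok(const struct vma_stub *next, unsigned long addr, unsigned long len)
{
        if (!next)                              /* nothing mapped above: fits */
                return true;
        if (addr + len > next->vm_start)        /* would overlap the next mapping */
                return false;
        if (next->grows_down)                   /* keep a guard gap below a stack */
                return next->vm_start - (addr + len) >= GUARD_GAP;
        return true;
}

int main(void)
{
        struct vma_stub stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };

        /* A request ending right below the stack start fails the guard test... */
        printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x10000UL));
        /* ...while one leaving room for the guard gap succeeds. */
        printf("%d\n", gap_ok(&stack, 0x7efffff00000UL, 0x10000UL));
        return 0;
}

The same substitution, an open-coded overlap test replaced by a gap-aware check, recurs throughout the full patch wherever unmapped areas are searched.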
-diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h -index 6c03acd..a5e0215 100644 ---- a/arch/um/include/asm/kmap_types.h -+++ b/arch/um/include/asm/kmap_types.h -@@ -23,6 +23,7 @@ enum km_type { - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, -+ KM_CLEARPAGE, - KM_TYPE_NR - }; - -diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h -index 4cc9b6c..02e5029 100644 ---- a/arch/um/include/asm/page.h -+++ b/arch/um/include/asm/page.h -@@ -14,6 +14,9 @@ - #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) - #define PAGE_MASK (~(PAGE_SIZE-1)) - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - #ifndef __ASSEMBLY__ - - struct page; -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c -index 21c1ae7..4640aaa 100644 ---- a/arch/um/kernel/process.c -+++ b/arch/um/kernel/process.c -@@ -404,22 +404,6 @@ int singlestepping(void * t) - return 2; - } - --/* -- * Only x86 and x86_64 have an arch_align_stack(). -- * All other arches have "#define arch_align_stack(x) (x)" -- * in their asm/system.h -- * As this is included in UML from asm-um/system-generic.h, -- * we can use it to behave as the subarch does. -- */ --#ifndef arch_align_stack --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() % 8192; -- return sp & ~0xf; --} --#endif -- - unsigned long get_wchan(struct task_struct *p) - { - unsigned long stack_page, sp, ip; -diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h -index d1b93c4..ae1b7fd 100644 ---- a/arch/um/sys-i386/shared/sysdep/system.h -+++ b/arch/um/sys-i386/shared/sysdep/system.h -@@ -17,7 +17,7 @@ - # define AT_VECTOR_SIZE_ARCH 1 - #endif - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - void default_idle(void); - -diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c -index 70ca357..728d1cc 100644 ---- a/arch/um/sys-i386/syscalls.c -+++ b/arch/um/sys-i386/syscalls.c -@@ -11,6 +11,21 @@ - #include "asm/uaccess.h" - #include "asm/unistd.h" - -+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) -+{ -+ unsigned long pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ if (len > pax_task_size || addr > pax_task_size - len) -+ return -EINVAL; -+ -+ return 0; -+} -+ - /* - * The prototype on i386 is: - * -diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h -index d1b93c4..ae1b7fd 100644 ---- a/arch/um/sys-x86_64/shared/sysdep/system.h -+++ b/arch/um/sys-x86_64/shared/sysdep/system.h -@@ -17,7 +17,7 @@ - # define AT_VECTOR_SIZE_ARCH 1 - #endif - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - void default_idle(void); - -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 6a47bb2..dc9a868 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -236,7 +236,7 @@ config X86_HT - - config X86_32_LAZY_GS - def_bool y -- depends on X86_32 && !CC_STACKPROTECTOR -+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF - - config ARCH_HWEIGHT_CFLAGS - string -@@ -1019,7 +1019,7 @@ choice - - config NOHIGHMEM - bool "off" -- depends on !X86_NUMAQ -+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) - ---help--- - Linux can use up to 64 Gigabytes of physical 
memory on x86 systems. - However, the address space of 32-bit x86 processors is only 4 -@@ -1056,7 +1056,7 @@ config NOHIGHMEM - - config HIGHMEM4G - bool "4GB" -- depends on !X86_NUMAQ -+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) - ---help--- - Select this if you have a 32-bit processor and between 1 and 4 - gigabytes of physical RAM. -@@ -1110,7 +1110,7 @@ config PAGE_OFFSET - hex - default 0xB0000000 if VMSPLIT_3G_OPT - default 0x80000000 if VMSPLIT_2G -- default 0x78000000 if VMSPLIT_2G_OPT -+ default 0x70000000 if VMSPLIT_2G_OPT - default 0x40000000 if VMSPLIT_1G - default 0xC0000000 - depends on X86_32 -@@ -1484,6 +1484,7 @@ config SECCOMP - - config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" -+ depends on X86_64 || !PAX_MEMORY_UDEREF - ---help--- - This option turns on the -fstack-protector GCC feature. This - feature puts, at the beginning of functions, a canary value on -@@ -1541,6 +1542,7 @@ config KEXEC_JUMP - config PHYSICAL_START - hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP) - default "0x1000000" -+ range 0x400000 0x40000000 - ---help--- - This gives the physical address where the kernel is loaded. - -@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS - config PHYSICAL_ALIGN - hex "Alignment value to which kernel should be aligned" if X86_32 - default "0x1000000" -+ range 0x400000 0x1000000 if PAX_KERNEXEC - range 0x2000 0x1000000 - ---help--- - This value puts the alignment restrictions on physical address -@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU - Say N if you want to disable CPU hotplug. - - config COMPAT_VDSO -- def_bool y -+ def_bool n - prompt "Compat VDSO support" - depends on X86_32 || IA32_EMULATION -+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF - ---help--- - Map the 32-bit VDSO to the predictable old-style address too. - -diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index e3ca7e0..b30b28a 100644 ---- a/arch/x86/Kconfig.cpu -+++ b/arch/x86/Kconfig.cpu -@@ -341,7 +341,7 @@ config X86_PPRO_FENCE - - config X86_F00F_BUG - def_bool y -- depends on M586MMX || M586TSC || M586 || M486 || M386 -+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC - - config X86_INVD_BUG - def_bool y -@@ -365,7 +365,7 @@ config X86_POPAD_OK - - config X86_ALIGNMENT_16 - def_bool y -- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 -+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 - - config X86_INTEL_USERCOPY - def_bool y -@@ -411,7 +411,7 @@ config X86_CMPXCHG64 - # generates cmov. 
- config X86_CMOV - def_bool y -- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) -+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) - - config X86_MINIMUM_CPU_FAMILY - int -diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug -index c0f8a5c..6404f61 100644 ---- a/arch/x86/Kconfig.debug -+++ b/arch/x86/Kconfig.debug -@@ -81,7 +81,7 @@ config X86_PTDUMP - config DEBUG_RODATA - bool "Write protect kernel read-only data structures" - default y -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && BROKEN - ---help--- - Mark the kernel read-only data as write-protected in the pagetables, - in order to catch accidental (and incorrect) writes to such const -@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST - - config DEBUG_SET_MODULE_RONX - bool "Set loadable kernel module data as NX and text as RO" -- depends on MODULES -+ depends on MODULES && BROKEN - ---help--- - This option helps catch unintended modifications to loadable - kernel module's text and read-only data. It also prevents execution -diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index b02e509..2631e48 100644 ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -46,6 +46,7 @@ else - UTS_MACHINE := x86_64 - CHECKFLAGS += -D__x86_64__ -m64 - -+ biarch := $(call cc-option,-m64) - KBUILD_AFLAGS += -m64 - KBUILD_CFLAGS += -m64 - -@@ -195,3 +196,12 @@ define archhelp - echo ' FDARGS="..." arguments for the booted kernel' - echo ' FDINITRD=file initrd for the booted kernel' - endef -+ -+define OLD_LD -+ -+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
-+*** Please upgrade your binutils to 2.18 or newer -+endef -+ -+archprepare: -+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) -diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile -index 95365a8..52f857b 100644 ---- a/arch/x86/boot/Makefile -+++ b/arch/x86/boot/Makefile -@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ - $(call cc-option, -fno-stack-protector) \ - $(call cc-option, -mpreferred-stack-boundary=2) - KBUILD_CFLAGS += $(call cc-option, -m32) -+ifdef CONSTIFY_PLUGIN -+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify -+endif - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n - -diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h -index 878e4b9..20537ab 100644 ---- a/arch/x86/boot/bitops.h -+++ b/arch/x86/boot/bitops.h -@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) - u8 v; - const u32 *p = (const u32 *)addr; - -- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); -+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); - return v; - } - -@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) - - static inline void set_bit(int nr, void *addr) - { -- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); -+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); - } - - #endif /* BOOT_BITOPS_H */ -diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h -index c7093bd..d4247ffe0 100644 ---- a/arch/x86/boot/boot.h -+++ b/arch/x86/boot/boot.h -@@ -85,7 +85,7 @@ static inline void io_delay(void) - static inline u16 ds(void) - { - u16 seg; -- asm("movw %%ds,%0" : "=rm" (seg)); -+ asm volatile("movw %%ds,%0" : "=rm" (seg)); - return seg; - } - -@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr) - static inline int memcmp(const void *s1, const void *s2, size_t len) - { - u8 diff; -- asm("repe; cmpsb; setnz %0" -+ asm volatile("repe; cmpsb; setnz %0" - : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); - return diff; - } -diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile -index 09664ef..edc5d03 100644 ---- a/arch/x86/boot/compressed/Makefile -+++ b/arch/x86/boot/compressed/Makefile -@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small - KBUILD_CFLAGS += $(cflags-y) - KBUILD_CFLAGS += $(call cc-option,-ffreestanding) - KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) -+ifdef CONSTIFY_PLUGIN -+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify -+endif - - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n -diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S -index 67a655a..b924059 100644 ---- a/arch/x86/boot/compressed/head_32.S -+++ b/arch/x86/boot/compressed/head_32.S -@@ -76,7 +76,7 @@ ENTRY(startup_32) - notl %eax - andl %eax, %ebx - #else -- movl $LOAD_PHYSICAL_ADDR, %ebx -+ movl $____LOAD_PHYSICAL_ADDR, %ebx - #endif - - /* Target address to relocate to for decompression */ -@@ -162,7 +162,7 @@ relocated: - * and where it was actually loaded. - */ - movl %ebp, %ebx -- subl $LOAD_PHYSICAL_ADDR, %ebx -+ subl $____LOAD_PHYSICAL_ADDR, %ebx - jz 2f /* Nothing to be done if loaded at compiled addr. */ - /* - * Process relocations. 
-@@ -170,8 +170,7 @@ relocated: - - 1: subl $4, %edi - movl (%edi), %ecx -- testl %ecx, %ecx -- jz 2f -+ jecxz 2f - addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) - jmp 1b - 2: -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index 35af09d..99c9676 100644 ---- a/arch/x86/boot/compressed/head_64.S -+++ b/arch/x86/boot/compressed/head_64.S -@@ -91,7 +91,7 @@ ENTRY(startup_32) - notl %eax - andl %eax, %ebx - #else -- movl $LOAD_PHYSICAL_ADDR, %ebx -+ movl $____LOAD_PHYSICAL_ADDR, %ebx - #endif - - /* Target address to relocate to for decompression */ -@@ -233,7 +233,7 @@ ENTRY(startup_64) - notq %rax - andq %rax, %rbp - #else -- movq $LOAD_PHYSICAL_ADDR, %rbp -+ movq $____LOAD_PHYSICAL_ADDR, %rbp - #endif - - /* Target address to relocate to for decompression */ -diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c -index 3a19d04..7c1d55a 100644 ---- a/arch/x86/boot/compressed/misc.c -+++ b/arch/x86/boot/compressed/misc.c -@@ -310,7 +310,7 @@ static void parse_elf(void *output) - case PT_LOAD: - #ifdef CONFIG_RELOCATABLE - dest = output; -- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); -+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); - #else - dest = (void *)(phdr->p_paddr); - #endif -@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, - error("Destination address too large"); - #endif - #ifndef CONFIG_RELOCATABLE -- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) -+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) - error("Wrong destination address"); - #endif - -diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c -index 89bbf4e..869908e 100644 ---- a/arch/x86/boot/compressed/relocs.c -+++ b/arch/x86/boot/compressed/relocs.c -@@ -13,8 +13,11 @@ - - static void die(char *fmt, ...); - -+#include "../../../../include/generated/autoconf.h" -+ - #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) - static Elf32_Ehdr ehdr; -+static Elf32_Phdr *phdr; - static unsigned long reloc_count, reloc_idx; - static unsigned long *relocs; - -@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp) - } - } - -+static void read_phdrs(FILE *fp) -+{ -+ unsigned int i; -+ -+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr)); -+ if (!phdr) { -+ die("Unable to allocate %d program headers\n", -+ ehdr.e_phnum); -+ } -+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { -+ die("Seek to %d failed: %s\n", -+ ehdr.e_phoff, strerror(errno)); -+ } -+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { -+ die("Cannot read ELF program headers: %s\n", -+ strerror(errno)); -+ } -+ for(i = 0; i < ehdr.e_phnum; i++) { -+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type); -+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset); -+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr); -+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr); -+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz); -+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz); -+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags); -+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align); -+ } -+ -+} -+ - static void read_shdrs(FILE *fp) - { -- int i; -+ unsigned int i; - Elf32_Shdr shdr; - - secs = calloc(ehdr.e_shnum, sizeof(struct section)); -@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp) - - static void read_strtabs(FILE *fp) - { -- int i; -+ unsigned int i; - for (i = 0; i < ehdr.e_shnum; i++) { - struct section *sec = &secs[i]; - if (sec->shdr.sh_type != SHT_STRTAB) { -@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp) - - static void 
read_symtabs(FILE *fp) - { -- int i,j; -+ unsigned int i,j; - for (i = 0; i < ehdr.e_shnum; i++) { - struct section *sec = &secs[i]; - if (sec->shdr.sh_type != SHT_SYMTAB) { -@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp) - - static void read_relocs(FILE *fp) - { -- int i,j; -+ unsigned int i,j; -+ uint32_t base; -+ - for (i = 0; i < ehdr.e_shnum; i++) { - struct section *sec = &secs[i]; - if (sec->shdr.sh_type != SHT_REL) { -@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp) - die("Cannot read symbol table: %s\n", - strerror(errno)); - } -+ base = 0; -+ for (j = 0; j < ehdr.e_phnum; j++) { -+ if (phdr[j].p_type != PT_LOAD ) -+ continue; -+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) -+ continue; -+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; -+ break; -+ } - for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { - Elf32_Rel *rel = &sec->reltab[j]; -- rel->r_offset = elf32_to_cpu(rel->r_offset); -+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base; - rel->r_info = elf32_to_cpu(rel->r_info); - } - } -@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp) - - static void print_absolute_symbols(void) - { -- int i; -+ unsigned int i; - printf("Absolute symbols\n"); - printf(" Num: Value Size Type Bind Visibility Name\n"); - for (i = 0; i < ehdr.e_shnum; i++) { - struct section *sec = &secs[i]; - char *sym_strtab; - Elf32_Sym *sh_symtab; -- int j; -+ unsigned int j; - - if (sec->shdr.sh_type != SHT_SYMTAB) { - continue; -@@ -431,14 +475,14 @@ static void print_absolute_symbols(void) - - static void print_absolute_relocs(void) - { -- int i, printed = 0; -+ unsigned int i, printed = 0; - - for (i = 0; i < ehdr.e_shnum; i++) { - struct section *sec = &secs[i]; - struct section *sec_applies, *sec_symtab; - char *sym_strtab; - Elf32_Sym *sh_symtab; -- int j; -+ unsigned int j; - if (sec->shdr.sh_type != SHT_REL) { - continue; - } -@@ -499,13 +543,13 @@ static void print_absolute_relocs(void) - - static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) - { -- int i; -+ unsigned int i; - /* Walk through the relocations */ - for (i = 0; i < ehdr.e_shnum; i++) { - char *sym_strtab; - Elf32_Sym *sh_symtab; - struct section *sec_applies, *sec_symtab; -- int j; -+ unsigned int j; - struct section *sec = &secs[i]; - - if (sec->shdr.sh_type != SHT_REL) { -@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) - !is_rel_reloc(sym_name(sym_strtab, sym))) { - continue; - } -+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ -+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) -+ continue; -+ -+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32) -+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ -+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext")) -+ continue; -+ if (!strcmp(sec_name(sym->st_shndx), ".init.text")) -+ continue; -+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) -+ continue; -+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) -+ continue; -+#endif -+ - switch (r_type) { - case R_386_NONE: - case R_386_PC32: -@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb) - - static void emit_relocs(int as_text) - { -- int i; -+ unsigned 
int i; - /* Count how many relocations I have and allocate space for them. */ - reloc_count = 0; - walk_relocs(count_reloc); -@@ -665,6 +725,7 @@ int main(int argc, char **argv) - fname, strerror(errno)); - } - read_ehdr(fp); -+ read_phdrs(fp); - read_shdrs(fp); - read_strtabs(fp); - read_symtabs(fp); -diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c -index 4d3ff03..e4972ff 100644 ---- a/arch/x86/boot/cpucheck.c -+++ b/arch/x86/boot/cpucheck.c -@@ -74,7 +74,7 @@ static int has_fpu(void) - u16 fcw = -1, fsw = -1; - u32 cr0; - -- asm("movl %%cr0,%0" : "=r" (cr0)); -+ asm volatile("movl %%cr0,%0" : "=r" (cr0)); - if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { - cr0 &= ~(X86_CR0_EM|X86_CR0_TS); - asm volatile("movl %0,%%cr0" : : "r" (cr0)); -@@ -90,7 +90,7 @@ static int has_eflag(u32 mask) - { - u32 f0, f1; - -- asm("pushfl ; " -+ asm volatile("pushfl ; " - "pushfl ; " - "popl %0 ; " - "movl %0,%1 ; " -@@ -115,7 +115,7 @@ static void get_flags(void) - set_bit(X86_FEATURE_FPU, cpu.flags); - - if (has_eflag(X86_EFLAGS_ID)) { -- asm("cpuid" -+ asm volatile("cpuid" - : "=a" (max_intel_level), - "=b" (cpu_vendor[0]), - "=d" (cpu_vendor[1]), -@@ -124,7 +124,7 @@ static void get_flags(void) - - if (max_intel_level >= 0x00000001 && - max_intel_level <= 0x0000ffff) { -- asm("cpuid" -+ asm volatile("cpuid" - : "=a" (tfms), - "=c" (cpu.flags[4]), - "=d" (cpu.flags[0]) -@@ -136,7 +136,7 @@ static void get_flags(void) - cpu.model += ((tfms >> 16) & 0xf) << 4; - } - -- asm("cpuid" -+ asm volatile("cpuid" - : "=a" (max_amd_level) - : "a" (0x80000000) - : "ebx", "ecx", "edx"); -@@ -144,7 +144,7 @@ static void get_flags(void) - if (max_amd_level >= 0x80000001 && - max_amd_level <= 0x8000ffff) { - u32 eax = 0x80000001; -- asm("cpuid" -+ asm volatile("cpuid" - : "+a" (eax), - "=c" (cpu.flags[6]), - "=d" (cpu.flags[1]) -@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 ecx = MSR_K7_HWCR; - u32 eax, edx; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - eax &= ~(1 << 15); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - get_flags(); /* Make sure it really did something */ - err = check_flags(); -@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 ecx = MSR_VIA_FCR; - u32 eax, edx; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - eax |= (1<<1)|(1<<7); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - set_bit(X86_FEATURE_CX8, cpu.flags); - err = check_flags(); -@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 eax, edx; - u32 level = 1; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); -- asm("cpuid" -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); -+ asm volatile("cpuid" - : "+a" (level), "=d" (cpu.flags[0]) - : : "ecx", "ebx"); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - err = check_flags(); - } -diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S -index 93e689f..504ba09 100644 ---- a/arch/x86/boot/header.S -+++ b/arch/x86/boot/header.S -@@ -224,7 +224,7 @@ setup_data: 
.quad 0 # 64-bit physical pointer to - # single linked list of - # struct setup_data - --pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr -+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr - - #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) - #define VO_INIT_SIZE (VO__end - VO__text) -diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c -index db75d07..8e6d0af 100644 ---- a/arch/x86/boot/memory.c -+++ b/arch/x86/boot/memory.c -@@ -19,7 +19,7 @@ - - static int detect_memory_e820(void) - { -- int count = 0; -+ unsigned int count = 0; - struct biosregs ireg, oreg; - struct e820entry *desc = boot_params.e820_map; - static struct e820entry buf; /* static so it is zeroed */ -diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c -index 11e8c6e..fdbb1ed 100644 ---- a/arch/x86/boot/video-vesa.c -+++ b/arch/x86/boot/video-vesa.c -@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) - - boot_params.screen_info.vesapm_seg = oreg.es; - boot_params.screen_info.vesapm_off = oreg.di; -+ boot_params.screen_info.vesapm_size = oreg.cx; - } - - /* -diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c -index 43eda28..5ab5fdb 100644 ---- a/arch/x86/boot/video.c -+++ b/arch/x86/boot/video.c -@@ -96,7 +96,7 @@ static void store_mode_params(void) - static unsigned int get_entry(void) - { - char entry_buf[4]; -- int i, len = 0; -+ unsigned int i, len = 0; - int key; - unsigned int v; - -diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S -index 5b577d5..3c1fed4 100644 ---- a/arch/x86/crypto/aes-x86_64-asm_64.S -+++ b/arch/x86/crypto/aes-x86_64-asm_64.S -@@ -8,6 +8,8 @@ - * including this sentence is retained in full. - */ - -+#include <asm/alternative-asm.h> -+ - .extern crypto_ft_tab - .extern crypto_it_tab - .extern crypto_fl_tab -@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \ - je B192; \ - leaq 32(r9),r9; - -+#define ret pax_force_retaddr 0, 1; ret -+ - #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \ - movq r1,r2; \ - movq r3,r4; \ -diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S -index be6d9e3..21fbbca 100644 ---- a/arch/x86/crypto/aesni-intel_asm.S -+++ b/arch/x86/crypto/aesni-intel_asm.S -@@ -31,6 +31,7 @@ - - #include <linux/linkage.h> - #include <asm/inst.h> -+#include <asm/alternative-asm.h> - - #ifdef __x86_64__ - .data -@@ -1436,7 +1437,9 @@ _return_T_done_decrypt: - pop %r14 - pop %r13 - pop %r12 -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_gcm_dec) - - - /***************************************************************************** -@@ -1699,7 +1702,9 @@ _return_T_done_encrypt: - pop %r14 - pop %r13 - pop %r12 -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_gcm_enc) - - #endif - -@@ -1714,6 +1719,7 @@ _key_expansion_256a: - pxor %xmm1, %xmm0 - movaps %xmm0, (TKEYP) - add $0x10, TKEYP -+ pax_force_retaddr_bts - ret - - .align 4 -@@ -1738,6 +1744,7 @@ _key_expansion_192a: - shufps $0b01001110, %xmm2, %xmm1 - movaps %xmm1, 0x10(TKEYP) - add $0x20, TKEYP -+ pax_force_retaddr_bts - ret - - .align 4 -@@ -1757,6 +1764,7 @@ _key_expansion_192b: - - movaps %xmm0, (TKEYP) - add $0x10, TKEYP -+ pax_force_retaddr_bts - ret - - .align 4 -@@ -1769,6 +1777,7 @@ _key_expansion_256b: - pxor %xmm1, %xmm2 - movaps %xmm2, (TKEYP) - add $0x10, TKEYP -+ pax_force_retaddr_bts - ret - - /* -@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key) - #ifndef __x86_64__ - popl KEYP - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_set_key) - - /* - * void aesni_enc(struct crypto_aes_ctx *ctx, 
u8 *dst, const u8 *src) -@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc) - popl KLEN - popl KEYP - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_enc) - - /* - * _aesni_enc1: internal ABI -@@ -1959,6 +1972,7 @@ _aesni_enc1: - AESENC KEY STATE - movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE -+ pax_force_retaddr_bts - ret - - /* -@@ -2067,6 +2081,7 @@ _aesni_enc4: - AESENCLAST KEY STATE2 - AESENCLAST KEY STATE3 - AESENCLAST KEY STATE4 -+ pax_force_retaddr_bts - ret - - /* -@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec) - popl KLEN - popl KEYP - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_dec) - - /* - * _aesni_dec1: internal ABI -@@ -2146,6 +2163,7 @@ _aesni_dec1: - AESDEC KEY STATE - movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE -+ pax_force_retaddr_bts - ret - - /* -@@ -2254,6 +2272,7 @@ _aesni_dec4: - AESDECLAST KEY STATE2 - AESDECLAST KEY STATE3 - AESDECLAST KEY STATE4 -+ pax_force_retaddr_bts - ret - - /* -@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc) - popl KEYP - popl LEN - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_ecb_enc) - - /* - * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, -@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec) - popl KEYP - popl LEN - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_ecb_dec) - - /* - * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, -@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc) - popl LEN - popl IVP - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_cbc_enc) - - /* - * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, -@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec) - popl LEN - popl IVP - #endif -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_cbc_dec) - - #ifdef __x86_64__ - .align 16 -@@ -2524,6 +2551,7 @@ _aesni_inc_init: - mov $1, TCTR_LOW - MOVQ_R64_XMM TCTR_LOW INC - MOVQ_R64_XMM CTR TCTR_LOW -+ pax_force_retaddr_bts - ret - - /* -@@ -2552,6 +2580,7 @@ _aesni_inc: - .Linc_low: - movaps CTR, IV - PSHUFB_XMM BSWAP_MASK IV -+ pax_force_retaddr_bts - ret - - /* -@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc) - .Lctr_enc_ret: - movups IV, (IVP) - .Lctr_enc_just_ret: -+ pax_force_retaddr 0, 1 - ret -+ENDPROC(aesni_ctr_enc) - #endif -diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S -index 6214a9b..1f4fc9a 100644 ---- a/arch/x86/crypto/salsa20-x86_64-asm_64.S -+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S -@@ -1,3 +1,5 @@ -+#include <asm/alternative-asm.h> -+ - # enter ECRYPT_encrypt_bytes - .text - .p2align 5 -@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes: - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr 0, 1 - ret - # bytesatleast65: - ._bytesatleast65: -@@ -891,6 +894,7 @@ ECRYPT_keysetup: - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr - ret - # enter ECRYPT_ivsetup - .text -@@ -917,4 +921,5 @@ ECRYPT_ivsetup: - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr - ret -diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S -index 573aa10..b73ad89 100644 ---- a/arch/x86/crypto/twofish-x86_64-asm_64.S -+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S -@@ -21,6 +21,7 @@ - .text - - #include <asm/asm-offsets.h> -+#include <asm/alternative-asm.h> - - #define a_offset 0 - #define b_offset 4 -@@ -269,6 +270,7 @@ twofish_enc_blk: - - popq R1 - movq $1,%rax -+ pax_force_retaddr 0, 1 - ret - - twofish_dec_blk: -@@ -321,4 +323,5 @@ twofish_dec_blk: - - popq R1 - movq $1,%rax -+ pax_force_retaddr 0, 1 - ret -diff --git a/arch/x86/ia32/ia32_aout.c 
b/arch/x86/ia32/ia32_aout.c -index fd84387..0b4af7d 100644 ---- a/arch/x86/ia32/ia32_aout.c -+++ b/arch/x86/ia32/ia32_aout.c -@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, - unsigned long dump_start, dump_size; - struct user32 dump; - -+ memset(&dump, 0, sizeof(dump)); -+ - fs = get_fs(); - set_fs(KERNEL_DS); - has_dumped = 1; -diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c -index 6557769..ef6ae89 100644 ---- a/arch/x86/ia32/ia32_signal.c -+++ b/arch/x86/ia32/ia32_signal.c -@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, - } - seg = get_fs(); - set_fs(KERNEL_DS); -- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); -+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp); - set_fs(seg); - if (ret >= 0 && uoss_ptr) { - if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) -@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, - */ - static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, - size_t frame_size, -- void **fpstate) -+ void __user **fpstate) - { - unsigned long sp; - -@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, - - if (used_math()) { - sp = sp - sig_xstate_ia32_size; -- *fpstate = (struct _fpstate_ia32 *) sp; -+ *fpstate = (struct _fpstate_ia32 __user *) sp; - if (save_i387_xstate_ia32(*fpstate) < 0) - return (void __user *) -1L; - } -@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, - sp -= frame_size; - /* Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. */ -- sp = ((sp + 4) & -16ul) - 4; -+ sp = ((sp - 12) & -16ul) - 4; - return (void __user *) sp; - } - -@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, - * These are actually not used anymore, but left because some - * gdb versions depend on them as a marker. - */ -- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); -+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); - } put_user_catch(err); - - if (err) -@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - 0xb8, - __NR_ia32_rt_sigreturn, - 0x80cd, -- 0, -+ 0 - }; - - frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); -@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - - if (ka->sa.sa_flags & SA_RESTORER) - restorer = ka->sa.sa_restorer; -+ else if (current->mm->context.vdso) -+ /* Return stub is in 32bit vsyscall page */ -+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); - else -- restorer = VDSO32_SYMBOL(current->mm->context.vdso, -- rt_sigreturn); -+ restorer = &frame->retcode; - put_user_ex(ptr_to_compat(restorer), &frame->pretcode); - - /* - * Not actually used anymore, but left because some gdb - * versions need it. 
- */ -- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); -+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); - } put_user_catch(err); - - if (err) -diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S -index 54edb207..9335b5f 100644 ---- a/arch/x86/ia32/ia32entry.S -+++ b/arch/x86/ia32/ia32entry.S -@@ -13,7 +13,9 @@ - #include <asm/thread_info.h> - #include <asm/segment.h> - #include <asm/irqflags.h> -+#include <asm/pgtable.h> - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ - #include <linux/elf-em.h> -@@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit) - ENDPROC(native_irq_enable_sysexit) - #endif - -+ .macro pax_enter_kernel_user -+ pax_set_fptr_mask -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_enter_kernel_user -+#endif -+ .endm -+ -+ .macro pax_exit_kernel_user -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_exit_kernel_user -+#endif -+#ifdef CONFIG_PAX_RANDKSTACK -+ pushq %rax -+ call pax_randomize_kstack -+ popq %rax -+#endif -+ .endm -+ -+.macro pax_erase_kstack -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+ call pax_erase_kstack -+#endif -+.endm -+ - /* - * 32bit SYSENTER instruction entry. - * -@@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target) - CFI_REGISTER rsp,rbp - SWAPGS_UNSAFE_STACK - movq PER_CPU_VAR(kernel_stack), %rsp -- addq $(KERNEL_STACK_OFFSET),%rsp -- /* -- * No need to follow this irqs on/off section: the syscall -- * disabled irqs, here we enable it straight after entry: -- */ -- ENABLE_INTERRUPTS(CLBR_NONE) - movl %ebp,%ebp /* zero extension */ - pushq_cfi $__USER32_DS - /*CFI_REL_OFFSET ss,0*/ -@@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target) - CFI_REL_OFFSET rsp,0 - pushfq_cfi - /*CFI_REL_OFFSET rflags,0*/ -- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d -- CFI_REGISTER rip,r10 -+ GET_THREAD_INFO(%r11) -+ movl TI_sysenter_return(%r11), %r11d -+ CFI_REGISTER rip,r11 - pushq_cfi $__USER32_CS - /*CFI_REL_OFFSET cs,0*/ - movl %eax, %eax -- pushq_cfi %r10 -+ pushq_cfi %r11 - CFI_REL_OFFSET rip,0 - pushq_cfi %rax - cld - SAVE_ARGS 0,1,0 -+ pax_enter_kernel_user -+ /* -+ * No need to follow this irqs on/off section: the syscall -+ * disabled irqs, here we enable it straight after entry: -+ */ -+ ENABLE_INTERRUPTS(CLBR_NONE) - /* no need to do an access_ok check here because rbp has been - 32bit zero extended */ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ mov $PAX_USER_SHADOW_BASE,%r11 -+ add %r11,%rbp -+#endif -+ - 1: movl (%rbp),%ebp - .section __ex_table,"a" - .quad 1b,ia32_badarg - .previous -- GET_THREAD_INFO(%r10) -- orl $TS_COMPAT,TI_status(%r10) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - CFI_REMEMBER_STATE - jnz sysenter_tracesys - cmpq $(IA32_NR_syscalls-1),%rax -@@ -162,13 +195,15 @@ sysenter_do_call: - sysenter_dispatch: - call *ia32_sys_call_table(,%rax,8) - movq %rax,RAX-ARGOFFSET(%rsp) -- GET_THREAD_INFO(%r10) -+ GET_THREAD_INFO(%r11) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl $_TIF_ALLWORK_MASK,TI_flags(%r10) -+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) - jnz sysexit_audit - sysexit_from_sys_call: -- andl $~TS_COMPAT,TI_status(%r10) -+ pax_exit_kernel_user -+ pax_erase_kstack -+ andl $~TS_COMPAT,TI_status(%r11) - /* clear IF, that popfq doesn't enable interrupts early */ - andl $~0x200,EFLAGS-R11(%rsp) - movl RIP-R11(%rsp),%edx /* User %eip */ -@@ -194,6 +229,9 @@ sysexit_from_sys_call: - movl %eax,%esi /* 2nd arg: 
syscall number */ - movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ - call audit_syscall_entry -+ -+ pax_erase_kstack -+ - movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ - cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys -@@ -205,7 +243,7 @@ sysexit_from_sys_call: - .endm - - .macro auditsys_exit exit -- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) -+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jnz ia32_ret_from_sys_call - TRACE_IRQS_ON - sti -@@ -215,12 +253,12 @@ sysexit_from_sys_call: - movzbl %al,%edi /* zero-extend that into %edi */ - inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ - call audit_syscall_exit -- GET_THREAD_INFO(%r10) -+ GET_THREAD_INFO(%r11) - movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ - movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - cli - TRACE_IRQS_OFF -- testl %edi,TI_flags(%r10) -+ testl %edi,TI_flags(%r11) - jz \exit - CLEAR_RREGS -ARGOFFSET - jmp int_with_check -@@ -238,7 +276,7 @@ sysexit_audit: - - sysenter_tracesys: - #ifdef CONFIG_AUDITSYSCALL -- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) -+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jz sysenter_auditsys - #endif - SAVE_REST -@@ -246,6 +284,9 @@ sysenter_tracesys: - movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ - movq %rsp,%rdi /* &pt_regs -> arg1 */ - call syscall_trace_enter -+ -+ pax_erase_kstack -+ - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST - cmpq $(IA32_NR_syscalls-1),%rax -@@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target) - ENTRY(ia32_cstar_target) - CFI_STARTPROC32 simple - CFI_SIGNAL_FRAME -- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET -+ CFI_DEF_CFA rsp,0 - CFI_REGISTER rip,rcx - /*CFI_REGISTER rflags,r11*/ - SWAPGS_UNSAFE_STACK - movl %esp,%r8d - CFI_REGISTER rsp,r8 - movq PER_CPU_VAR(kernel_stack),%rsp -+ SAVE_ARGS 8*6,0,0 -+ pax_enter_kernel_user - /* - * No need to follow this irqs on/off section: the syscall - * disabled irqs and here we enable it straight after entry: - */ - ENABLE_INTERRUPTS(CLBR_NONE) -- SAVE_ARGS 8,0,0 - movl %eax,%eax /* zero extension */ - movq %rax,ORIG_RAX-ARGOFFSET(%rsp) - movq %rcx,RIP-ARGOFFSET(%rsp) -@@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target) - /* no need to do an access_ok check here because r8 has been - 32bit zero extended */ - /* hardware stack frame is complete now */ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ mov $PAX_USER_SHADOW_BASE,%r11 -+ add %r11,%r8 -+#endif -+ - 1: movl (%r8),%r9d - .section __ex_table,"a" - .quad 1b,ia32_badarg - .previous -- GET_THREAD_INFO(%r10) -- orl $TS_COMPAT,TI_status(%r10) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - CFI_REMEMBER_STATE - jnz cstar_tracesys - cmpq $IA32_NR_syscalls-1,%rax -@@ -321,13 +369,15 @@ cstar_do_call: - cstar_dispatch: - call *ia32_sys_call_table(,%rax,8) - movq %rax,RAX-ARGOFFSET(%rsp) -- GET_THREAD_INFO(%r10) -+ GET_THREAD_INFO(%r11) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl $_TIF_ALLWORK_MASK,TI_flags(%r10) -+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) - jnz sysretl_audit - sysretl_from_sys_call: -- andl $~TS_COMPAT,TI_status(%r10) -+ pax_exit_kernel_user -+ pax_erase_kstack -+ andl $~TS_COMPAT,TI_status(%r11) - RESTORE_ARGS 0,-ARG_SKIP,0,0,0 - movl RIP-ARGOFFSET(%rsp),%ecx - CFI_REGISTER rip,rcx -@@ -355,7 +405,7 @@ sysretl_audit: - - 
cstar_tracesys: - #ifdef CONFIG_AUDITSYSCALL -- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) -+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jz cstar_auditsys - #endif - xchgl %r9d,%ebp -@@ -364,6 +414,9 @@ cstar_tracesys: - movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ - movq %rsp,%rdi /* &pt_regs -> arg1 */ - call syscall_trace_enter -+ -+ pax_erase_kstack -+ - LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ - RESTORE_REST - xchgl %ebp,%r9d -@@ -409,20 +462,21 @@ ENTRY(ia32_syscall) - CFI_REL_OFFSET rip,RIP-RIP - PARAVIRT_ADJUST_EXCEPTION_FRAME - SWAPGS -- /* -- * No need to follow this irqs on/off section: the syscall -- * disabled irqs and here we enable it straight after entry: -- */ -- ENABLE_INTERRUPTS(CLBR_NONE) - movl %eax,%eax - pushq_cfi %rax - cld - /* note the registers are not zero extended to the sf. - this could be a problem. */ - SAVE_ARGS 0,1,0 -- GET_THREAD_INFO(%r10) -- orl $TS_COMPAT,TI_status(%r10) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) -+ pax_enter_kernel_user -+ /* -+ * No need to follow this irqs on/off section: the syscall -+ * disabled irqs and here we enable it straight after entry: -+ */ -+ ENABLE_INTERRUPTS(CLBR_NONE) -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - jnz ia32_tracesys - cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys -@@ -441,6 +495,9 @@ ia32_tracesys: - movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ - movq %rsp,%rdi /* &pt_regs -> arg1 */ - call syscall_trace_enter -+ -+ pax_erase_kstack -+ - LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ - RESTORE_REST - cmpq $(IA32_NR_syscalls-1),%rax -@@ -455,6 +512,7 @@ ia32_badsys: - - quiet_ni_syscall: - movq $-ENOSYS,%rax -+ pax_force_retaddr - ret - CFI_ENDPROC - -diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c -index f6f5c53..b358b28 100644 ---- a/arch/x86/ia32/sys_ia32.c -+++ b/arch/x86/ia32/sys_ia32.c -@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, - */ - static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) - { -- typeof(ubuf->st_uid) uid = 0; -- typeof(ubuf->st_gid) gid = 0; -+ typeof(((struct stat64 *)0)->st_uid) uid = 0; -+ typeof(((struct stat64 *)0)->st_gid) gid = 0; - SET_UID(uid, stat->uid); - SET_GID(gid, stat->gid); - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || -@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, - } - set_fs(KERNEL_DS); - ret = sys_rt_sigprocmask(how, -- set ? (sigset_t __user *)&s : NULL, -- oset ? (sigset_t __user *)&s : NULL, -+ set ? (sigset_t __force_user *)&s : NULL, -+ oset ? 
(sigset_t __force_user *)&s : NULL, - sigsetsize); - set_fs(old_fs); - if (ret) -@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds) - return alarm_setitimer(seconds); - } - --asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, -+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, - int options) - { - return compat_sys_wait4(pid, stat_addr, options, NULL); -@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); -- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); -+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t); - set_fs(old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; -@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); -- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize); -+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize); - set_fs(old_fs); - if (!ret) { - switch (_NSIG_WORDS) { -@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; - set_fs(KERNEL_DS); -- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); -+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info); - set_fs(old_fs); - return ret; - } -@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, - return -EFAULT; - - set_fs(KERNEL_DS); -- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, -+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL, - count); - set_fs(old_fs); - -diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h -index 091508b..0ee32ec 100644 ---- a/arch/x86/include/asm/alternative-asm.h -+++ b/arch/x86/include/asm/alternative-asm.h -@@ -15,6 +15,45 @@ - .endm - #endif - -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN -+ .macro pax_force_retaddr_bts rip=0 -+ btsq $63,\rip(%rsp) -+ .endm -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS -+ .macro pax_force_retaddr rip=0, reload=0 -+ btsq $63,\rip(%rsp) -+ .endm -+ .macro pax_force_fptr ptr -+ btsq $63,\ptr -+ .endm -+ .macro pax_set_fptr_mask -+ .endm -+#endif -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ .macro pax_force_retaddr rip=0, reload=0 -+ .if \reload -+ pax_set_fptr_mask -+ .endif -+ orq %r10,\rip(%rsp) -+ .endm -+ .macro pax_force_fptr ptr -+ orq %r10,\ptr -+ .endm -+ .macro pax_set_fptr_mask -+ movabs $0x8000000000000000,%r10 -+ .endm -+#endif -+#else -+ .macro pax_force_retaddr rip=0, reload=0 -+ .endm -+ .macro pax_force_fptr ptr -+ .endm -+ .macro pax_force_retaddr_bts rip=0 -+ .endm -+ .macro pax_set_fptr_mask -+ .endm -+#endif -+ - .macro altinstruction_entry orig alt feature orig_len alt_len - .long \orig - . - .long \alt - . 
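As an aside for readers skimming the patch: the pax_force_retaddr / pax_force_fptr macros added to alternative-asm.h above implement the PaX KERNEXEC return-address protection. On x86_64 they set bit 63 of the saved return address or function pointer, either with "btsq $63" or by OR-ing in the 0x8000000000000000 mask kept in %r10, so that a corrupted value can only land in the kernel half of the address space. A minimal C sketch of the underlying idea follows; it is illustrative only, not part of the patch, and the helper name is hypothetical.

#include <stdint.h>

/* Illustrative only: mirrors the effect of "orq %r10,(%rsp)" with
 * %r10 = 0x8000000000000000 in the KERNEXEC "OR" method shown above.
 * Setting bit 63 keeps the pointer inside the x86_64 kernel half of
 * the address space, so an overwritten return address or function
 * pointer cannot be redirected into user-mapped memory. */
static inline uint64_t kernexec_force_kernel(uint64_t addr)
{
	return addr | (1ULL << 63);
}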
-diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h -index 37ad100..7d47faa 100644 ---- a/arch/x86/include/asm/alternative.h -+++ b/arch/x86/include/asm/alternative.h -@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end) - ".section .discard,"aw",@progbits\n" \ - " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ - ".previous\n" \ -- ".section .altinstr_replacement, "ax"\n" \ -+ ".section .altinstr_replacement, "a"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" - -diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h -index 9b7273c..e9fcc24 100644 ---- a/arch/x86/include/asm/apic.h -+++ b/arch/x86/include/asm/apic.h -@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void) - - #ifdef CONFIG_X86_LOCAL_APIC - --extern unsigned int apic_verbosity; -+extern int apic_verbosity; - extern int local_apic_timer_c2_ok; - - extern int disable_apic; -diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h -index 20370c6..a2eb9b0 100644 ---- a/arch/x86/include/asm/apm.h -+++ b/arch/x86/include/asm/apm.h -@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" -- "lcall *%%cs:apm_bios_entry\n\t" -+ "lcall *%%ss:apm_bios_entry\n\t" - "setc %%al\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" -@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" -- "lcall *%%cs:apm_bios_entry\n\t" -+ "lcall *%%ss:apm_bios_entry\n\t" - "setc %%bl\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" -diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h -index 10572e3..2618d91 100644 ---- a/arch/x86/include/asm/atomic.h -+++ b/arch/x86/include/asm/atomic.h -@@ -22,7 +22,18 @@ - */ - static inline int atomic_read(const atomic_t *v) - { -- return (*(volatile int *)&(v)->counter); -+ return (*(volatile const int *)&(v)->counter); -+} -+ -+/** -+ * atomic_read_unchecked - read atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically reads the value of @v. -+ */ -+static inline int atomic_read_unchecked(const atomic_unchecked_t *v) -+{ -+ return (*(volatile const int *)&(v)->counter); - } - - /** -@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i) - } - - /** -+ * atomic_set_unchecked - set atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * @i: required value -+ * -+ * Atomically sets the value of @v to @i. -+ */ -+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) -+{ -+ v->counter = i; -+} -+ -+/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t -@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i) - */ - static inline void atomic_add(int i, atomic_t *v) - { -- asm volatile(LOCK_PREFIX "addl %1,%0" -+ asm volatile(LOCK_PREFIX "addl %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subl %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter) -+ : "ir" (i)); -+} -+ -+/** -+ * atomic_add_unchecked - add integer to atomic variable -+ * @i: integer value to add -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically adds @i to @v. 
-+ */ -+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "addl %1,%0\n" - : "+m" (v->counter) - : "ir" (i)); - } -@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v) - */ - static inline void atomic_sub(int i, atomic_t *v) - { -- asm volatile(LOCK_PREFIX "subl %1,%0" -+ asm volatile(LOCK_PREFIX "subl %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addl %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter) -+ : "ir" (i)); -+} -+ -+/** -+ * atomic_sub_unchecked - subtract integer from atomic variable -+ * @i: integer value to subtract -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically subtracts @i from @v. -+ */ -+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "subl %1,%0\n" - : "+m" (v->counter) - : "ir" (i)); - } -@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" -+ asm volatile(LOCK_PREFIX "subl %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addl %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "+m" (v->counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; -@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) - */ - static inline void atomic_inc(atomic_t *v) - { -- asm volatile(LOCK_PREFIX "incl %0" -+ asm volatile(LOCK_PREFIX "incl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter)); -+} -+ -+/** -+ * atomic_inc_unchecked - increment atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically increments @v by 1. -+ */ -+static inline void atomic_inc_unchecked(atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "incl %0\n" - : "+m" (v->counter)); - } - -@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v) - */ - static inline void atomic_dec(atomic_t *v) - { -- asm volatile(LOCK_PREFIX "decl %0" -+ asm volatile(LOCK_PREFIX "decl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter)); -+} -+ -+/** -+ * atomic_dec_unchecked - decrement atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically decrements @v by 1. 
-+ */ -+static inline void atomic_dec_unchecked(atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "decl %0\n" - : "+m" (v->counter)); - } - -@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "decl %0; sete %1" -+ asm volatile(LOCK_PREFIX "decl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "incl %0; sete %1" -+ asm volatile(LOCK_PREFIX "incl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" -+ : "+m" (v->counter), "=qm" (c) -+ : : "memory"); -+ return c != 0; -+} -+ -+/** -+ * atomic_inc_and_test_unchecked - increment and test -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically increments @v by 1 -+ * and returns true if the result is zero, or false for all -+ * other cases. -+ */ -+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) -+{ -+ unsigned char c; -+ -+ asm volatile(LOCK_PREFIX "incl %0\n" -+ "sete %1\n" - : "+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" -+ asm volatile(LOCK_PREFIX "addl %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subl %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sets %1\n" - : "+m" (v->counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; -@@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v) - #endif - /* Modern 486+ processor */ - __i = i; -+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "movl %0, %1\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+r" (i), "+m" (v->counter) -+ : : "memory"); -+ return i + __i; -+ -+#ifdef CONFIG_M386 -+no_xadd: /* Legacy 386 processor */ -+ local_irq_save(flags); -+ __i = atomic_read(v); -+ atomic_set(v, i + __i); -+ local_irq_restore(flags); -+ return i + __i; -+#endif -+} -+ -+/** -+ * atomic_add_return_unchecked - add integer and return -+ * @v: pointer of type atomic_unchecked_t -+ * @i: integer value to add -+ * -+ * Atomically adds @i to @v and returns @i + @v -+ */ -+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) -+{ -+ int __i; -+#ifdef CONFIG_M386 -+ unsigned long flags; -+ if (unlikely(boot_cpu_data.x86 <= 3)) -+ goto no_xadd; -+#endif -+ /* Modern 486+ processor */ -+ __i = i; - asm volatile(LOCK_PREFIX "xaddl %0, %1" - : "+r" (i), "+m" (v->counter) - : : "memory"); -@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) - } - - #define atomic_inc_return(v) (atomic_add_return(1, v)) -+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v); -+} - #define atomic_dec_return(v) (atomic_sub_return(1, v)) - - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) - return cmpxchg(&v->counter, old, new); - } - -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) -+{ -+ return 
cmpxchg(&v->counter, old, new); -+} -+ - static inline int atomic_xchg(atomic_t *v, int new) - { - return xchg(&v->counter, new); - } - -+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) -+{ -+ return xchg(&v->counter, new); -+} -+ - /** - * __atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t -@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new) - */ - static inline int __atomic_add_unless(atomic_t *v, int a, int u) - { -- int c, old; -+ int c, old, new; - c = atomic_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("addl %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "subl %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a)); -+ -+ old = atomic_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; -@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) - return c; - } - -+/** -+ * atomic_inc_not_zero_hint - increment if not null -+ * @v: pointer of type atomic_t -+ * @hint: probable value of the atomic before the increment -+ * -+ * This version of atomic_inc_not_zero() gives a hint of probable -+ * value of the atomic. This helps processor to not read the memory -+ * before doing the atomic read/modify/write cycle, lowering -+ * number of bus transactions on some arches. -+ * -+ * Returns: 0 if increment was not done, 1 otherwise. -+ */ -+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint -+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) -+{ -+ int val, c = hint, new; -+ -+ /* sanity test, should be removed by compiler if hint is a constant */ -+ if (!hint) -+ return __atomic_add_unless(v, 1, 0); -+ -+ do { -+ asm volatile("incl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "decl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c)); -+ -+ val = atomic_cmpxchg(v, c, new); -+ if (val == c) -+ return 1; -+ c = val; -+ } while (c); -+ -+ return 0; -+} - - /* - * atomic_dec_if_positive - decrement by 1 if old value positive -diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h -index 24098aa..1e37723 100644 ---- a/arch/x86/include/asm/atomic64_32.h -+++ b/arch/x86/include/asm/atomic64_32.h -@@ -12,6 +12,14 @@ typedef struct { - u64 __aligned(8) counter; - } atomic64_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ u64 __aligned(8) counter; -+} atomic64_unchecked_t; -+#else -+typedef atomic64_t atomic64_unchecked_t; -+#endif -+ - #define ATOMIC64_INIT(val) { (val) } - - #ifdef CONFIG_X86_CMPXCHG64 -@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n - } - - /** -+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable -+ * @p: pointer to type atomic64_unchecked_t -+ * @o: expected value -+ * @n: new value -+ * -+ * Atomically sets @v to @n if it was equal to @o and returns -+ * the old value. 
-+ */ -+ -+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) -+{ -+ return cmpxchg64(&v->counter, o, n); -+} -+ -+/** - * atomic64_xchg - xchg atomic64 variable - * @v: pointer to type atomic64_t - * @n: value to assign -@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i) - } - - /** -+ * atomic64_set_unchecked - set atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * @n: value to assign -+ * -+ * Atomically sets the value of @v to @n. -+ */ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) -+{ -+ unsigned high = (unsigned)(i >> 32); -+ unsigned low = (unsigned)i; -+ asm volatile(ATOMIC64_ALTERNATIVE(set) -+ : "+b" (low), "+c" (high) -+ : "S" (v) -+ : "eax", "edx", "memory" -+ ); -+} -+ -+/** - * atomic64_read - read atomic64 variable - * @v: pointer to type atomic64_t - * -@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v) - } - - /** -+ * atomic64_read_unchecked - read atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically reads the value of @v and returns it. -+ */ -+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v) -+{ -+ long long r; -+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked) -+ : "=A" (r), "+c" (v) -+ : : "memory" -+ ); -+ return r; -+ } -+ -+/** - * atomic64_add_return - add and return - * @i: integer value to add - * @v: pointer to type atomic64_t -@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) - return i; - } - -+/** -+ * atomic64_add_return_unchecked - add and return -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v and returns @i + *@v -+ */ -+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked) -+ : "+A" (i), "+c" (v) -+ : : "memory" -+ ); -+ return i; -+} -+ - /* - * Other variants with different arithmetic operators: - */ -@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v) - return a; - } - -+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) -+{ -+ long long a; -+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked) -+ : "=A" (a) -+ : "S" (v) -+ : "memory", "ecx" -+ ); -+ return a; -+} -+ - static inline long long atomic64_dec_return(atomic64_t *v) - { - long long a; -@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v) - } - - /** -+ * atomic64_add_unchecked - add integer to atomic64 variable -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v. 
-+ */ -+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked) -+ : "+A" (i), "+c" (v) -+ : : "memory" -+ ); -+ return i; -+} -+ -+/** - * atomic64_sub - subtract the atomic64 variable - * @i: integer value to subtract - * @v: pointer to type atomic64_t -diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h -index 017594d..d3fcf72 100644 ---- a/arch/x86/include/asm/atomic64_64.h -+++ b/arch/x86/include/asm/atomic64_64.h -@@ -18,7 +18,19 @@ - */ - static inline long atomic64_read(const atomic64_t *v) - { -- return (*(volatile long *)&(v)->counter); -+ return (*(volatile const long *)&(v)->counter); -+} -+ -+/** -+ * atomic64_read_unchecked - read atomic64 variable -+ * @v: pointer of type atomic64_unchecked_t -+ * -+ * Atomically reads the value of @v. -+ * Doesn't imply a read memory barrier. -+ */ -+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ return (*(volatile const long *)&(v)->counter); - } - - /** -@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) - } - - /** -+ * atomic64_set_unchecked - set atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * @i: required value -+ * -+ * Atomically sets the value of @v to @i. -+ */ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) -+{ -+ v->counter = i; -+} -+ -+/** - * atomic64_add - add integer to atomic64 variable - * @i: integer value to add - * @v: pointer to type atomic64_t -@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) - */ - static inline void atomic64_add(long i, atomic64_t *v) - { -+ asm volatile(LOCK_PREFIX "addq %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subq %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "er" (i), "m" (v->counter)); -+} -+ -+/** -+ * atomic64_add_unchecked - add integer to atomic64 variable -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v. -+ */ -+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) -+{ - asm volatile(LOCK_PREFIX "addq %1,%0" - : "=m" (v->counter) - : "er" (i), "m" (v->counter)); -@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v) - */ - static inline void atomic64_sub(long i, atomic64_t *v) - { -- asm volatile(LOCK_PREFIX "subq %1,%0" -+ asm volatile(LOCK_PREFIX "subq %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addq %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "er" (i), "m" (v->counter)); -+} -+ -+/** -+ * atomic64_sub_unchecked - subtract the atomic64 variable -+ * @i: integer value to subtract -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically subtracts @i from @v. 
-+ */ -+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "subq %1,%0\n" - : "=m" (v->counter) - : "er" (i), "m" (v->counter)); - } -@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" -+ asm volatile(LOCK_PREFIX "subq %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addq %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "=m" (v->counter), "=qm" (c) - : "er" (i), "m" (v->counter) : "memory"); - return c; -@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) - */ - static inline void atomic64_inc(atomic64_t *v) - { -+ asm volatile(LOCK_PREFIX "incq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "m" (v->counter)); -+} -+ -+/** -+ * atomic64_inc_unchecked - increment atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically increments @v by 1. -+ */ -+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) -+{ - asm volatile(LOCK_PREFIX "incq %0" - : "=m" (v->counter) - : "m" (v->counter)); -@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v) - */ - static inline void atomic64_dec(atomic64_t *v) - { -- asm volatile(LOCK_PREFIX "decq %0" -+ asm volatile(LOCK_PREFIX "decq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "m" (v->counter)); -+} -+ -+/** -+ * atomic64_dec_unchecked - decrement atomic64 variable -+ * @v: pointer to type atomic64_t -+ * -+ * Atomically decrements @v by 1. 
-+ */ -+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "decq %0\n" - : "=m" (v->counter) - : "m" (v->counter)); - } -@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "decq %0; sete %1" -+ asm volatile(LOCK_PREFIX "decq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "=m" (v->counter), "=qm" (c) - : "m" (v->counter) : "memory"); - return c != 0; -@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "incq %0; sete %1" -+ asm volatile(LOCK_PREFIX "incq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "=m" (v->counter), "=qm" (c) - : "m" (v->counter) : "memory"); - return c != 0; -@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) - { - unsigned char c; - -- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" -+ asm volatile(LOCK_PREFIX "addq %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subq %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sets %1\n" - : "=m" (v->counter), "=qm" (c) - : "er" (i), "m" (v->counter) : "memory"); - return c; -@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) - static inline long atomic64_add_return(long i, atomic64_t *v) - { - long __i = i; -- asm volatile(LOCK_PREFIX "xaddq %0, %1;" -+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "movq %0, %1\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+r" (i), "+m" (v->counter) -+ : : "memory"); -+ return i + __i; -+} -+ -+/** -+ * atomic64_add_return_unchecked - add and return -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v and returns @i + @v -+ */ -+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) -+{ -+ long __i = i; -+ asm volatile(LOCK_PREFIX "xaddq %0, %1" - : "+r" (i), "+m" (v->counter) - : : "memory"); - return i + __i; -@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) - } - - #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) -+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) -+{ -+ return atomic64_add_return_unchecked(1, v); -+} - #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) - - static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) -@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) - return cmpxchg(&v->counter, old, new); - } - -+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} -+ - static inline long atomic64_xchg(atomic64_t *v, long new) - { - return xchg(&v->counter, new); -@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) - */ - static inline int atomic64_add_unless(atomic64_t *v, long a, long u) - { -- long c, old; -+ long c, old, new; - c = atomic64_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic64_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("add %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "sub %2,%0\n" -+ "int 
$4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a)); -+ -+ old = atomic64_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; - } -- return c != (u); -+ return c != u; - } - - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h -index 1775d6e..b65017f 100644 ---- a/arch/x86/include/asm/bitops.h -+++ b/arch/x86/include/asm/bitops.h -@@ -38,7 +38,7 @@ - * a mask operation on a byte. - */ - #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) --#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) -+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) - #define CONST_MASK(nr) (1 << ((nr) & 7)) - - /** -diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h -index 5e1a2ee..c9f9533 100644 ---- a/arch/x86/include/asm/boot.h -+++ b/arch/x86/include/asm/boot.h -@@ -11,10 +11,15 @@ - #include <asm/pgtable_types.h> - - /* Physical address where kernel should be loaded. */ --#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ -+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ - + (CONFIG_PHYSICAL_ALIGN - 1)) \ - & ~(CONFIG_PHYSICAL_ALIGN - 1)) - -+#ifndef __ASSEMBLY__ -+extern unsigned char __LOAD_PHYSICAL_ADDR[]; -+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) -+#endif -+ - /* Minimum kernel alignment, as a power of two */ - #ifdef CONFIG_X86_64 - #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT -diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h -index 48f99f1..d78ebf9 100644 ---- a/arch/x86/include/asm/cache.h -+++ b/arch/x86/include/asm/cache.h -@@ -5,12 +5,13 @@ - - /* L1 cache line size */ - #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -+#define __read_only __attribute__((__section__(".data..read_only"))) - - #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT --#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) -+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) - - #ifdef CONFIG_X86_VSMP - #ifdef CONFIG_SMP -diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h -index 4e12668..501d239 100644 ---- a/arch/x86/include/asm/cacheflush.h -+++ b/arch/x86/include/asm/cacheflush.h -@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg) - unsigned long pg_flags = pg->flags & _PGMT_MASK; - - if (pg_flags == _PGMT_DEFAULT) -- return -1; -+ return ~0UL; - else if (pg_flags == _PGMT_WC) - return _PAGE_CACHE_WC; - else if (pg_flags == _PGMT_UC_MINUS) -diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h -index 46fc474..b02b0f9 100644 ---- a/arch/x86/include/asm/checksum_32.h -+++ b/arch/x86/include/asm/checksum_32.h -@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); - -+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, -+ int len, __wsum sum, -+ int *src_err_ptr, int *dst_err_ptr); -+ -+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, -+ int len, __wsum sum, -+ int *src_err_ptr, int *dst_err_ptr); -+ - /* - * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel 
address to one of these functions. -@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, - int *err_ptr) - { - might_sleep(); -- return csum_partial_copy_generic((__force void *)src, dst, -+ return csum_partial_copy_generic_from_user((__force void *)src, dst, - len, sum, err_ptr, NULL); - } - -@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, - { - might_sleep(); - if (access_ok(VERIFY_WRITE, dst, len)) -- return csum_partial_copy_generic(src, (__force void *)dst, -+ return csum_partial_copy_generic_to_user(src, (__force void *)dst, - len, sum, NULL, err_ptr); - - if (len) -diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h -index 88b23a4..d2e5f9f 100644 ---- a/arch/x86/include/asm/cpufeature.h -+++ b/arch/x86/include/asm/cpufeature.h -@@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) - ".section .discard,"aw",@progbits\n" - " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ - ".previous\n" -- ".section .altinstr_replacement,"ax"\n" -+ ".section .altinstr_replacement,"a"\n" - "3: movb $1,%0\n" - "4:\n" - ".previous\n" -diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h -index 41935fa..3b40db8 100644 ---- a/arch/x86/include/asm/desc.h -+++ b/arch/x86/include/asm/desc.h -@@ -4,6 +4,7 @@ - #include <asm/desc_defs.h> - #include <asm/ldt.h> - #include <asm/mmu.h> -+#include <asm/pgtable.h> - - #include <linux/smp.h> - -@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in - - desc->type = (info->read_exec_only ^ 1) << 1; - desc->type |= info->contents << 2; -+ desc->type |= info->seg_not_present ^ 1; - - desc->s = 1; - desc->dpl = 0x3; -@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in - } - - extern struct desc_ptr idt_descr; --extern gate_desc idt_table[]; -- --struct gdt_page { -- struct desc_struct gdt[GDT_ENTRIES]; --} __attribute__((aligned(PAGE_SIZE))); -- --DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); -+extern gate_desc idt_table[256]; - -+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; - static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) - { -- return per_cpu(gdt_page, cpu).gdt; -+ return cpu_gdt_table[cpu]; - } - - #ifdef CONFIG_X86_64 -@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type, - unsigned long base, unsigned dpl, unsigned flags, - unsigned short seg) - { -- gate->a = (seg << 16) | (base & 0xffff); -- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); -+ gate->gate.offset_low = base; -+ gate->gate.seg = seg; -+ gate->gate.reserved = 0; -+ gate->gate.type = type; -+ gate->gate.s = 0; -+ gate->gate.dpl = dpl; -+ gate->gate.p = 1; -+ gate->gate.offset_high = base >> 16; - } - - #endif -@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) - - static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) - { -+ pax_open_kernel(); - memcpy(&idt[entry], gate, sizeof(*gate)); -+ pax_close_kernel(); - } - - static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) - { -+ pax_open_kernel(); - memcpy(&ldt[entry], desc, 8); -+ pax_close_kernel(); - } - - static inline void -@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int - default: size = 
sizeof(*gdt); break; - } - -+ pax_open_kernel(); - memcpy(&gdt[entry], desc, size); -+ pax_close_kernel(); - } - - static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, -@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) - - static inline void native_load_tr_desc(void) - { -+ pax_open_kernel(); - asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); -+ pax_close_kernel(); - } - - static inline void native_load_gdt(const struct desc_ptr *dtr) -@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) - struct desc_struct *gdt = get_cpu_gdt_table(cpu); - unsigned int i; - -+ pax_open_kernel(); - for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) - gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; -+ pax_close_kernel(); - } - - #define _LDT_empty(info) \ -@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) - desc->limit = (limit >> 16) & 0xf; - } - --static inline void _set_gate(int gate, unsigned type, void *addr, -+static inline void _set_gate(int gate, unsigned type, const void *addr, - unsigned dpl, unsigned ist, unsigned seg) - { - gate_desc s; -@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr, - * Pentium F0 0F bugfix can have resulted in the mapped - * IDT being write-protected. - */ --static inline void set_intr_gate(unsigned int n, void *addr) -+static inline void set_intr_gate(unsigned int n, const void *addr) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); -@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr) - /* - * This routine sets up an interrupt gate at directory privilege level 3. - */ --static inline void set_system_intr_gate(unsigned int n, void *addr) -+static inline void set_system_intr_gate(unsigned int n, const void *addr) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); - } - --static inline void set_system_trap_gate(unsigned int n, void *addr) -+static inline void set_system_trap_gate(unsigned int n, const void *addr) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); - } - --static inline void set_trap_gate(unsigned int n, void *addr) -+static inline void set_trap_gate(unsigned int n, const void *addr) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); -@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr) - static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) - { - BUG_ON((unsigned)n > 0xFF); -- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); -+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); - } - --static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) -+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); - } - --static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) -+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) - { - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); - } - -+#ifdef CONFIG_X86_32 -+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) -+{ -+ struct desc_struct d; -+ -+ if (likely(limit)) -+ limit = (limit - 1UL) >> PAGE_SHIFT; -+ pack_descriptor(&d, base, 
limit, 0xFB, 0xC); -+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); -+} -+#endif -+ - #endif /* _ASM_X86_DESC_H */ -diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h -index 278441f..b95a174 100644 ---- a/arch/x86/include/asm/desc_defs.h -+++ b/arch/x86/include/asm/desc_defs.h -@@ -31,6 +31,12 @@ struct desc_struct { - unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; - unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; - }; -+ struct { -+ u16 offset_low; -+ u16 seg; -+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; -+ unsigned offset_high: 16; -+ } gate; - }; - } __attribute__((packed)); - -diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h -index 908b969..a1f4eb4 100644 ---- a/arch/x86/include/asm/e820.h -+++ b/arch/x86/include/asm/e820.h -@@ -69,7 +69,7 @@ struct e820map { - #define ISA_START_ADDRESS 0xa0000 - #define ISA_END_ADDRESS 0x100000 - --#define BIOS_BEGIN 0x000a0000 -+#define BIOS_BEGIN 0x000c0000 - #define BIOS_END 0x00100000 - - #define BIOS_ROM_BASE 0xffe00000 -diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h -index f2ad216..eb24c96 100644 ---- a/arch/x86/include/asm/elf.h -+++ b/arch/x86/include/asm/elf.h -@@ -237,7 +237,25 @@ extern int force_personality32; - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -+#ifdef CONFIG_PAX_SEGMEXEC -+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) -+#else - #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) -+#endif -+ -+#ifdef CONFIG_PAX_ASLR -+#ifdef CONFIG_X86_32 -+#define PAX_ELF_ET_DYN_BASE 0x10000000UL -+ -+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) -+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) -+#else -+#define PAX_ELF_ET_DYN_BASE 0x400000UL -+ -+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) -+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) -+#endif -+#endif - - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. 
This could be done in user space, -@@ -290,9 +308,7 @@ do { \ - - #define ARCH_DLINFO \ - do { \ -- if (vdso_enabled) \ -- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ -- (unsigned long)current->mm->context.vdso); \ -+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ - } while (0) - - #define AT_SYSINFO 32 -@@ -303,7 +319,7 @@ do { \ - - #endif /* !CONFIG_X86_32 */ - --#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) -+#define VDSO_CURRENT_BASE (current->mm->context.vdso) - - #define VDSO_ENTRY \ - ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) -@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, - extern int syscall32_setup_pages(struct linux_binprm *, int exstack); - #define compat_arch_setup_additional_pages syscall32_setup_pages - --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #endif /* _ASM_X86_ELF_H */ -diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h -index cc70c1c..d96d011 100644 ---- a/arch/x86/include/asm/emergency-restart.h -+++ b/arch/x86/include/asm/emergency-restart.h -@@ -15,6 +15,6 @@ enum reboot_type { - - extern enum reboot_type reboot_type; - --extern void machine_emergency_restart(void); -+extern void machine_emergency_restart(void) __noreturn; - - #endif /* _ASM_X86_EMERGENCY_RESTART_H */ -diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h -index d09bb03..4ea4194 100644 ---- a/arch/x86/include/asm/futex.h -+++ b/arch/x86/include/asm/futex.h -@@ -12,16 +12,18 @@ - #include <asm/system.h> - - #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ -+ typecheck(u32 __user *, uaddr); \ - asm volatile("1:\t" insn "\n" \ - "2:\t.section .fixup,"ax"\n" \ - "3:\tmov\t%3, %1\n" \ - "\tjmp\t2b\n" \ - "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ -- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ -+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\ - : "i" (-EFAULT), "0" (oparg), "1" (0)) - - #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ -+ typecheck(u32 __user *, uaddr); \ - asm volatile("1:\tmovl %2, %0\n" \ - "\tmovl\t%0, %3\n" \ - "\t" insn "\n" \ -@@ -34,7 +36,7 @@ - _ASM_EXTABLE(1b, 4b) \ - _ASM_EXTABLE(2b, 4b) \ - : "=&a" (oldval), "=&r" (ret), \ -- "+m" (*uaddr), "=&r" (tem) \ -+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) - - static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) -@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) - - switch (op) { - case FUTEX_OP_SET: -- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); -+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); - break; - case FUTEX_OP_ADD: -- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, -+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, - uaddr, oparg); - break; - case FUTEX_OP_OR: -@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) - return -EFAULT; - -- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" -+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n" - "2:\t.section .fixup, "ax"\n" - "3:\tmov %3, %0\n" - "\tjmp 2b\n" - "\t.previous\n" - _ASM_EXTABLE(1b, 3b) -- : "+r" (ret), "=a" (oldval), "+m" (*uaddr) -+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user 
*)____m(uaddr)) - : "i" (-EFAULT), "r" (newval), "1" (oldval) - : "memory" - ); -diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h -index 0919905..2cf38d6 100644 ---- a/arch/x86/include/asm/hw_irq.h -+++ b/arch/x86/include/asm/hw_irq.h -@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void); - extern void enable_IO_APIC(void); - - /* Statistics */ --extern atomic_t irq_err_count; --extern atomic_t irq_mis_count; -+extern atomic_unchecked_t irq_err_count; -+extern atomic_unchecked_t irq_mis_count; - - /* EISA */ - extern void eisa_set_level_irq(unsigned int irq); -diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h -index c9e09ea..73888df 100644 ---- a/arch/x86/include/asm/i387.h -+++ b/arch/x86/include/asm/i387.h -@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) - { - int err; - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) -+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE); -+#endif -+ - /* See comment in fxsave() below. */ - #ifdef CONFIG_AS_FXSAVEQ - asm volatile("1: fxrstorq %[fx]\n\t" -@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) - { - int err; - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) -+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE); -+#endif -+ - /* - * Clear the bytes not touched by the fxsave and reserved - * for the SW usage. -@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu) - #endif /* CONFIG_X86_64 */ - - /* We need a safe address that is cheap to find and that is already -- in L1 during context switch. The best choices are unfortunately -- different for UP and SMP */ --#ifdef CONFIG_SMP --#define safe_address (__per_cpu_offset[0]) --#else --#define safe_address (kstat_cpu(0).cpustat.user) --#endif -+ in L1 during context switch. */ -+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0) - - /* - * These must be called with preempt disabled -@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void) - struct thread_info *me = current_thread_info(); - preempt_disable(); - if (me->status & TS_USEDFPU) -- __save_init_fpu(me->task); -+ __save_init_fpu(current); - else - clts(); - } -diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h -index d8e8eef..99f81ae 100644 ---- a/arch/x86/include/asm/io.h -+++ b/arch/x86/include/asm/io.h -@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void); - - #include <linux/vmalloc.h> - -+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE -+static inline int valid_phys_addr_range(unsigned long addr, size_t count) -+{ -+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; -+} -+ -+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) -+{ -+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 
1 : 0; -+} -+ - /* - * Convert a virtual cached pointer to an uncached pointer - */ -diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h -index bba3cf8..06bc8da 100644 ---- a/arch/x86/include/asm/irqflags.h -+++ b/arch/x86/include/asm/irqflags.h -@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void) - sti; \ - sysexit - -+#define GET_CR0_INTO_RDI mov %cr0, %rdi -+#define SET_RDI_INTO_CR0 mov %rdi, %cr0 -+#define GET_CR3_INTO_RDI mov %cr3, %rdi -+#define SET_RDI_INTO_CR3 mov %rdi, %cr3 -+ - #else - #define INTERRUPT_RETURN iret - #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit -diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h -index 5478825..839e88c 100644 ---- a/arch/x86/include/asm/kprobes.h -+++ b/arch/x86/include/asm/kprobes.h -@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t; - #define RELATIVEJUMP_SIZE 5 - #define RELATIVECALL_OPCODE 0xe8 - #define RELATIVE_ADDR_SIZE 4 --#define MAX_STACK_SIZE 64 --#define MIN_STACK_SIZE(ADDR) \ -- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ -- THREAD_SIZE - (unsigned long)(ADDR))) \ -- ? (MAX_STACK_SIZE) \ -- : (((unsigned long)current_thread_info()) + \ -- THREAD_SIZE - (unsigned long)(ADDR))) -+#define MAX_STACK_SIZE 64UL -+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR)) - - #define flush_insn_slot(p) do { } while (0) - -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index dd51c83..66cbfac 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -456,7 +456,7 @@ struct kvm_arch { - unsigned int n_requested_mmu_pages; - unsigned int n_max_mmu_pages; - unsigned int indirect_shadow_pages; -- atomic_t invlpg_counter; -+ atomic_unchecked_t invlpg_counter; - struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; - /* - * Hash table of struct kvm_mmu_page. 
-@@ -636,7 +636,7 @@ struct kvm_x86_ops { - enum x86_intercept_stage stage); - - const struct trace_print_flags *exit_reasons_str; --}; -+} __do_const; - - struct kvm_arch_async_pf { - u32 token; -diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h -index 9cdae5d..300d20f 100644 ---- a/arch/x86/include/asm/local.h -+++ b/arch/x86/include/asm/local.h -@@ -18,26 +18,58 @@ typedef struct { - - static inline void local_inc(local_t *l) - { -- asm volatile(_ASM_INC "%0" -+ asm volatile(_ASM_INC "%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_DEC "%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (l->a.counter)); - } - - static inline void local_dec(local_t *l) - { -- asm volatile(_ASM_DEC "%0" -+ asm volatile(_ASM_DEC "%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_INC "%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (l->a.counter)); - } - - static inline void local_add(long i, local_t *l) - { -- asm volatile(_ASM_ADD "%1,%0" -+ asm volatile(_ASM_ADD "%1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_SUB "%1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (l->a.counter) - : "ir" (i)); - } - - static inline void local_sub(long i, local_t *l) - { -- asm volatile(_ASM_SUB "%1,%0" -+ asm volatile(_ASM_SUB "%1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_ADD "%1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (l->a.counter) - : "ir" (i)); - } -@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l) - { - unsigned char c; - -- asm volatile(_ASM_SUB "%2,%0; sete %1" -+ asm volatile(_ASM_SUB "%2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_ADD "%2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "+m" (l->a.counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; -@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l) - { - unsigned char c; - -- asm volatile(_ASM_DEC "%0; sete %1" -+ asm volatile(_ASM_DEC "%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_INC "%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l) - { - unsigned char c; - -- asm volatile(_ASM_INC "%0; sete %1" -+ asm volatile(_ASM_INC "%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_DEC "%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sete %1\n" - : "+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l) - { - unsigned char c; - -- asm volatile(_ASM_ADD "%2,%0; sets %1" -+ asm volatile(_ASM_ADD "%2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_SUB "%2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ "sets %1\n" - : "+m" (l->a.counter), "=qm" (c) - : "ir" (i) : "memory"); - return c; -@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l) - #endif - /* Modern 486+ processor */ - __i = i; -- asm volatile(_ASM_XADD "%0, %1;" -+ asm volatile(_ASM_XADD "%0, %1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ _ASM_MOV "%0,%1\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+r" (i), "+m" (l->a.counter) - : : "memory"); - return i + __i; -diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h -index 593e51d..fa69c9a 100644 ---- 
a/arch/x86/include/asm/mman.h -+++ b/arch/x86/include/asm/mman.h -@@ -5,4 +5,14 @@ - - #include <asm-generic/mman.h> - -+#ifdef __KERNEL__ -+#ifndef __ASSEMBLY__ -+#ifdef CONFIG_X86_32 -+#define arch_mmap_check i386_mmap_check -+int i386_mmap_check(unsigned long addr, unsigned long len, -+ unsigned long flags); -+#endif -+#endif -+#endif -+ - #endif /* _ASM_X86_MMAN_H */ -diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h -index 5f55e69..e20bfb1 100644 ---- a/arch/x86/include/asm/mmu.h -+++ b/arch/x86/include/asm/mmu.h -@@ -9,7 +9,7 @@ - * we put the segment information here. - */ - typedef struct { -- void *ldt; -+ struct desc_struct *ldt; - int size; - - #ifdef CONFIG_X86_64 -@@ -18,7 +18,19 @@ typedef struct { - #endif - - struct mutex lock; -- void *vdso; -+ unsigned long vdso; -+ -+#ifdef CONFIG_X86_32 -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ unsigned long user_cs_base; -+ unsigned long user_cs_limit; -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) -+ cpumask_t cpu_user_cs_mask; -+#endif -+ -+#endif -+#endif - } mm_context_t; - - #ifdef CONFIG_SMP -diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h -index 6902152..399f3a2 100644 ---- a/arch/x86/include/asm/mmu_context.h -+++ b/arch/x86/include/asm/mmu_context.h -@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm); - - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) - { -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ unsigned int i; -+ pgd_t *pgd; -+ -+ pax_open_kernel(); -+ pgd = get_cpu_pgd(smp_processor_id()); -+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) -+ set_pgd_batched(pgd+i, native_make_pgd(0)); -+ pax_close_kernel(); -+#endif -+ - #ifdef CONFIG_SMP - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) - percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); -@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) - { - unsigned cpu = smp_processor_id(); -+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) -+ int tlbstate = TLBSTATE_OK; -+#endif - - if (likely(prev != next)) { - #ifdef CONFIG_SMP -+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) -+ tlbstate = percpu_read(cpu_tlbstate.state); -+#endif - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - percpu_write(cpu_tlbstate.active_mm, next); - #endif - cpumask_set_cpu(cpu, mm_cpumask(next)); - - /* Re-load page tables */ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ pax_open_kernel(); -+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); -+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); -+ pax_close_kernel(); -+ load_cr3(get_cpu_pgd(cpu)); -+#else - load_cr3(next->pgd); -+#endif - - /* stop flush ipis for the previous mm */ - cpumask_clear_cpu(cpu, mm_cpumask(prev)); -@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - */ - if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); -- } -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) -+ if (!(__supported_pte_mask & _PAGE_NX)) { -+ smp_mb__before_clear_bit(); -+ cpu_clear(cpu, prev->context.cpu_user_cs_mask); -+ smp_mb__after_clear_bit(); -+ cpu_set(cpu, next->context.cpu_user_cs_mask); -+ } -+#endif -+ -+#if defined(CONFIG_X86_32) && 
(defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) -+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || -+ prev->context.user_cs_limit != next->context.user_cs_limit)) -+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); - #ifdef CONFIG_SMP -+ else if (unlikely(tlbstate != TLBSTATE_OK)) -+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); -+#endif -+#endif -+ -+ } - else { -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ pax_open_kernel(); -+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); -+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); -+ pax_close_kernel(); -+ load_cr3(get_cpu_pgd(cpu)); -+#endif -+ -+#ifdef CONFIG_SMP - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); - BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); - -@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - * tlb flush IPI delivery. We must reload CR3 - * to make sure to use no freed page tables. - */ -+ -+#ifndef CONFIG_PAX_PER_CPU_PGD - load_cr3(next->pgd); -+#endif -+ - load_LDT_nolock(&next->context); -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) -+ if (!(__supported_pte_mask & _PAGE_NX)) -+ cpu_set(cpu, next->context.cpu_user_cs_mask); -+#endif -+ -+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) -+#endif -+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); -+#endif -+ - } -+#endif - } --#endif - } - - #define activate_mm(prev, next) \ -diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h -index 9eae775..c914fea 100644 ---- a/arch/x86/include/asm/module.h -+++ b/arch/x86/include/asm/module.h -@@ -5,6 +5,7 @@ - - #ifdef CONFIG_X86_64 - /* X86_64 does not define MODULE_PROC_FAMILY */ -+#define MODULE_PROC_FAMILY "" - #elif defined CONFIG_M386 - #define MODULE_PROC_FAMILY "386 " - #elif defined CONFIG_M486 -@@ -59,8 +60,20 @@ - #error unknown processor family - #endif - --#ifdef CONFIG_X86_32 --# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS -+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS " -+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) -+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " -+#else -+#define MODULE_PAX_KERNEXEC "" - #endif - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+#define MODULE_PAX_UDEREF "UDEREF " -+#else -+#define MODULE_PAX_UDEREF "" -+#endif -+ -+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF -+ - #endif /* _ASM_X86_MODULE_H */ -diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h -index 7639dbf..e08a58c 100644 ---- a/arch/x86/include/asm/page_64_types.h -+++ b/arch/x86/include/asm/page_64_types.h -@@ -56,7 +56,7 @@ void copy_page(void *to, void *from); - - /* duplicated to the one in bootmem.h */ - extern unsigned long max_pfn; --extern unsigned long phys_base; -+extern const unsigned long phys_base; - - extern unsigned long __phys_addr(unsigned long); - #define __phys_reloc_hide(x) (x) -diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h -index a7d2db9..edb023e 100644 ---- a/arch/x86/include/asm/paravirt.h -+++ b/arch/x86/include/asm/paravirt.h -@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) - val); - } - -+static inline void 
set_pgd_batched(pgd_t *pgdp, pgd_t pgd) -+{ -+ pgdval_t val = native_pgd_val(pgd); -+ -+ if (sizeof(pgdval_t) > sizeof(long)) -+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp, -+ val, (u64)val >> 32); -+ else -+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp, -+ val); -+} -+ - static inline void pgd_clear(pgd_t *pgdp) - { - set_pgd(pgdp, __pgd(0)); -@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, - pv_mmu_ops.set_fixmap(idx, phys, flags); - } - -+#ifdef CONFIG_PAX_KERNEXEC -+static inline unsigned long pax_open_kernel(void) -+{ -+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); -+} -+ -+static inline unsigned long pax_close_kernel(void) -+{ -+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); -+} -+#else -+static inline unsigned long pax_open_kernel(void) { return 0; } -+static inline unsigned long pax_close_kernel(void) { return 0; } -+#endif -+ - #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) - - static inline int arch_spin_is_locked(struct arch_spinlock *lock) -@@ -964,7 +991,7 @@ extern void default_banner(void); - - #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) - #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) --#define PARA_INDIRECT(addr) *%cs:addr -+#define PARA_INDIRECT(addr) *%ss:addr - #endif - - #define INTERRUPT_RETURN \ -@@ -1041,6 +1068,21 @@ extern void default_banner(void); - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ - CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) -+ -+#define GET_CR0_INTO_RDI \ -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ -+ mov %rax,%rdi -+ -+#define SET_RDI_INTO_CR0 \ -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) -+ -+#define GET_CR3_INTO_RDI \ -+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ -+ mov %rax,%rdi -+ -+#define SET_RDI_INTO_CR3 \ -+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) -+ - #endif /* CONFIG_X86_32 */ - - #endif /* __ASSEMBLY__ */ -diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h -index 8e8b9a4..f07d725 100644 ---- a/arch/x86/include/asm/paravirt_types.h -+++ b/arch/x86/include/asm/paravirt_types.h -@@ -84,20 +84,20 @@ struct pv_init_ops { - */ - unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, - unsigned long addr, unsigned len); --}; -+} __no_const; - - - struct pv_lazy_ops { - /* Set deferred update mode, used for batching operations. */ - void (*enter)(void); - void (*leave)(void); --}; -+} __no_const; - - struct pv_time_ops { - unsigned long long (*sched_clock)(void); - unsigned long long (*steal_clock)(int cpu); - unsigned long (*get_tsc_khz)(void); --}; -+} __no_const; - - struct pv_cpu_ops { - /* hooks for various privileged instructions */ -@@ -193,7 +193,7 @@ struct pv_cpu_ops { - - void (*start_context_switch)(struct task_struct *prev); - void (*end_context_switch)(struct task_struct *next); --}; -+} __no_const; - - struct pv_irq_ops { - /* -@@ -224,7 +224,7 @@ struct pv_apic_ops { - unsigned long start_eip, - unsigned long start_esp); - #endif --}; -+} __no_const; - - struct pv_mmu_ops { - unsigned long (*read_cr2)(void); -@@ -313,6 +313,7 @@ struct pv_mmu_ops { - struct paravirt_callee_save make_pud; - - void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); -+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval); - #endif /* PAGETABLE_LEVELS == 4 */ - #endif /* PAGETABLE_LEVELS >= 3 */ - -@@ -324,6 +325,12 @@ struct pv_mmu_ops { - an mfn. We can tell which is which from the index. 
*/ - void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, - phys_addr_t phys, pgprot_t flags); -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ unsigned long (*pax_open_kernel)(void); -+ unsigned long (*pax_close_kernel)(void); -+#endif -+ - }; - - struct arch_spinlock; -@@ -334,7 +341,7 @@ struct pv_lock_ops { - void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); - int (*spin_trylock)(struct arch_spinlock *lock); - void (*spin_unlock)(struct arch_spinlock *lock); --}; -+} __no_const; - - /* This contains all the paravirt structures: we get a convenient - * number for each function using the offset which we use to indicate -diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h -index b4389a4..b7ff22c 100644 ---- a/arch/x86/include/asm/pgalloc.h -+++ b/arch/x86/include/asm/pgalloc.h -@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, - pmd_t *pmd, pte_t *pte) - { - paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); -+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); -+} -+ -+static inline void pmd_populate_user(struct mm_struct *mm, -+ pmd_t *pmd, pte_t *pte) -+{ -+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); - set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); - } - -diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h -index 98391db..8f6984e 100644 ---- a/arch/x86/include/asm/pgtable-2level.h -+++ b/arch/x86/include/asm/pgtable-2level.h -@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) - - static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) - { -+ pax_open_kernel(); - *pmdp = pmd; -+ pax_close_kernel(); - } - - static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) -diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h -index effff47..f9e4035 100644 ---- a/arch/x86/include/asm/pgtable-3level.h -+++ b/arch/x86/include/asm/pgtable-3level.h -@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) - - static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) - { -+ pax_open_kernel(); - set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); -+ pax_close_kernel(); - } - - static inline void native_set_pud(pud_t *pudp, pud_t pud) - { -+ pax_open_kernel(); - set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); -+ pax_close_kernel(); - } - - /* -diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h -index 18601c8..3d716d1 100644 ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); - - #ifndef __PAGETABLE_PUD_FOLDED - #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) -+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd) - #define pgd_clear(pgd) native_pgd_clear(pgd) - #endif - -@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); - - #define arch_end_context_switch(prev) do {} while(0) - -+#define pax_open_kernel() native_pax_open_kernel() -+#define pax_close_kernel() native_pax_close_kernel() - #endif /* CONFIG_PARAVIRT */ - -+#define __HAVE_ARCH_PAX_OPEN_KERNEL -+#define __HAVE_ARCH_PAX_CLOSE_KERNEL -+ -+#ifdef CONFIG_PAX_KERNEXEC -+static inline unsigned long native_pax_open_kernel(void) -+{ -+ unsigned long cr0; -+ -+ preempt_disable(); -+ barrier(); -+ cr0 = read_cr0() ^ X86_CR0_WP; -+ BUG_ON(unlikely(cr0 & X86_CR0_WP)); -+ write_cr0(cr0); -+ return cr0 ^ X86_CR0_WP; -+} -+ -+static inline unsigned 
long native_pax_close_kernel(void) -+{ -+ unsigned long cr0; -+ -+ cr0 = read_cr0() ^ X86_CR0_WP; -+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP))); -+ write_cr0(cr0); -+ barrier(); -+ preempt_enable_no_resched(); -+ return cr0 ^ X86_CR0_WP; -+} -+#else -+static inline unsigned long native_pax_open_kernel(void) { return 0; } -+static inline unsigned long native_pax_close_kernel(void) { return 0; } -+#endif -+ - /* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -+static inline int pte_user(pte_t pte) -+{ -+ return pte_val(pte) & _PAGE_USER; -+} -+ - static inline int pte_dirty(pte_t pte) - { - return pte_flags(pte) & _PAGE_DIRTY; -@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte) - return pte_clear_flags(pte, _PAGE_RW); - } - -+static inline pte_t pte_mkread(pte_t pte) -+{ -+ return __pte(pte_val(pte) | _PAGE_USER); -+} -+ - static inline pte_t pte_mkexec(pte_t pte) - { -- return pte_clear_flags(pte, _PAGE_NX); -+#ifdef CONFIG_X86_PAE -+ if (__supported_pte_mask & _PAGE_NX) -+ return pte_clear_flags(pte, _PAGE_NX); -+ else -+#endif -+ return pte_set_flags(pte, _PAGE_USER); -+} -+ -+static inline pte_t pte_exprotect(pte_t pte) -+{ -+#ifdef CONFIG_X86_PAE -+ if (__supported_pte_mask & _PAGE_NX) -+ return pte_set_flags(pte, _PAGE_NX); -+ else -+#endif -+ return pte_clear_flags(pte, _PAGE_USER); - } - - static inline pte_t pte_mkdirty(pte_t pte) -@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr); - #endif - - #ifndef __ASSEMBLY__ -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; -+static inline pgd_t *get_cpu_pgd(unsigned int cpu) -+{ -+ return cpu_pgd[cpu]; -+} -+#endif -+ - #include <linux/mm_types.h> - - static inline int pte_none(pte_t pte) -@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) - - static inline int pgd_bad(pgd_t pgd) - { -- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; -+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; - } - - static inline int pgd_none(pgd_t pgd) -@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd) - * pgd_offset() returns a (pgd_t *) - * pgd_index() is used get the offset into the pgd page's array of pgd_t's; - */ --#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) -+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) -+#endif -+ - /* - * a shortcut which implies the use of the kernel's pgd, instead - * of a process's -@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd) - #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) - #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) - -+#ifdef CONFIG_X86_32 -+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY -+#else -+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT -+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT) -+#else -+#define PAX_USER_SHADOW_BASE (_AC(0,UL)) -+#endif -+ -+#endif -+ - #ifndef __ASSEMBLY__ - - extern int direct_gbpages; -@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, - * dst and src can be on the same page, but the range must not overlap, - * and must not cross a page boundary. 
- */ --static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) -+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) - { -- memcpy(dst, src, count * sizeof(pgd_t)); -+ pax_open_kernel(); -+ while (count--) -+ *dst++ = *src++; -+ pax_close_kernel(); - } - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count); -+#endif -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count); -+#else -+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {} -+#endif - - #include <asm-generic/pgtable.h> - #endif /* __ASSEMBLY__ */ -diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h -index 0c92113..34a77c6 100644 ---- a/arch/x86/include/asm/pgtable_32.h -+++ b/arch/x86/include/asm/pgtable_32.h -@@ -25,9 +25,6 @@ - struct mm_struct; - struct vm_area_struct; - --extern pgd_t swapper_pg_dir[1024]; --extern pgd_t initial_page_table[1024]; -- - static inline void pgtable_cache_init(void) { } - static inline void check_pgt_cache(void) { } - void paging_init(void); -@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); - # include <asm/pgtable-2level.h> - #endif - -+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -+extern pgd_t initial_page_table[PTRS_PER_PGD]; -+#ifdef CONFIG_X86_PAE -+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; -+#endif -+ - #if defined(CONFIG_HIGHPTE) - #define pte_offset_map(dir, address) \ - ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ -@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); - /* Clear a kernel PTE and flush it from the TLB */ - #define kpte_clear_flush(ptep, vaddr) \ - do { \ -+ pax_open_kernel(); \ - pte_clear(&init_mm, (vaddr), (ptep)); \ -+ pax_close_kernel(); \ - __flush_tlb_one((vaddr)); \ - } while (0) - -@@ -74,6 +79,9 @@ do { \ - - #endif /* !__ASSEMBLY__ */ - -+#define HAVE_ARCH_UNMAPPED_AREA -+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -+ - /* - * kern_addr_valid() is (1) for FLATMEM and (0) for - * SPARSEMEM and DISCONTIGMEM -diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h -index ed5903b..c7fe163 100644 ---- a/arch/x86/include/asm/pgtable_32_types.h -+++ b/arch/x86/include/asm/pgtable_32_types.h -@@ -8,7 +8,7 @@ - */ - #ifdef CONFIG_X86_PAE - # include <asm/pgtable-3level_types.h> --# define PMD_SIZE (1UL << PMD_SHIFT) -+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) - # define PMD_MASK (~(PMD_SIZE - 1)) - #else - # include <asm/pgtable-2level_types.h> -@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ - # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) - #endif - -+#ifdef CONFIG_PAX_KERNEXEC -+#ifndef __ASSEMBLY__ -+extern unsigned char MODULES_EXEC_VADDR[]; -+extern unsigned char MODULES_EXEC_END[]; -+#endif -+#include <asm/boot.h> -+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) -+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) -+#else -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+#endif -+ - #define MODULES_VADDR VMALLOC_START - #define MODULES_END VMALLOC_END - #define MODULES_LEN (MODULES_VADDR - MODULES_END) -diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h -index 975f709..107976d 100644 ---- a/arch/x86/include/asm/pgtable_64.h -+++ b/arch/x86/include/asm/pgtable_64.h -@@ -16,10 +16,14 @@ - - extern 
pud_t level3_kernel_pgt[512]; - extern pud_t level3_ident_pgt[512]; -+extern pud_t level3_vmalloc_start_pgt[512]; -+extern pud_t level3_vmalloc_end_pgt[512]; -+extern pud_t level3_vmemmap_pgt[512]; -+extern pud_t level2_vmemmap_pgt[512]; - extern pmd_t level2_kernel_pgt[512]; - extern pmd_t level2_fixmap_pgt[512]; --extern pmd_t level2_ident_pgt[512]; --extern pgd_t init_level4_pgt[]; -+extern pmd_t level2_ident_pgt[512*2]; -+extern pgd_t init_level4_pgt[512]; - - #define swapper_pg_dir init_level4_pgt - -@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) - - static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) - { -+ pax_open_kernel(); - *pmdp = pmd; -+ pax_close_kernel(); - } - - static inline void native_pmd_clear(pmd_t *pmd) -@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud) - - static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) - { -+ pax_open_kernel(); -+ *pgdp = pgd; -+ pax_close_kernel(); -+} -+ -+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd) -+{ - *pgdp = pgd; - } - -diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h -index 766ea16..5b96cb3 100644 ---- a/arch/x86/include/asm/pgtable_64_types.h -+++ b/arch/x86/include/asm/pgtable_64_types.h -@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t; - #define MODULES_VADDR _AC(0xffffffffa0000000, UL) - #define MODULES_END _AC(0xffffffffff000000, UL) - #define MODULES_LEN (MODULES_END - MODULES_VADDR) -+#define MODULES_EXEC_VADDR MODULES_VADDR -+#define MODULES_EXEC_END MODULES_END -+ -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) - - #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ -diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h -index 013286a..8b42f4f 100644 ---- a/arch/x86/include/asm/pgtable_types.h -+++ b/arch/x86/include/asm/pgtable_types.h -@@ -16,13 +16,12 @@ - #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ - #define _PAGE_BIT_PAT 7 /* on 4KB pages */ - #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ --#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ -+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ - #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ - #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ - #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ --#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 --#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 --#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ -+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL -+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */ - #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ - - /* If _PAGE_BIT_PRESENT is clear, we use these: */ -@@ -40,7 +39,6 @@ - #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) - #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) - #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) --#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) - #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) - #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) - #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) -@@ -57,8 +55,10 @@ - - #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) - #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) --#else -+#elif defined(CONFIG_KMEMCHECK) - #define _PAGE_NX (_AT(pteval_t, 0)) -+#else -+#define _PAGE_NX (_AT(pteval_t, 1) << 
_PAGE_BIT_HIDDEN) - #endif - - #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) -@@ -96,6 +96,9 @@ - #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_ACCESSED) - -+#define PAGE_READONLY_NOEXEC PAGE_READONLY -+#define PAGE_SHARED_NOEXEC PAGE_SHARED -+ - #define __PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) - #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) -@@ -106,7 +109,7 @@ - #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) - #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) - #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) --#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) -+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) - #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) - #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT) - #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) -@@ -168,8 +171,8 @@ - * bits are combined, this will alow user to access the high address mapped - * VDSO in the presence of CONFIG_COMPAT_VDSO - */ --#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ --#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ -+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ -+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ - #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ - #endif - -@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) - { - return native_pgd_val(pgd) & PTE_FLAGS_MASK; - } -+#endif - -+#if PAGETABLE_LEVELS == 3 -+#include <asm-generic/pgtable-nopud.h> -+#endif -+ -+#if PAGETABLE_LEVELS == 2 -+#include <asm-generic/pgtable-nopmd.h> -+#endif -+ -+#ifndef __ASSEMBLY__ - #if PAGETABLE_LEVELS > 3 - typedef struct { pudval_t pud; } pud_t; - -@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud) - return pud.pud; - } - #else --#include <asm-generic/pgtable-nopud.h> -- - static inline pudval_t native_pud_val(pud_t pud) - { - return native_pgd_val(pud.pgd); -@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) - return pmd.pmd; - } - #else --#include <asm-generic/pgtable-nopmd.h> -- - static inline pmdval_t native_pmd_val(pmd_t pmd) - { - return native_pgd_val(pmd.pud.pgd); -@@ -283,7 +292,6 @@ typedef struct page *pgtable_t; - - extern pteval_t __supported_pte_mask; - extern void set_nx(void); --extern int nx_enabled; - - #define pgprot_writecombine pgprot_writecombine - extern pgprot_t pgprot_writecombine(pgprot_t prot); -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index 0d1171c..36571a9 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -266,7 +266,7 @@ struct tss_struct { - - } ____cacheline_aligned; - --DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); -+extern struct tss_struct init_tss[NR_CPUS]; - - /* - * Save the original ist values for checking stack pointers during debugging -@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x) - */ - #define TASK_SIZE PAGE_OFFSET - #define TASK_SIZE_MAX TASK_SIZE -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) -+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) -+#else - #define STACK_TOP TASK_SIZE --#define STACK_TOP_MAX STACK_TOP -+#endif -+ -+#define STACK_TOP_MAX TASK_SIZE - - #define INIT_THREAD { \ -- .sp0 = sizeof(init_stack) + (long)&init_stack, \ -+ 
.sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ - .vm86_info = NULL, \ - .sysenter_cs = __KERNEL_CS, \ - .io_bitmap_ptr = NULL, \ -@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x) - */ - #define INIT_TSS { \ - .x86_tss = { \ -- .sp0 = sizeof(init_stack) + (long)&init_stack, \ -+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ -@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x) - extern unsigned long thread_saved_pc(struct task_struct *tsk); - - #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) --#define KSTK_TOP(info) \ --({ \ -- unsigned long *__ptr = (unsigned long *)(info); \ -- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ --}) -+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0) - - /* - * The below -8 is to reserve 8 bytes on top of the ring0 stack. -@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); - #define task_pt_regs(task) \ - ({ \ - struct pt_regs *__regs__; \ -- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ -+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \ - __regs__ - 1; \ - }) - -@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); - /* - * User space process size. 47bits minus one guard page. - */ --#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) -+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) - - /* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ - #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ -- 0xc0000000 : 0xFFFFe000) -+ 0xc0000000 : 0xFFFFf000) - - #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ - IA32_PAGE_OFFSET : TASK_SIZE_MAX) -@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); - #define STACK_TOP_MAX TASK_SIZE_MAX - - #define INIT_THREAD { \ -- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ -+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ - } - - #define INIT_TSS { \ -- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ -+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ - } - - /* -@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, - */ - #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) - -+#ifdef CONFIG_PAX_SEGMEXEC -+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) -+#endif -+ - #define KSTK_EIP(task) (task_pt_regs(task)->ip) - - /* Get/set a process' ability to use the timestamp counter instruction */ -diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h -index 3566454..4bdfb8c 100644 ---- a/arch/x86/include/asm/ptrace.h -+++ b/arch/x86/include/asm/ptrace.h -@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) - } - - /* -- * user_mode_vm(regs) determines whether a register set came from user mode. -+ * user_mode(regs) determines whether a register set came from user mode. - * This is true if V8086 mode was enabled OR if the register set was from - * protected mode with RPL-3 CS value. This tricky test checks that with - * one comparison. Many places in the kernel can bypass this full check -- * if they have already ruled out V8086 mode, so user_mode(regs) can be used. -+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can -+ * be used. 
- */ --static inline int user_mode(struct pt_regs *regs) -+static inline int user_mode_novm(struct pt_regs *regs) - { - #ifdef CONFIG_X86_32 - return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; - #else -- return !!(regs->cs & 3); -+ return !!(regs->cs & SEGMENT_RPL_MASK); - #endif - } - --static inline int user_mode_vm(struct pt_regs *regs) -+static inline int user_mode(struct pt_regs *regs) - { - #ifdef CONFIG_X86_32 - return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= - USER_RPL; - #else -- return user_mode(regs); -+ return user_mode_novm(regs); - #endif - } - -@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs) - #ifdef CONFIG_X86_64 - static inline bool user_64bit_mode(struct pt_regs *regs) - { -+ unsigned long cs = regs->cs & 0xffff; - #ifndef CONFIG_PARAVIRT - /* - * On non-paravirt systems, this is the only long mode CPL 3 - * selector. We do not allow long mode selectors in the LDT. - */ -- return regs->cs == __USER_CS; -+ return cs == __USER_CS; - #else - /* Headers are too twisted for this to go in paravirt.h. */ -- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; -+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs; - #endif - } - #endif -diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h -index 3250e3d..20db631 100644 ---- a/arch/x86/include/asm/reboot.h -+++ b/arch/x86/include/asm/reboot.h -@@ -6,19 +6,19 @@ - struct pt_regs; - - struct machine_ops { -- void (*restart)(char *cmd); -- void (*halt)(void); -- void (*power_off)(void); -+ void (* __noreturn restart)(char *cmd); -+ void (* __noreturn halt)(void); -+ void (* __noreturn power_off)(void); - void (*shutdown)(void); - void (*crash_shutdown)(struct pt_regs *); -- void (*emergency_restart)(void); --}; -+ void (* __noreturn emergency_restart)(void); -+} __no_const; - - extern struct machine_ops machine_ops; - - void native_machine_crash_shutdown(struct pt_regs *regs); - void native_machine_shutdown(void); --void machine_real_restart(unsigned int type); -+void machine_real_restart(unsigned int type) __noreturn; - /* These must match dispatch_table in reboot_32.S */ - #define MRR_BIOS 0 - #define MRR_APM 1 -diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h -index df4cd32..27ae072 100644 ---- a/arch/x86/include/asm/rwsem.h -+++ b/arch/x86/include/asm/rwsem.h -@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem) - { - asm volatile("# beginning down_read\n\t" - LOCK_PREFIX _ASM_INC "(%1)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX _ASM_DEC "(%1)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - /* adds 0x00000001 */ - " jns 1f\n" - " call call_rwsem_down_read_failed\n" -@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) - "1:\n\t" - " mov %1,%2\n\t" - " add %3,%2\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "sub %3,%2\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - " jle 2f\n\t" - LOCK_PREFIX " cmpxchg %2,%0\n\t" - " jnz 1b\n\t" -@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) - long tmp; - asm volatile("# beginning down_write\n\t" - LOCK_PREFIX " xadd %1,(%2)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "mov %1,(%2)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - /* adds 0xffff0001, returns the old value */ - " test %1,%1\n\t" - /* was the count 0 before? 
*/ -@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem) - long tmp; - asm volatile("# beginning __up_read\n\t" - LOCK_PREFIX " xadd %1,(%2)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "mov %1,(%2)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - /* subtracts 1, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" /* expects old value in %edx */ -@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem) - long tmp; - asm volatile("# beginning __up_write\n\t" - LOCK_PREFIX " xadd %1,(%2)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "mov %1,(%2)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - /* subtracts 0xffff0001, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" /* expects old value in %edx */ -@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) - { - asm volatile("# beginning __downgrade_write\n\t" - LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - /* - * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) - * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) -@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem) - */ - static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) - { -- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" -+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX _ASM_SUB "%1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (sem->count) - : "er" (delta)); - } -@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) - { - long tmp = delta; - -- asm volatile(LOCK_PREFIX "xadd %0,%1" -+ asm volatile(LOCK_PREFIX "xadd %0,%1\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "mov %0,%1\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+r" (tmp), "+m" (sem->count) - : : "memory"); - -diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h -index 5e64171..f58957e 100644 ---- a/arch/x86/include/asm/segment.h -+++ b/arch/x86/include/asm/segment.h -@@ -64,10 +64,15 @@ - * 26 - ESPFIX small SS - * 27 - per-cpu [ offset to per-cpu data area ] - * 28 - stack_canary-20 [ for stack protector ] -- * 29 - unused -- * 30 - unused -+ * 29 - PCI BIOS CS -+ * 30 - PCI BIOS DS - * 31 - TSS for double fault handler - */ -+#define GDT_ENTRY_KERNEXEC_EFI_CS (1) -+#define GDT_ENTRY_KERNEXEC_EFI_DS (2) -+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8) -+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8) -+ - #define GDT_ENTRY_TLS_MIN 6 - #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) - -@@ -79,6 +84,8 @@ - - #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) - -+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) -+ - #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) - - #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) -@@ -104,6 +111,12 @@ - #define __KERNEL_STACK_CANARY 0 - #endif - -+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17) -+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) -+ -+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18) -+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) -+ - #define GDT_ENTRY_DOUBLEFAULT_TSS 31 - - /* -@@ -141,7 +154,7 @@ - */ - - /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ --#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 
8) -+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) - - - #else -@@ -165,6 +178,8 @@ - #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) - #define __USER32_DS __USER_DS - -+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 -+ - #define GDT_ENTRY_TSS 8 /* needs two entries */ - #define GDT_ENTRY_LDT 10 /* needs two entries */ - #define GDT_ENTRY_TLS_MIN 12 -@@ -185,6 +200,7 @@ - #endif - - #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) -+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) - #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) - #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) - #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) -diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h -index 73b11bc..d4a3b63 100644 ---- a/arch/x86/include/asm/smp.h -+++ b/arch/x86/include/asm/smp.h -@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); - /* cpus sharing the last level cache: */ - DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); - DECLARE_PER_CPU(u16, cpu_llc_id); --DECLARE_PER_CPU(int, cpu_number); -+DECLARE_PER_CPU(unsigned int, cpu_number); - - static inline struct cpumask *cpu_sibling_mask(int cpu) - { -@@ -77,7 +77,7 @@ struct smp_ops { - - void (*send_call_func_ipi)(const struct cpumask *mask); - void (*send_call_func_single_ipi)(int cpu); --}; -+} __no_const; - - /* Globals due to paravirt */ - extern void set_cpu_sibling_map(int cpu); -@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata; - extern int safe_smp_processor_id(void); - - #elif defined(CONFIG_X86_64_SMP) --#define raw_smp_processor_id() (percpu_read(cpu_number)) -- --#define stack_smp_processor_id() \ --({ \ -- struct thread_info *ti; \ -- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ -- ti->cpu; \ --}) -+#define raw_smp_processor_id() (percpu_read(cpu_number)) -+#define stack_smp_processor_id() raw_smp_processor_id() - #define safe_smp_processor_id() smp_processor_id() - - #endif -diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h -index ee67edf..49c796b 100644 ---- a/arch/x86/include/asm/spinlock.h -+++ b/arch/x86/include/asm/spinlock.h -@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock) - static inline void arch_read_lock(arch_rwlock_t *rw) - { - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - "jns 1f\n" - "call __read_lock_failed\n\t" - "1:\n" -@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) - static inline void arch_write_lock(arch_rwlock_t *rw) - { - asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - "jz 1f\n" - "call __write_lock_failed\n\t" - "1:\n" -@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) - - static inline void arch_read_unlock(arch_rwlock_t *rw) - { -- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" -+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - :"+m" (rw->lock) : : "memory"); - } - - static inline void arch_write_unlock(arch_rwlock_t *rw) - { -- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" -+ asm volatile(LOCK_PREFIX 
WRITE_LOCK_ADD(%1) "%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ - : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); - } - -diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h -index 1575177..cb23f52 100644 ---- a/arch/x86/include/asm/stackprotector.h -+++ b/arch/x86/include/asm/stackprotector.h -@@ -48,7 +48,7 @@ - * head_32 for boot CPU and setup_per_cpu_areas() for others. - */ - #define GDT_STACK_CANARY_INIT \ -- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), -+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17), - - /* - * Initialize the stackprotector canary value. -@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu) - - static inline void load_stack_canary_segment(void) - { --#ifdef CONFIG_X86_32 -+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) - asm volatile ("mov %0, %%gs" : : "r" (0)); - #endif - } -diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h -index 70bbe39..4ae2bd4 100644 ---- a/arch/x86/include/asm/stacktrace.h -+++ b/arch/x86/include/asm/stacktrace.h -@@ -11,28 +11,20 @@ - - extern int kstack_depth_to_print; - --struct thread_info; -+struct task_struct; - struct stacktrace_ops; - --typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, -- unsigned long *stack, -- unsigned long bp, -- const struct stacktrace_ops *ops, -- void *data, -- unsigned long *end, -- int *graph); -+typedef unsigned long walk_stack_t(struct task_struct *task, -+ void *stack_start, -+ unsigned long *stack, -+ unsigned long bp, -+ const struct stacktrace_ops *ops, -+ void *data, -+ unsigned long *end, -+ int *graph); - --extern unsigned long --print_context_stack(struct thread_info *tinfo, -- unsigned long *stack, unsigned long bp, -- const struct stacktrace_ops *ops, void *data, -- unsigned long *end, int *graph); -- --extern unsigned long --print_context_stack_bp(struct thread_info *tinfo, -- unsigned long *stack, unsigned long bp, -- const struct stacktrace_ops *ops, void *data, -- unsigned long *end, int *graph); -+extern walk_stack_t print_context_stack; -+extern walk_stack_t print_context_stack_bp; - - /* Generic stack tracer with callbacks */ - -@@ -40,7 +32,7 @@ struct stacktrace_ops { - void (*address)(void *data, unsigned long address, int reliable); - /* On negative return stop dumping */ - int (*stack)(void *data, char *name); -- walk_stack_t walk_stack; -+ walk_stack_t *walk_stack; - }; - - void dump_trace(struct task_struct *tsk, struct pt_regs *regs, -diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h -index cb23852..2dde194 100644 ---- a/arch/x86/include/asm/sys_ia32.h -+++ b/arch/x86/include/asm/sys_ia32.h -@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, - compat_sigset_t __user *, unsigned int); - asmlinkage long sys32_alarm(unsigned int); - --asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); -+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); - asmlinkage long sys32_sysfs(int, u32, u32); - - asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, -diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h -index c2ff2a1..4349184 100644 ---- a/arch/x86/include/asm/system.h -+++ b/arch/x86/include/asm/system.h -@@ -129,7 +129,7 @@ do { \ - "call __switch_to\n\t" \ - "movq "__percpu_arg([current_task])",%%rsi\n\t" \ - 
__switch_canary \ -- "movq %P[thread_info](%%rsi),%%r8\n\t" \ -+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \ - "movq %%rax,%%rdi\n\t" \ - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ - "jnz ret_from_fork\n\t" \ -@@ -140,7 +140,7 @@ do { \ - [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ - [ti_flags] "i" (offsetof(struct thread_info, flags)), \ - [_tif_fork] "i" (_TIF_FORK), \ -- [thread_info] "i" (offsetof(struct task_struct, stack)), \ -+ [thread_info] "m" (current_tinfo), \ - [current_task] "m" (current_task) \ - __switch_canary_iparam \ - : "memory", "cc" __EXTRA_CLOBBER) -@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment) - { - unsigned long __limit; - asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); -- return __limit + 1; -+ return __limit; - } - - static inline void native_clts(void) -@@ -397,12 +397,12 @@ void enable_hlt(void); - - void cpu_idle_wait(void); - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - - void default_idle(void); - --void stop_this_cpu(void *dummy); -+void stop_this_cpu(void *dummy) __noreturn; - - /* - * Force strict CPU ordering. -diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h -index a1fe5c1..ee326d8 100644 ---- a/arch/x86/include/asm/thread_info.h -+++ b/arch/x86/include/asm/thread_info.h -@@ -10,6 +10,7 @@ - #include <linux/compiler.h> - #include <asm/page.h> - #include <asm/types.h> -+#include <asm/percpu.h> - - /* - * low level task data that entry.S needs immediate access to -@@ -24,7 +25,6 @@ struct exec_domain; - #include <linux/atomic.h> - - struct thread_info { -- struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ - __u32 flags; /* low level flags */ - __u32 status; /* thread synchronous flags */ -@@ -34,18 +34,12 @@ struct thread_info { - mm_segment_t addr_limit; - struct restart_block restart_block; - void __user *sysenter_return; --#ifdef CONFIG_X86_32 -- unsigned long previous_esp; /* ESP of the previous stack in -- case of nested (IRQ) stacks -- */ -- __u8 supervisor_stack[0]; --#endif -+ unsigned long lowest_stack; - int uaccess_err; - }; - --#define INIT_THREAD_INFO(tsk) \ -+#define INIT_THREAD_INFO \ - { \ -- .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .cpu = 0, \ -@@ -56,7 +50,7 @@ struct thread_info { - }, \ - } - --#define init_thread_info (init_thread_union.thread_info) -+#define init_thread_info (init_thread_union.stack) - #define init_stack (init_thread_union.stack) - - #else /* !__ASSEMBLY__ */ -@@ -170,45 +164,40 @@ struct thread_info { - ret; \ - }) - --#ifdef CONFIG_X86_32 -- --#define STACK_WARN (THREAD_SIZE/8) --/* -- * macros/functions for gaining access to the thread information structure -- * -- * preempt_count needs to be 1 initially, until the scheduler is functional. 
-- */ --#ifndef __ASSEMBLY__ -- -- --/* how to get the current stack pointer from C */ --register unsigned long current_stack_pointer asm("esp") __used; -- --/* how to get the thread information struct from C */ --static inline struct thread_info *current_thread_info(void) --{ -- return (struct thread_info *) -- (current_stack_pointer & ~(THREAD_SIZE - 1)); --} -- --#else /* !__ASSEMBLY__ */ -- -+#ifdef __ASSEMBLY__ - /* how to get the thread information struct from ASM */ - #define GET_THREAD_INFO(reg) \ -- movl $-THREAD_SIZE, reg; \ -- andl %esp, reg -+ mov PER_CPU_VAR(current_tinfo), reg - - /* use this one if reg already contains %esp */ --#define GET_THREAD_INFO_WITH_ESP(reg) \ -- andl $-THREAD_SIZE, reg -+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg) -+#else -+/* how to get the thread information struct from C */ -+DECLARE_PER_CPU(struct thread_info *, current_tinfo); -+ -+static __always_inline struct thread_info *current_thread_info(void) -+{ -+ return percpu_read_stable(current_tinfo); -+} -+#endif -+ -+#ifdef CONFIG_X86_32 -+ -+#define STACK_WARN (THREAD_SIZE/8) -+/* -+ * macros/functions for gaining access to the thread information structure -+ * -+ * preempt_count needs to be 1 initially, until the scheduler is functional. -+ */ -+#ifndef __ASSEMBLY__ -+ -+/* how to get the current stack pointer from C */ -+register unsigned long current_stack_pointer asm("esp") __used; - - #endif - - #else /* X86_32 */ - --#include <asm/percpu.h> --#define KERNEL_STACK_OFFSET (5*8) -- - /* - * macros/functions for gaining access to the thread information structure - * preempt_count needs to be 1 initially, until the scheduler is functional. -@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void) - #ifndef __ASSEMBLY__ - DECLARE_PER_CPU(unsigned long, kernel_stack); - --static inline struct thread_info *current_thread_info(void) --{ -- struct thread_info *ti; -- ti = (void *)(percpu_read_stable(kernel_stack) + -- KERNEL_STACK_OFFSET - THREAD_SIZE); -- return ti; --} -- --#else /* !__ASSEMBLY__ */ -- --/* how to get the thread information struct from ASM */ --#define GET_THREAD_INFO(reg) \ -- movq PER_CPU_VAR(kernel_stack),reg ; \ -- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg -- -+/* how to get the current stack pointer from C */ -+register unsigned long current_stack_pointer asm("rsp") __used; - #endif - - #endif /* !X86_32 */ -@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void); - extern void free_thread_info(struct thread_info *ti); - extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); - #define arch_task_cache_init arch_task_cache_init -+ -+#define __HAVE_THREAD_FUNCTIONS -+#define task_thread_info(task) (&(task)->tinfo) -+#define task_stack_page(task) ((task)->stack) -+#define setup_thread_stack(p, org) do {} while (0) -+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1) -+ -+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -+extern struct task_struct *alloc_task_struct_node(int node); -+extern void free_task_struct(struct task_struct *); -+ - #endif - #endif /* _ASM_X86_THREAD_INFO_H */ -diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h -index 36361bf..324f262 100644 ---- a/arch/x86/include/asm/uaccess.h -+++ b/arch/x86/include/asm/uaccess.h -@@ -7,12 +7,15 @@ - #include <linux/compiler.h> - #include <linux/thread_info.h> - #include <linux/string.h> -+#include <linux/sched.h> - #include <asm/asm.h> - #include <asm/page.h> - - #define VERIFY_READ 0 - #define VERIFY_WRITE 1 - 
-+extern void check_object_size(const void *ptr, unsigned long n, bool to); -+ - /* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with -@@ -28,7 +31,12 @@ - - #define get_ds() (KERNEL_DS) - #define get_fs() (current_thread_info()->addr_limit) -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+void __set_fs(mm_segment_t x); -+void set_fs(mm_segment_t x); -+#else - #define set_fs(x) (current_thread_info()->addr_limit = (x)) -+#endif - - #define segment_eq(a, b) ((a).seg == (b).seg) - -@@ -76,7 +84,33 @@ - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. - */ --#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) -+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) -+#define access_ok(type, addr, size) \ -+({ \ -+ long __size = size; \ -+ unsigned long __addr = (unsigned long)addr; \ -+ unsigned long __addr_ao = __addr & PAGE_MASK; \ -+ unsigned long __end_ao = __addr + __size - 1; \ -+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \ -+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ -+ while(__addr_ao <= __end_ao) { \ -+ char __c_ao; \ -+ __addr_ao += PAGE_SIZE; \ -+ if (__size > PAGE_SIZE) \ -+ cond_resched(); \ -+ if (__get_user(__c_ao, (char __user *)__addr)) \ -+ break; \ -+ if (type != VERIFY_WRITE) { \ -+ __addr = __addr_ao; \ -+ continue; \ -+ } \ -+ if (__put_user(__c_ao, (char __user *)__addr)) \ -+ break; \ -+ __addr = __addr_ao; \ -+ } \ -+ } \ -+ __ret_ao; \ -+}) - - /* - * The exception table consists of pairs of addresses: the first is the -@@ -182,12 +216,20 @@ extern int __get_user_bad(void); - asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ - : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") - -- -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __copyuser_seg "gs;" -+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" -+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" -+#else -+#define __copyuser_seg -+#define __COPYUSER_SET_ES -+#define __COPYUSER_RESTORE_ES -+#endif - - #ifdef CONFIG_X86_32 - #define __put_user_asm_u64(x, addr, err, errret) \ -- asm volatile("1: movl %%eax,0(%2)\n" \ -- "2: movl %%edx,4(%2)\n" \ -+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \ -+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ - "3:\n" \ - ".section .fixup,"ax"\n" \ - "4: movl %3,%0\n" \ -@@ -199,8 +241,8 @@ extern int __get_user_bad(void); - : "A" (x), "r" (addr), "i" (errret), "0" (err)) - - #define __put_user_asm_ex_u64(x, addr) \ -- asm volatile("1: movl %%eax,0(%1)\n" \ -- "2: movl %%edx,4(%1)\n" \ -+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \ -+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ - "3:\n" \ - _ASM_EXTABLE(1b, 2b - 1b) \ - _ASM_EXTABLE(2b, 3b - 2b) \ -@@ -252,7 +294,7 @@ extern void __put_user_8(void); - __typeof__(*(ptr)) __pu_val; \ - __chk_user_ptr(ptr); \ - might_fault(); \ -- __pu_val = x; \ -+ __pu_val = (x); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __put_user_x(1, __pu_val, ptr, __ret_pu); \ -@@ -373,7 +415,7 @@ do { \ - } while (0) - - #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ -- asm volatile("1: mov"itype" %2,%"rtype"1\n" \ -+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ - "2:\n" \ - ".section .fixup,"ax"\n" \ - "3: mov %3,%0\n" \ -@@ -381,7 +423,7 @@ do { \ - " jmp 2b\n" \ - ".previous\n" \ 
- _ASM_EXTABLE(1b, 3b) \ -- : "=r" (err), ltype(x) \ -+ : "=r" (err), ltype (x) \ - : "m" (__m(addr)), "i" (errret), "0" (err)) - - #define __get_user_size_ex(x, ptr, size) \ -@@ -406,7 +448,7 @@ do { \ - } while (0) - - #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ -- asm volatile("1: mov"itype" %1,%"rtype"0\n" \ -+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ - "2:\n" \ - _ASM_EXTABLE(1b, 2b - 1b) \ - : ltype(x) : "m" (__m(addr))) -@@ -423,13 +465,24 @@ do { \ - int __gu_err; \ - unsigned long __gu_val; \ - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ -- (x) = (__force __typeof__(*(ptr)))__gu_val; \ -+ (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ - }) - - /* FIXME: this hack is definitely wrong -AK */ - struct __large_struct { unsigned long buf[100]; }; --#define __m(x) (*(struct __large_struct __user *)(x)) -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define ____m(x) \ -+({ \ -+ unsigned long ____x = (unsigned long)(x); \ -+ if (____x < PAX_USER_SHADOW_BASE) \ -+ ____x += PAX_USER_SHADOW_BASE; \ -+ (void __user *)____x; \ -+}) -+#else -+#define ____m(x) (x) -+#endif -+#define __m(x) (*(struct __large_struct __user *)____m(x)) - - /* - * Tell gcc we read from memory instead of writing: this is because -@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; }; - * aliasing issues. - */ - #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ -- asm volatile("1: mov"itype" %"rtype"1,%2\n" \ -+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ - "2:\n" \ - ".section .fixup,"ax"\n" \ - "3: mov %3,%0\n" \ -@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; }; - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : "=r"(err) \ -- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) -+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) - - #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ -- asm volatile("1: mov"itype" %"rtype"0,%1\n" \ -+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ - "2:\n" \ - _ASM_EXTABLE(1b, 2b - 1b) \ - : : ltype(x), "m" (__m(addr))) -@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; }; - * On error, the variable @x is set to zero. - */ - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __get_user(x, ptr) get_user((x), (ptr)) -+#else - #define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) -+#endif - - /** - * __put_user: - Write a simple value into user space, with less checking. -@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; }; - * Returns zero on success, or -EFAULT on error. 
- */ - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __put_user(x, ptr) put_user((x), (ptr)) -+#else - #define __put_user(x, ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -+#endif - - #define __get_user_unaligned __get_user - #define __put_user_unaligned __put_user -@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; }; - #define get_user_ex(x, ptr) do { \ - unsigned long __gue_val; \ - __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ -- (x) = (__force __typeof__(*(ptr)))__gue_val; \ -+ (x) = (__typeof__(*(ptr)))__gue_val; \ - } while (0) - - #ifdef CONFIG_X86_WP_WORKS_OK -diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h -index 566e803..89f1e60 100644 ---- a/arch/x86/include/asm/uaccess_32.h -+++ b/arch/x86/include/asm/uaccess_32.h -@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero - static __always_inline unsigned long __must_check - __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) - { -+ pax_track_stack(); -+ -+ if ((long)n < 0) -+ return n; -+ - if (__builtin_constant_p(n)) { - unsigned long ret; - -@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) - return ret; - } - } -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); - return __copy_to_user_ll(to, from, n); - } - -@@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check - __copy_to_user(void __user *to, const void *from, unsigned long n) - { - might_fault(); -+ - return __copy_to_user_inatomic(to, from, n); - } - - static __always_inline unsigned long - __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - /* Avoid zeroing the tail if the copy fails.. 
- * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, - * but as the zeroing behaviour is only significant when n is not -@@ -137,6 +148,12 @@ static __always_inline unsigned long - __copy_from_user(void *to, const void __user *from, unsigned long n) - { - might_fault(); -+ -+ pax_track_stack(); -+ -+ if ((long)n < 0) -+ return n; -+ - if (__builtin_constant_p(n)) { - unsigned long ret; - -@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) - return ret; - } - } -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); - return __copy_from_user_ll(to, from, n); - } - -@@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, - const void __user *from, unsigned long n) - { - might_fault(); -+ -+ if ((long)n < 0) -+ return n; -+ - if (__builtin_constant_p(n)) { - unsigned long ret; - -@@ -181,15 +204,19 @@ static __always_inline unsigned long - __copy_from_user_inatomic_nocache(void *to, const void __user *from, - unsigned long n) - { -- return __copy_from_user_ll_nocache_nozero(to, from, n); -+ if ((long)n < 0) -+ return n; -+ -+ return __copy_from_user_ll_nocache_nozero(to, from, n); - } - --unsigned long __must_check copy_to_user(void __user *to, -- const void *from, unsigned long n); --unsigned long __must_check _copy_from_user(void *to, -- const void __user *from, -- unsigned long n); -- -+extern void copy_to_user_overflow(void) -+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS -+ __compiletime_error("copy_to_user() buffer size is not provably correct") -+#else -+ __compiletime_warning("copy_to_user() buffer size is not provably correct") -+#endif -+; - - extern void copy_from_user_overflow(void) - #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS -@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void) - #endif - ; - --static inline unsigned long __must_check copy_from_user(void *to, -- const void __user *from, -- unsigned long n) -+/** -+ * copy_to_user: - Copy a block of data into user space. -+ * @to: Destination address, in user space. -+ * @from: Source address, in kernel space. -+ * @n: Number of bytes to copy. -+ * -+ * Context: User context only. This function may sleep. -+ * -+ * Copy data from kernel space to user space. -+ * -+ * Returns number of bytes that could not be copied. -+ * On success, this will be zero. -+ */ -+static inline unsigned long __must_check -+copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ int sz = __compiletime_object_size(from); -+ -+ if (unlikely(sz != -1 && sz < n)) -+ copy_to_user_overflow(); -+ else if (access_ok(VERIFY_WRITE, to, n)) -+ n = __copy_to_user(to, from, n); -+ return n; -+} -+ -+/** -+ * copy_from_user: - Copy a block of data from user space. -+ * @to: Destination address, in kernel space. -+ * @from: Source address, in user space. -+ * @n: Number of bytes to copy. -+ * -+ * Context: User context only. This function may sleep. -+ * -+ * Copy data from user space to kernel space. -+ * -+ * Returns number of bytes that could not be copied. -+ * On success, this will be zero. -+ * -+ * If some data could not be copied, this function will pad the copied -+ * data to the requested size using zero bytes. 
-+ */ -+static inline unsigned long __must_check -+copy_from_user(void *to, const void __user *from, unsigned long n) - { - int sz = __compiletime_object_size(to); - -- if (likely(sz == -1 || sz >= n)) -- n = _copy_from_user(to, from, n); -- else -+ if (unlikely(sz != -1 && sz < n)) - copy_from_user_overflow(); -- -+ else if (access_ok(VERIFY_READ, from, n)) -+ n = __copy_from_user(to, from, n); -+ else if ((long)n > 0) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ memset(to, 0, n); -+ } - return n; - } - -diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h -index 1c66d30..59bd7d4 100644 ---- a/arch/x86/include/asm/uaccess_64.h -+++ b/arch/x86/include/asm/uaccess_64.h -@@ -10,6 +10,9 @@ - #include <asm/alternative.h> - #include <asm/cpufeature.h> - #include <asm/page.h> -+#include <asm/pgtable.h> -+ -+#define set_fs(x) (current_thread_info()->addr_limit = (x)) - - /* - * Copy To/From Userspace -@@ -17,12 +20,12 @@ - - /* Handles exceptions in both to and from, but doesn't do access_ok */ - __must_check unsigned long --copy_user_generic_string(void *to, const void *from, unsigned len); -+copy_user_generic_string(void *to, const void *from, unsigned long len); - __must_check unsigned long --copy_user_generic_unrolled(void *to, const void *from, unsigned len); -+copy_user_generic_unrolled(void *to, const void *from, unsigned long len); - - static __always_inline __must_check unsigned long --copy_user_generic(void *to, const void *from, unsigned len) -+copy_user_generic(void *to, const void *from, unsigned long len) - { - unsigned ret; - -@@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len) - return ret; - } - -+static __always_inline __must_check unsigned long -+__copy_to_user(void __user *to, const void *from, unsigned long len); -+static __always_inline __must_check unsigned long -+__copy_from_user(void *to, const void __user *from, unsigned long len); - __must_check unsigned long --_copy_to_user(void __user *to, const void *from, unsigned len); --__must_check unsigned long --_copy_from_user(void *to, const void __user *from, unsigned len); --__must_check unsigned long --copy_in_user(void __user *to, const void __user *from, unsigned len); -+copy_in_user(void __user *to, const void __user *from, unsigned long len); - - static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, -- unsigned long n) -+ unsigned n) - { -- int sz = __compiletime_object_size(to); -- - might_fault(); -- if (likely(sz == -1 || sz >= n)) -- n = _copy_from_user(to, from, n); --#ifdef CONFIG_DEBUG_VM -- else -- WARN(1, "Buffer overflow detected!\n"); --#endif -+ -+ if (access_ok(VERIFY_READ, from, n)) -+ n = __copy_from_user(to, from, n); -+ else if (n < INT_MAX) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ memset(to, 0, n); -+ } - return n; - } - - static __always_inline __must_check --int copy_to_user(void __user *dst, const void *src, unsigned size) -+int copy_to_user(void __user *dst, const void *src, unsigned long size) - { - might_fault(); - -- return _copy_to_user(dst, src, size); -+ if (access_ok(VERIFY_WRITE, dst, size)) -+ size = __copy_to_user(dst, src, size); -+ return size; - } - - static __always_inline __must_check --int __copy_from_user(void *dst, const void __user *src, unsigned size) -+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) - { -- int ret = 0; -+ int sz = __compiletime_object_size(dst); -+ unsigned ret = 
0; - - might_fault(); -- if (!__builtin_constant_p(size)) -- return copy_user_generic(dst, (__force void *)src, size); -+ -+ pax_track_stack(); -+ -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_READ, src, size)) -+ return size; -+#endif -+ -+ if (unlikely(sz != -1 && sz < size)) { -+#ifdef CONFIG_DEBUG_VM -+ WARN(1, "Buffer overflow detected!\n"); -+#endif -+ return size; -+ } -+ -+ if (!__builtin_constant_p(size)) { -+ check_object_size(dst, size, false); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic(dst, (__force_kernel const void *)src, size); -+ } - switch (size) { -- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, -+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src, - ret, "b", "b", "=q", 1); - return ret; -- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, -+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src, - ret, "w", "w", "=r", 2); - return ret; -- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, -+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src, - ret, "l", "k", "=r", 4); - return ret; -- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, -+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src, - ret, "q", "", "=r", 8); - return ret; - case 10: -- __get_user_asm(*(u64 *)dst, (u64 __user *)src, -+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, - ret, "q", "", "=r", 10); - if (unlikely(ret)) - return ret; - __get_user_asm(*(u16 *)(8 + (char *)dst), -- (u16 __user *)(8 + (char __user *)src), -+ (const u16 __user *)(8 + (const char __user *)src), - ret, "w", "w", "=r", 2); - return ret; - case 16: -- __get_user_asm(*(u64 *)dst, (u64 __user *)src, -+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, - ret, "q", "", "=r", 16); - if (unlikely(ret)) - return ret; - __get_user_asm(*(u64 *)(8 + (char *)dst), -- (u64 __user *)(8 + (char __user *)src), -+ (const u64 __user *)(8 + (const char __user *)src), - ret, "q", "", "=r", 8); - return ret; - default: -- return copy_user_generic(dst, (__force void *)src, size); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic(dst, (__force_kernel const void *)src, size); - } - } - - static __always_inline __must_check --int __copy_to_user(void __user *dst, const void *src, unsigned size) -+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) - { -- int ret = 0; -+ int sz = __compiletime_object_size(src); -+ unsigned ret = 0; - - might_fault(); -- if (!__builtin_constant_p(size)) -- return copy_user_generic((__force void *)dst, src, size); -+ -+ pax_track_stack(); -+ -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_WRITE, dst, size)) -+ return size; -+#endif -+ -+ if (unlikely(sz != -1 && sz < size)) { -+#ifdef CONFIG_DEBUG_VM -+ WARN(1, "Buffer overflow detected!\n"); -+#endif -+ return size; -+ } -+ -+ if (!__builtin_constant_p(size)) { -+ check_object_size(src, size, true); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((__force_kernel void *)dst, src, size); -+ } - switch (size) { -- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, -+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst, - ret, "b", "b", "iq", 1); - 
return ret; -- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, -+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst, - ret, "w", "w", "ir", 2); - return ret; -- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, -+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst, - ret, "l", "k", "ir", 4); - return ret; -- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, -+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 8); - return ret; - case 10: -- __put_user_asm(*(u64 *)src, (u64 __user *)dst, -+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 10); - if (unlikely(ret)) - return ret; - asm("":::"memory"); -- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, -+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, - ret, "w", "w", "ir", 2); - return ret; - case 16: -- __put_user_asm(*(u64 *)src, (u64 __user *)dst, -+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 16); - if (unlikely(ret)) - return ret; - asm("":::"memory"); -- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, -+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, - ret, "q", "", "er", 8); - return ret; - default: -- return copy_user_generic((__force void *)dst, src, size); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((__force_kernel void *)dst, src, size); - } - } - - static __always_inline __must_check --int __copy_in_user(void __user *dst, const void __user *src, unsigned size) -+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) - { -- int ret = 0; -+ unsigned ret = 0; - - might_fault(); -- if (!__builtin_constant_p(size)) -- return copy_user_generic((__force void *)dst, -- (__force void *)src, size); -+ -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_READ, src, size)) -+ return size; -+ if (!__access_ok(VERIFY_WRITE, dst, size)) -+ return size; -+#endif -+ -+ if (!__builtin_constant_p(size)) { -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((__force_kernel void *)dst, -+ (__force_kernel const void *)src, size); -+ } - switch (size) { - case 1: { - u8 tmp; -- __get_user_asm(tmp, (u8 __user *)src, -+ __get_user_asm(tmp, (const u8 __user *)src, - ret, "b", "b", "=q", 1); - if (likely(!ret)) - __put_user_asm(tmp, (u8 __user *)dst, -@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) - } - case 2: { - u16 tmp; -- __get_user_asm(tmp, (u16 __user *)src, -+ __get_user_asm(tmp, (const u16 __user *)src, - ret, "w", "w", "=r", 2); - if (likely(!ret)) - __put_user_asm(tmp, (u16 __user *)dst, -@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) - - case 4: { - u32 tmp; -- __get_user_asm(tmp, (u32 __user *)src, -+ __get_user_asm(tmp, (const u32 __user *)src, - ret, "l", "k", "=r", 4); - if (likely(!ret)) - __put_user_asm(tmp, (u32 __user *)dst, -@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) - } - case 8: { - u64 tmp; -- __get_user_asm(tmp, (u64 __user *)src, -+ __get_user_asm(tmp, (const u64 __user *)src, - ret, "q", "", "=r", 8); - if (likely(!ret)) - __put_user_asm(tmp, (u64 
__user *)dst, -@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) - return ret; - } - default: -- return copy_user_generic((__force void *)dst, -- (__force void *)src, size); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((__force_kernel void *)dst, -+ (__force_kernel const void *)src, size); - } - } - -@@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len); - __must_check unsigned long __clear_user(void __user *mem, unsigned long len); - - static __must_check __always_inline int --__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) -+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) - { -- return copy_user_generic(dst, (__force const void *)src, size); -+ pax_track_stack(); -+ -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_READ, src, size)) -+ return size; -+ -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic(dst, (__force_kernel const void *)src, size); - } - --static __must_check __always_inline int --__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) -+static __must_check __always_inline unsigned long -+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) - { -- return copy_user_generic((__force void *)dst, src, size); -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_WRITE, dst, size)) -+ return size; -+ -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((__force_kernel void *)dst, src, size); - } - --extern long __copy_user_nocache(void *dst, const void __user *src, -- unsigned size, int zerorest); -+extern unsigned long __copy_user_nocache(void *dst, const void __user *src, -+ unsigned long size, int zerorest); - --static inline int --__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) -+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) - { - might_sleep(); -+ -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_READ, src, size)) -+ return size; -+#endif -+ - return __copy_user_nocache(dst, src, size, 1); - } - --static inline int --__copy_from_user_inatomic_nocache(void *dst, const void __user *src, -- unsigned size) -+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, -+ unsigned long size) - { -+ if (size > INT_MAX) -+ return size; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (!__access_ok(VERIFY_READ, src, size)) -+ return size; -+#endif -+ - return __copy_user_nocache(dst, src, size, 0); - } - --unsigned long --copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); -+extern unsigned long -+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest); - - #endif /* _ASM_X86_UACCESS_64_H */ -diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h -index bb05228..d763d5b 100644 ---- a/arch/x86/include/asm/vdso.h -+++ b/arch/x86/include/asm/vdso.h -@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[]; - #define 
VDSO32_SYMBOL(base, name) \ - ({ \ - extern const char VDSO32_##name[]; \ -- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ -+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ - }) - #endif - -diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h -index d3d8590..d296b5f 100644 ---- a/arch/x86/include/asm/x86_init.h -+++ b/arch/x86/include/asm/x86_init.h -@@ -28,7 +28,7 @@ struct x86_init_mpparse { - void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); - void (*find_smp_config)(void); - void (*get_smp_config)(unsigned int early); --}; -+} __no_const; - - /** - * struct x86_init_resources - platform specific resource related ops -@@ -42,7 +42,7 @@ struct x86_init_resources { - void (*probe_roms)(void); - void (*reserve_resources)(void); - char *(*memory_setup)(void); --}; -+} __no_const; - - /** - * struct x86_init_irqs - platform specific interrupt setup -@@ -55,7 +55,7 @@ struct x86_init_irqs { - void (*pre_vector_init)(void); - void (*intr_init)(void); - void (*trap_init)(void); --}; -+} __no_const; - - /** - * struct x86_init_oem - oem platform specific customizing functions -@@ -65,7 +65,7 @@ struct x86_init_irqs { - struct x86_init_oem { - void (*arch_setup)(void); - void (*banner)(void); --}; -+} __no_const; - - /** - * struct x86_init_mapping - platform specific initial kernel pagetable setup -@@ -76,7 +76,7 @@ struct x86_init_oem { - */ - struct x86_init_mapping { - void (*pagetable_reserve)(u64 start, u64 end); --}; -+} __no_const; - - /** - * struct x86_init_paging - platform specific paging functions -@@ -86,7 +86,7 @@ struct x86_init_mapping { - struct x86_init_paging { - void (*pagetable_setup_start)(pgd_t *base); - void (*pagetable_setup_done)(pgd_t *base); --}; -+} __no_const; - - /** - * struct x86_init_timers - platform specific timer setup -@@ -101,7 +101,7 @@ struct x86_init_timers { - void (*tsc_pre_init)(void); - void (*timer_init)(void); - void (*wallclock_init)(void); --}; -+} __no_const; - - /** - * struct x86_init_iommu - platform specific iommu setup -@@ -109,7 +109,7 @@ struct x86_init_timers { - */ - struct x86_init_iommu { - int (*iommu_init)(void); --}; -+} __no_const; - - /** - * struct x86_init_pci - platform specific pci init functions -@@ -123,7 +123,7 @@ struct x86_init_pci { - int (*init)(void); - void (*init_irq)(void); - void (*fixup_irqs)(void); --}; -+} __no_const; - - /** - * struct x86_init_ops - functions for platform specific setup -@@ -139,7 +139,7 @@ struct x86_init_ops { - struct x86_init_timers timers; - struct x86_init_iommu iommu; - struct x86_init_pci pci; --}; -+} __no_const; - - /** - * struct x86_cpuinit_ops - platform specific cpu hotplug setups -@@ -147,7 +147,7 @@ struct x86_init_ops { - */ - struct x86_cpuinit_ops { - void (*setup_percpu_clockev)(void); --}; -+} __no_const; - - /** - * struct x86_platform_ops - platform specific runtime functions -@@ -166,7 +166,7 @@ struct x86_platform_ops { - bool (*is_untracked_pat_range)(u64 start, u64 end); - void (*nmi_init)(void); - int (*i8042_detect)(void); --}; -+} __no_const; - - struct pci_dev; - -@@ -174,7 +174,7 @@ struct x86_msi_ops { - int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); - void (*teardown_msi_irq)(unsigned int irq); - void (*teardown_msi_irqs)(struct pci_dev *dev); --}; -+} __no_const; - - extern struct x86_init_ops x86_init; - extern struct x86_cpuinit_ops x86_cpuinit; -diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h -index c6ce245..ffbdab7 100644 ---- 
a/arch/x86/include/asm/xsave.h -+++ b/arch/x86/include/asm/xsave.h -@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf) - { - int err; - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE) -+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE); -+#endif -+ - /* - * Clear the xsave header first, so that reserved fields are - * initialized to zero. -@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf) - static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) - { - int err; -- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); -+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf); - u32 lmask = mask; - u32 hmask = mask >> 32; - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE) -+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE); -+#endif -+ - __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" - "2:\n" - ".section .fixup,"ax"\n" -diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile -index 6a564ac..9b1340c 100644 ---- a/arch/x86/kernel/acpi/realmode/Makefile -+++ b/arch/x86/kernel/acpi/realmode/Makefile -@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \ - $(call cc-option, -fno-stack-protector) \ - $(call cc-option, -mpreferred-stack-boundary=2) - KBUILD_CFLAGS += $(call cc-option, -m32) -+ifdef CONSTIFY_PLUGIN -+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify -+endif - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n - -diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S -index b4fd836..4358fe3 100644 ---- a/arch/x86/kernel/acpi/realmode/wakeup.S -+++ b/arch/x86/kernel/acpi/realmode/wakeup.S -@@ -108,6 +108,9 @@ wakeup_code: - /* Do any other stuff... */ - - #ifndef CONFIG_64BIT -+ /* Recheck NX bit overrides (64bit path does this in trampoline */ -+ call verify_cpu -+ - /* This could also be done in C code... */ - movl pmode_cr3, %eax - movl %eax, %cr3 -@@ -131,6 +134,7 @@ wakeup_code: - movl pmode_cr0, %eax - movl %eax, %cr0 - jmp pmode_return -+# include "../../verify_cpu.S" - #else - pushw $0 - pushw trampoline_segment -diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c -index 103b6ab..2004d0a 100644 ---- a/arch/x86/kernel/acpi/sleep.c -+++ b/arch/x86/kernel/acpi/sleep.c -@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void) - header->trampoline_segment = trampoline_address() >> 4; - #ifdef CONFIG_SMP - stack_start = (unsigned long)temp_stack + sizeof(temp_stack); -+ -+ pax_open_kernel(); - early_gdt_descr.address = - (unsigned long)get_cpu_gdt_table(smp_processor_id()); -+ pax_close_kernel(); -+ - initial_gs = per_cpu_offset(smp_processor_id()); - #endif - initial_code = (unsigned long)wakeup_long64; -diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S -index 13ab720..95d5442 100644 ---- a/arch/x86/kernel/acpi/wakeup_32.S -+++ b/arch/x86/kernel/acpi/wakeup_32.S -@@ -30,13 +30,11 @@ wakeup_pmode_return: - # and restore the stack ... 
but you need gdt for this to work - movl saved_context_esp, %esp - -- movl %cs:saved_magic, %eax -- cmpl $0x12345678, %eax -+ cmpl $0x12345678, saved_magic - jne bogus_magic - - # jump to place where we left off -- movl saved_eip, %eax -- jmp *%eax -+ jmp *(saved_eip) - - bogus_magic: - jmp bogus_magic -diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c -index c638228..16dfa8d 100644 ---- a/arch/x86/kernel/alternative.c -+++ b/arch/x86/kernel/alternative.c -@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start, - */ - for (a = start; a < end; a++) { - instr = (u8 *)&a->instr_offset + a->instr_offset; -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr) -+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+#endif -+ - replacement = (u8 *)&a->repl_offset + a->repl_offset; - BUG_ON(a->replacementlen > a->instrlen); - BUG_ON(a->instrlen > sizeof(insnbuf)); -@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, - for (poff = start; poff < end; poff++) { - u8 *ptr = (u8 *)poff + *poff; - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) -+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+#endif -+ - if (!*poff || ptr < text || ptr >= text_end) - continue; - /* turn DS segment override prefix into lock prefix */ -- if (*ptr == 0x3e) -+ if (*ktla_ktva(ptr) == 0x3e) - text_poke(ptr, ((unsigned char []){0xf0}), 1); - }; - mutex_unlock(&text_mutex); -@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, - for (poff = start; poff < end; poff++) { - u8 *ptr = (u8 *)poff + *poff; - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) -+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; -+#endif -+ - if (!*poff || ptr < text || ptr >= text_end) - continue; - /* turn lock prefix into DS segment override prefix */ -- if (*ptr == 0xf0) -+ if (*ktla_ktva(ptr) == 0xf0) - text_poke(ptr, ((unsigned char []){0x3E}), 1); - }; - mutex_unlock(&text_mutex); -@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, - - BUG_ON(p->len > MAX_PATCH_LEN); - /* prep the buffer with the original instructions */ -- memcpy(insnbuf, p->instr, p->len); -+ memcpy(insnbuf, ktla_ktva(p->instr), p->len); - used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, - (unsigned long)p->instr, p->len); - -@@ -568,7 +587,7 @@ void __init alternative_instructions(void) - if (smp_alt_once) - free_init_pages("SMP alternatives", - (unsigned long)__smp_locks, -- (unsigned long)__smp_locks_end); -+ PAGE_ALIGN((unsigned long)__smp_locks_end)); - - restart_nmi(); - } -@@ -585,13 +604,17 @@ void __init alternative_instructions(void) - * instructions. And on the local CPU you need to be protected again NMI or MCE - * handlers seeing an inconsistent instruction while you patch. 
- */ --void *__init_or_module text_poke_early(void *addr, const void *opcode, -+void *__kprobes text_poke_early(void *addr, const void *opcode, - size_t len) - { - unsigned long flags; - local_irq_save(flags); -- memcpy(addr, opcode, len); -+ -+ pax_open_kernel(); -+ memcpy(ktla_ktva(addr), opcode, len); - sync_core(); -+ pax_close_kernel(); -+ - local_irq_restore(flags); - /* Could also do a CLFLUSH here to speed up CPU recovery; but - that causes hangs on some VIA CPUs. */ -@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, - */ - void *__kprobes text_poke(void *addr, const void *opcode, size_t len) - { -- unsigned long flags; -- char *vaddr; -+ unsigned char *vaddr = ktla_ktva(addr); - struct page *pages[2]; -- int i; -+ size_t i; - - if (!core_kernel_text((unsigned long)addr)) { -- pages[0] = vmalloc_to_page(addr); -- pages[1] = vmalloc_to_page(addr + PAGE_SIZE); -+ pages[0] = vmalloc_to_page(vaddr); -+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); - } else { -- pages[0] = virt_to_page(addr); -+ pages[0] = virt_to_page(vaddr); - WARN_ON(!PageReserved(pages[0])); -- pages[1] = virt_to_page(addr + PAGE_SIZE); -+ pages[1] = virt_to_page(vaddr + PAGE_SIZE); - } - BUG_ON(!pages[0]); -- local_irq_save(flags); -- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); -- if (pages[1]) -- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); -- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); -- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); -- clear_fixmap(FIX_TEXT_POKE0); -- if (pages[1]) -- clear_fixmap(FIX_TEXT_POKE1); -- local_flush_tlb(); -- sync_core(); -- /* Could also do a CLFLUSH here to speed up CPU recovery; but -- that causes hangs on some VIA CPUs. */ -+ text_poke_early(addr, opcode, len); - for (i = 0; i < len; i++) -- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); -- local_irq_restore(flags); -+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); - return addr; - } - -diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c -index 52fa563..5de9d9c 100644 ---- a/arch/x86/kernel/apic/apic.c -+++ b/arch/x86/kernel/apic/apic.c -@@ -174,7 +174,7 @@ int first_system_vector = 0xfe; - /* - * Debug level, exported for io_apic.c - */ --unsigned int apic_verbosity; -+int apic_verbosity; - - int pic_mode; - -@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs) - apic_write(APIC_ESR, 0); - v1 = apic_read(APIC_ESR); - ack_APIC_irq(); -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - - apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", - smp_processor_id(), v0 , v1); -@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void) - u16 *bios_cpu_apicid; - DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); - -+ pax_track_stack(); -+ - bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); - bitmap_zero(clustermap, NUM_APIC_CLUSTERS); - -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index 8eb863e..32e6934 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, - } - EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); - --void lock_vector_lock(void) -+void lock_vector_lock(void) __acquires(vector_lock) - { - /* Used to the online set of cpus does not change - * during assign_irq_vector. 
-@@ -1036,7 +1036,7 @@ void lock_vector_lock(void) - raw_spin_lock(&vector_lock); - } - --void unlock_vector_lock(void) -+void unlock_vector_lock(void) __releases(vector_lock) - { - raw_spin_unlock(&vector_lock); - } -@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data) - ack_APIC_irq(); - } - --atomic_t irq_mis_count; -+atomic_unchecked_t irq_mis_count; - - /* - * IO-APIC versions below 0x20 don't support EOI register. -@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data) - * at the cpu. - */ - if (!(v & (1 << (i & 0x1f)))) { -- atomic_inc(&irq_mis_count); -+ atomic_inc_unchecked(&irq_mis_count); - - eoi_ioapic_irq(irq, cfg); - } -diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c -index 0371c48..54cdf63 100644 ---- a/arch/x86/kernel/apm_32.c -+++ b/arch/x86/kernel/apm_32.c -@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex); - * This is for buggy BIOS's that refer to (real mode) segment 0x40 - * even though they are called in protected mode. - */ --static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, -+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, - (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); - - static const char driver_version[] = "1.16ac"; /* no spaces */ -@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call) - BUG_ON(cpu != 0); - gdt = get_cpu_gdt_table(cpu); - save_desc_40 = gdt[0x40 / 8]; -+ -+ pax_open_kernel(); - gdt[0x40 / 8] = bad_bios_desc; -+ pax_close_kernel(); - - apm_irq_save(flags); - APM_DO_SAVE_SEGS; -@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call) - &call->esi); - APM_DO_RESTORE_SEGS; - apm_irq_restore(flags); -+ -+ pax_open_kernel(); - gdt[0x40 / 8] = save_desc_40; -+ pax_close_kernel(); -+ - put_cpu(); - - return call->eax & 0xff; -@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call) - BUG_ON(cpu != 0); - gdt = get_cpu_gdt_table(cpu); - save_desc_40 = gdt[0x40 / 8]; -+ -+ pax_open_kernel(); - gdt[0x40 / 8] = bad_bios_desc; -+ pax_close_kernel(); - - apm_irq_save(flags); - APM_DO_SAVE_SEGS; -@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call) - &call->eax); - APM_DO_RESTORE_SEGS; - apm_irq_restore(flags); -+ -+ pax_open_kernel(); - gdt[0x40 / 8] = save_desc_40; -+ pax_close_kernel(); -+ - put_cpu(); - return error; - } -@@ -2349,12 +2363,15 @@ static int __init apm_init(void) - * code to that CPU. 
- */ - gdt = get_cpu_gdt_table(0); -+ -+ pax_open_kernel(); - set_desc_base(&gdt[APM_CS >> 3], - (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); - set_desc_base(&gdt[APM_CS_16 >> 3], - (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); - set_desc_base(&gdt[APM_DS >> 3], - (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); -+ pax_close_kernel(); - - proc_create("apm", 0, NULL, &apm_file_ops); - -diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c -index 4f13faf..87db5d2 100644 ---- a/arch/x86/kernel/asm-offsets.c -+++ b/arch/x86/kernel/asm-offsets.c -@@ -33,6 +33,8 @@ void common(void) { - OFFSET(TI_status, thread_info, status); - OFFSET(TI_addr_limit, thread_info, addr_limit); - OFFSET(TI_preempt_count, thread_info, preempt_count); -+ OFFSET(TI_lowest_stack, thread_info, lowest_stack); -+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo)); - - BLANK(); - OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); -@@ -53,8 +55,26 @@ void common(void) { - OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); - OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); - OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); - #endif - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); -+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); -+#ifdef CONFIG_X86_64 -+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched); -+#endif -+#endif -+ -+#endif -+ -+ BLANK(); -+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE); -+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); -+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE); -+ - #ifdef CONFIG_XEN - BLANK(); - OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); -diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c -index e72a119..6e2955d 100644 ---- a/arch/x86/kernel/asm-offsets_64.c -+++ b/arch/x86/kernel/asm-offsets_64.c -@@ -69,6 +69,7 @@ int main(void) - BLANK(); - #undef ENTRY - -+ DEFINE(TSS_size, sizeof(struct tss_struct)); - OFFSET(TSS_ist, tss_struct, x86_tss.ist); - BLANK(); - -diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile -index 6042981..e638266 100644 ---- a/arch/x86/kernel/cpu/Makefile -+++ b/arch/x86/kernel/cpu/Makefile -@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg - CFLAGS_REMOVE_perf_event.o = -pg - endif - --# Make sure load_percpu_segment has no stackprotector --nostackp := $(call cc-option, -fno-stack-protector) --CFLAGS_common.o := $(nostackp) -- - obj-y := intel_cacheinfo.o scattered.o topology.o - obj-y += proc.o capflags.o powerflags.o common.o - obj-y += vmware.o hypervisor.o sched.o mshyperv.o -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index b13ed39..603286c 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, - unsigned int size) - { - /* AMD errata T13 (order #21922) */ -- if ((c->x86 == 6)) { -+ if (c->x86 == 6) { - /* Duron Rev A0 */ - if (c->x86_model == 3 && c->x86_mask == 0) - size = 64; -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index 6218439..0f1addc 100644 ---- a/arch/x86/kernel/cpu/common.c -+++ b/arch/x86/kernel/cpu/common.c -@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = { - - static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; - 
--DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { --#ifdef CONFIG_X86_64 -- /* -- * We need valid kernel segments for data and code in long mode too -- * IRET will check the segment types kkeil 2000/10/28 -- * Also sysret mandates a special GDT layout -- * -- * TLS descriptors are currently at a different place compared to i386. -- * Hopefully nobody expects them at a fixed place (Wine?) -- */ -- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), -- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), -- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), -- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), -- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), -- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), --#else -- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), -- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), -- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), -- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), -- /* -- * Segments used for calling PnP BIOS have byte granularity. -- * They code segments and data segments have fixed 64k limits, -- * the transfer segment sizes are set at run time. -- */ -- /* 32-bit code */ -- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), -- /* 16-bit code */ -- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), -- /* 16-bit data */ -- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), -- /* 16-bit data */ -- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), -- /* 16-bit data */ -- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), -- /* -- * The APM segments have byte granularity and their bases -- * are set at run time. All have 64k limits. -- */ -- /* 32-bit code */ -- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), -- /* 16-bit code */ -- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), -- /* data */ -- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), -- -- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), -- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), -- GDT_STACK_CANARY_INIT --#endif --} }; --EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); -- - static int __init x86_xsave_setup(char *s) - { - setup_clear_cpu_cap(X86_FEATURE_XSAVE); -@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu) - { - struct desc_ptr gdt_descr; - -- gdt_descr.address = (long)get_cpu_gdt_table(cpu); -+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); - gdt_descr.size = GDT_SIZE - 1; - load_gdt(&gdt_descr); - /* Reload the per-cpu base */ -@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) - /* Filter out anything that depends on CPUID levels we don't have */ - filter_cpuid_features(c, true); - -+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32)) -+ setup_clear_cpu_cap(X86_FEATURE_SEP); -+#endif -+ - /* If the model name is still unset, do table lookup. 
*/ - if (!c->x86_model_id[0]) { - const char *p; -@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg) - } - __setup("clearcpuid=", setup_disablecpuid); - -+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo; -+EXPORT_PER_CPU_SYMBOL(current_tinfo); -+ - #ifdef CONFIG_X86_64 - struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; - -@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = - EXPORT_PER_CPU_SYMBOL(current_task); - - DEFINE_PER_CPU(unsigned long, kernel_stack) = -- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; -+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE; - EXPORT_PER_CPU_SYMBOL(kernel_stack); - - DEFINE_PER_CPU(char *, irq_stack_ptr) = -@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) - { - memset(regs, 0, sizeof(struct pt_regs)); - regs->fs = __KERNEL_PERCPU; -- regs->gs = __KERNEL_STACK_CANARY; -+ savesegment(gs, regs->gs); - - return regs; - } -@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void) - int i; - - cpu = stack_smp_processor_id(); -- t = &per_cpu(init_tss, cpu); -+ t = init_tss + cpu; - oist = &per_cpu(orig_ist, cpu); - - #ifdef CONFIG_NUMA -@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void) - switch_to_new_gdt(cpu); - loadsegment(fs, 0); - -- load_idt((const struct desc_ptr *)&idt_descr); -+ load_idt(&idt_descr); - - memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); - syscall_init(); -@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void) - wrmsrl(MSR_KERNEL_GS_BASE, 0); - barrier(); - -- x86_configure_nx(); - if (cpu != 0) - enable_x2apic(); - -@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void) - { - int cpu = smp_processor_id(); - struct task_struct *curr = current; -- struct tss_struct *t = &per_cpu(init_tss, cpu); -+ struct tss_struct *t = init_tss + cpu; - struct thread_struct *thread = &curr->thread; - - if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { -diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c -index ed6086e..a1dcf29 100644 ---- a/arch/x86/kernel/cpu/intel.c -+++ b/arch/x86/kernel/cpu/intel.c -@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void) - * Update the IDT descriptor and reload the IDT so that - * it uses the read-only mapped virtual address. - */ -- idt_descr.address = fix_to_virt(FIX_F00F_IDT); -+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); - load_idt(&idt_descr); - } - #endif -diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c -index 0ed633c..82cef2a 100644 ---- a/arch/x86/kernel/cpu/mcheck/mce-inject.c -+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c -@@ -215,7 +215,9 @@ static int inject_init(void) - if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) - return -ENOMEM; - printk(KERN_INFO "Machine check injector initialized\n"); -- mce_chrdev_ops.write = mce_write; -+ pax_open_kernel(); -+ *(void **)&mce_chrdev_ops.write = mce_write; -+ pax_close_kernel(); - register_die_notifier(&mce_raise_nb); - return 0; - } -diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c -index 08363b0..ee26113 100644 ---- a/arch/x86/kernel/cpu/mcheck/mce.c -+++ b/arch/x86/kernel/cpu/mcheck/mce.c -@@ -42,6 +42,7 @@ - #include <asm/processor.h> - #include <asm/mce.h> - #include <asm/msr.h> -+#include <asm/local.h> - - #include "mce-internal.h" - -@@ -205,7 +206,7 @@ static void print_mce(struct mce *m) - !(m->mcgstatus & MCG_STATUS_EIPV) ? 
" !INEXACT!" : "", - m->cs, m->ip); - -- if (m->cs == __KERNEL_CS) -+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) - print_symbol("{%s}", m->ip); - pr_cont("\n"); - } -@@ -233,10 +234,10 @@ static void print_mce(struct mce *m) - - #define PANIC_TIMEOUT 5 /* 5 seconds */ - --static atomic_t mce_paniced; -+static atomic_unchecked_t mce_paniced; - - static int fake_panic; --static atomic_t mce_fake_paniced; -+static atomic_unchecked_t mce_fake_paniced; - - /* Panic in progress. Enable interrupts and wait for final IPI */ - static void wait_for_panic(void) -@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) - /* - * Make sure only one CPU runs in machine check panic - */ -- if (atomic_inc_return(&mce_paniced) > 1) -+ if (atomic_inc_return_unchecked(&mce_paniced) > 1) - wait_for_panic(); - barrier(); - -@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) - console_verbose(); - } else { - /* Don't log too much for fake panic */ -- if (atomic_inc_return(&mce_fake_paniced) > 1) -+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) - return; - } - /* First print corrected ones that are still unlogged */ -@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t) - * might have been modified by someone else. - */ - rmb(); -- if (atomic_read(&mce_paniced)) -+ if (atomic_read_unchecked(&mce_paniced)) - wait_for_panic(); - if (!monarch_timeout) - goto out; -@@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) - } - - /* Call the installed machine check handler for this CPU setup. */ --void (*machine_check_vector)(struct pt_regs *, long error_code) = -+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only = - unexpected_machine_check; - - /* -@@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) - return; - } - -+ pax_open_kernel(); - machine_check_vector = do_machine_check; -+ pax_close_kernel(); - - __mcheck_cpu_init_generic(); - __mcheck_cpu_init_vendor(c); -@@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) - */ - - static DEFINE_SPINLOCK(mce_chrdev_state_lock); --static int mce_chrdev_open_count; /* #times opened */ -+static local_t mce_chrdev_open_count; /* #times opened */ - static int mce_chrdev_open_exclu; /* already open exclusive? 
*/ - - static int mce_chrdev_open(struct inode *inode, struct file *file) -@@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) - spin_lock(&mce_chrdev_state_lock); - - if (mce_chrdev_open_exclu || -- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { -+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) { - spin_unlock(&mce_chrdev_state_lock); - - return -EBUSY; -@@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) - - if (file->f_flags & O_EXCL) - mce_chrdev_open_exclu = 1; -- mce_chrdev_open_count++; -+ local_inc(&mce_chrdev_open_count); - - spin_unlock(&mce_chrdev_state_lock); - -@@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file) - { - spin_lock(&mce_chrdev_state_lock); - -- mce_chrdev_open_count--; -+ local_dec(&mce_chrdev_open_count); - mce_chrdev_open_exclu = 0; - - spin_unlock(&mce_chrdev_state_lock); -@@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void) - static void mce_reset(void) - { - cpu_missing = 0; -- atomic_set(&mce_fake_paniced, 0); -+ atomic_set_unchecked(&mce_fake_paniced, 0); - atomic_set(&mce_executing, 0); - atomic_set(&mce_callin, 0); - atomic_set(&global_nwo, 0); -diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c -index 5c0e653..1e82c7c 100644 ---- a/arch/x86/kernel/cpu/mcheck/p5.c -+++ b/arch/x86/kernel/cpu/mcheck/p5.c -@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) - if (!cpu_has(c, X86_FEATURE_MCE)) - return; - -+ pax_open_kernel(); - machine_check_vector = pentium_machine_check; -+ pax_close_kernel(); - /* Make sure the vector pointer is visible before we enable MCEs: */ - wmb(); - -diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c -index 54060f5..e6ba93d 100644 ---- a/arch/x86/kernel/cpu/mcheck/winchip.c -+++ b/arch/x86/kernel/cpu/mcheck/winchip.c -@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) - { - u32 lo, hi; - -+ pax_open_kernel(); - machine_check_vector = winchip_machine_check; -+ pax_close_kernel(); - /* Make sure the vector pointer is visible before we enable MCEs: */ - wmb(); - -diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c -index 6b96110..0da73eb 100644 ---- a/arch/x86/kernel/cpu/mtrr/main.c -+++ b/arch/x86/kernel/cpu/mtrr/main.c -@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex); - u64 size_or_mask, size_and_mask; - static bool mtrr_aps_delayed_init; - --static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; -+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; - - const struct mtrr_ops *mtrr_if; - -diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h -index df5e41f..816c719 100644 ---- a/arch/x86/kernel/cpu/mtrr/mtrr.h -+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h -@@ -25,7 +25,7 @@ struct mtrr_ops { - int (*validate_add_page)(unsigned long base, unsigned long size, - unsigned int type); - int (*have_wrcomb)(void); --}; -+} __do_const; - - extern int generic_get_free_region(unsigned long base, unsigned long size, - int replace_reg); -diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c -index cfa62ec..9250dd7 100644 ---- a/arch/x86/kernel/cpu/perf_event.c -+++ b/arch/x86/kernel/cpu/perf_event.c -@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) - int i, j, w, wmax, num = 0; - struct hw_perf_event *hwc; - -+ pax_track_stack(); -+ - bitmap_zero(used_mask, 
X86_PMC_IDX_MAX); - - for (i = 0; i < n; i++) { -@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) - break; - - perf_callchain_store(entry, frame.return_address); -- fp = frame.next_frame; -+ fp = (const void __force_user *)frame.next_frame; - } - } - -diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c -index 764c7c2..c5d9c7b 100644 ---- a/arch/x86/kernel/crash.c -+++ b/arch/x86/kernel/crash.c -@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args) - regs = args->regs; - - #ifdef CONFIG_X86_32 -- if (!user_mode_vm(regs)) { -+ if (!user_mode(regs)) { - crash_fixup_ss_esp(&fixed_regs, regs); - regs = &fixed_regs; - } -diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c -index 37250fe..bf2ec74 100644 ---- a/arch/x86/kernel/doublefault_32.c -+++ b/arch/x86/kernel/doublefault_32.c -@@ -11,7 +11,7 @@ - - #define DOUBLEFAULT_STACKSIZE (1024) - static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; --#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) -+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) - - #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) - -@@ -21,7 +21,7 @@ static void doublefault_fn(void) - unsigned long gdt, tss; - - store_gdt(&gdt_desc); -- gdt = gdt_desc.address; -+ gdt = (unsigned long)gdt_desc.address; - - printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); - -@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { - /* 0x2 bit is always set */ - .flags = X86_EFLAGS_SF | 0x2, - .sp = STACK_START, -- .es = __USER_DS, -+ .es = __KERNEL_DS, - .cs = __KERNEL_CS, - .ss = __KERNEL_DS, -- .ds = __USER_DS, -+ .ds = __KERNEL_DS, - .fs = __KERNEL_PERCPU, - - .__cr3 = __pa_nodebug(swapper_pg_dir), -diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c -index 1aae78f..aab3a3d 100644 ---- a/arch/x86/kernel/dumpstack.c -+++ b/arch/x86/kernel/dumpstack.c -@@ -2,6 +2,9 @@ - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs - */ -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+#define __INCLUDED_BY_HIDESYM 1 -+#endif - #include <linux/kallsyms.h> - #include <linux/kprobes.h> - #include <linux/uaccess.h> -@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable) - static void - print_ftrace_graph_addr(unsigned long addr, void *data, - const struct stacktrace_ops *ops, -- struct thread_info *tinfo, int *graph) -+ struct task_struct *task, int *graph) - { -- struct task_struct *task = tinfo->task; - unsigned long ret_addr; - int index = task->curr_ret_stack; - -@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data, - static inline void - print_ftrace_graph_addr(unsigned long addr, void *data, - const struct stacktrace_ops *ops, -- struct thread_info *tinfo, int *graph) -+ struct task_struct *task, int *graph) - { } - #endif - -@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data, - * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack - */ - --static inline int valid_stack_ptr(struct thread_info *tinfo, -- void *p, unsigned int size, void *end) -+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end) - { -- void *t = tinfo; - if (end) { - if (p < end && p >= (end-THREAD_SIZE)) - return 1; -@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, - } - - 
unsigned long --print_context_stack(struct thread_info *tinfo, -+print_context_stack(struct task_struct *task, void *stack_start, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end, int *graph) - { - struct stack_frame *frame = (struct stack_frame *)bp; - -- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { -+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) { - unsigned long addr; - - addr = *stack; -@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo, - } else { - ops->address(data, addr, 0); - } -- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); -+ print_ftrace_graph_addr(addr, data, ops, task, graph); - } - stack++; - } -@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo, - EXPORT_SYMBOL_GPL(print_context_stack); - - unsigned long --print_context_stack_bp(struct thread_info *tinfo, -+print_context_stack_bp(struct task_struct *task, void *stack_start, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end, int *graph) -@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo, - struct stack_frame *frame = (struct stack_frame *)bp; - unsigned long *ret_addr = &frame->return_address; - -- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { -+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) { - unsigned long addr = *ret_addr; - - if (!__kernel_text_address(addr)) -@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo, - ops->address(data, addr, 1); - frame = frame->next_frame; - ret_addr = &frame->return_address; -- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); -+ print_ftrace_graph_addr(addr, data, ops, task, graph); - } - - return (unsigned long)frame; -@@ -186,7 +186,7 @@ void dump_stack(void) - - bp = stack_frame(current, NULL); - printk("Pid: %d, comm: %.20s %s %s %.*s\n", -- current->pid, current->comm, print_tainted(), -+ task_pid_nr(current), current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); -@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void) - } - EXPORT_SYMBOL_GPL(oops_begin); - -+extern void gr_handle_kernel_exploit(void); -+ - void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) - { - if (regs && kexec_should_crash(current)) -@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); -- do_exit(signr); -+ -+ gr_handle_kernel_exploit(); -+ -+ do_group_exit(signr); - } - - int __kprobes __die(const char *str, struct pt_regs *regs, long err) -@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) - - show_registers(regs); - #ifdef CONFIG_X86_32 -- if (user_mode_vm(regs)) { -+ if (user_mode(regs)) { - sp = regs->sp; - ss = regs->ss & 0xffff; - } else { -@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err) - unsigned long flags = oops_begin(); - int sig = SIGSEGV; - -- if (!user_mode_vm(regs)) -+ if (!user_mode(regs)) - report_bug(regs->ip, regs); - - if (__die(str, regs, err)) -diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c -index 3b97a80..667ce7a 100644 ---- a/arch/x86/kernel/dumpstack_32.c -+++ b/arch/x86/kernel/dumpstack_32.c -@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct 
pt_regs *regs, - bp = stack_frame(task, regs); - - for (;;) { -- struct thread_info *context; -+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); - -- context = (struct thread_info *) -- ((unsigned long)stack & (~(THREAD_SIZE - 1))); -- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph); -+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); - -- stack = (unsigned long *)context->previous_esp; -- if (!stack) -+ if (stack_start == task_stack_page(task)) - break; -+ stack = *(unsigned long **)stack_start; - if (ops->stack(data, "IRQ") < 0) - break; - touch_nmi_watchdog(); -@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs) - * When in-kernel, we also print out the stack and code at the - * time of the fault.. - */ -- if (!user_mode_vm(regs)) { -+ if (!user_mode(regs)) { - unsigned int code_prologue = code_bytes * 43 / 64; - unsigned int code_len = code_bytes; - unsigned char c; - u8 *ip; -+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]); - - printk(KERN_EMERG "Stack:\n"); - show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG); - - printk(KERN_EMERG "Code: "); - -- ip = (u8 *)regs->ip - code_prologue; -+ ip = (u8 *)regs->ip - code_prologue + cs_base; - if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - /* try starting at IP */ -- ip = (u8 *)regs->ip; -+ ip = (u8 *)regs->ip + cs_base; - code_len = code_len - code_prologue + 1; - } - for (i = 0; i < code_len; i++, ip++) { -@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs) - printk(" Bad EIP value."); - break; - } -- if (ip == (u8 *)regs->ip) -+ if (ip == (u8 *)regs->ip + cs_base) - printk("<%02x> ", c); - else - printk("%02x ", c); -@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip) - { - unsigned short ud2; - -+ ip = ktla_ktva(ip); - if (ip < PAGE_OFFSET) - return 0; - if (probe_kernel_address((unsigned short *)ip, ud2)) -@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip) - - return ud2 == 0x0b0f; - } -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+void pax_check_alloca(unsigned long size) -+{ -+ unsigned long sp = (unsigned long)&sp, stack_left; -+ -+ /* all kernel stacks are of the same size */ -+ stack_left = sp & (THREAD_SIZE - 1); -+ BUG_ON(stack_left < 256 || size >= stack_left - 256); -+} -+EXPORT_SYMBOL(pax_check_alloca); -+#endif -diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c -index 19853ad..508ca79 100644 ---- a/arch/x86/kernel/dumpstack_64.c -+++ b/arch/x86/kernel/dumpstack_64.c -@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *irq_stack_end = - (unsigned long *)per_cpu(irq_stack_ptr, cpu); - unsigned used = 0; -- struct thread_info *tinfo; - int graph = 0; - unsigned long dummy; -+ void *stack_start; - - if (!task) - task = current; -@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, - * current stack address. 
If the stacks consist of nested - * exceptions - */ -- tinfo = task_thread_info(task); - for (;;) { - char *id; - unsigned long *estack_end; -+ - estack_end = in_exception_stack(cpu, (unsigned long)stack, - &used, &id); - -@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, - if (ops->stack(data, id) < 0) - break; - -- bp = ops->walk_stack(tinfo, stack, bp, ops, -+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops, - data, estack_end, &graph); - ops->stack(data, "<EOE>"); - /* -@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, - if (in_irq_stack(stack, irq_stack, irq_stack_end)) { - if (ops->stack(data, "IRQ") < 0) - break; -- bp = ops->walk_stack(tinfo, stack, bp, -+ bp = ops->walk_stack(task, irq_stack, stack, bp, - ops, data, irq_stack_end, &graph); - /* - * We link to the next stack (which would be -@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, - /* - * This handles the process stack: - */ -- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); -+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); -+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); - put_cpu(); - } - EXPORT_SYMBOL(dump_trace); -@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip) - - return ud2 == 0x0b0f; - } -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+void pax_check_alloca(unsigned long size) -+{ -+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end; -+ unsigned cpu, used; -+ char *id; -+ -+ /* check the process stack first */ -+ stack_start = (unsigned long)task_stack_page(current); -+ stack_end = stack_start + THREAD_SIZE; -+ if (likely(stack_start <= sp && sp < stack_end)) { -+ unsigned long stack_left = sp & (THREAD_SIZE - 1); -+ BUG_ON(stack_left < 256 || size >= stack_left - 256); -+ return; -+ } -+ -+ cpu = get_cpu(); -+ -+ /* check the irq stacks */ -+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu); -+ stack_start = stack_end - IRQ_STACK_SIZE; -+ if (stack_start <= sp && sp < stack_end) { -+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1); -+ put_cpu(); -+ BUG_ON(stack_left < 256 || size >= stack_left - 256); -+ return; -+ } -+ -+ /* check the exception stacks */ -+ used = 0; -+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id); -+ stack_start = stack_end - EXCEPTION_STKSZ; -+ if (stack_end && stack_start <= sp && sp < stack_end) { -+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1); -+ put_cpu(); -+ BUG_ON(stack_left < 256 || size >= stack_left - 256); -+ return; -+ } -+ -+ put_cpu(); -+ -+ /* unknown stack */ -+ BUG(); -+} -+EXPORT_SYMBOL(pax_check_alloca); -+#endif -diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c -index cd28a35..2601699 100644 ---- a/arch/x86/kernel/early_printk.c -+++ b/arch/x86/kernel/early_printk.c -@@ -7,6 +7,7 @@ - #include <linux/pci_regs.h> - #include <linux/pci_ids.h> - #include <linux/errno.h> -+#include <linux/sched.h> - #include <asm/io.h> - #include <asm/processor.h> - #include <asm/fcntl.h> -@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...) 
- int n; - va_list ap; - -+ pax_track_stack(); -+ - va_start(ap, fmt); - n = vscnprintf(buf, sizeof(buf), fmt, ap); - early_console->write(early_console, buf, n); -diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S -index f3f6f53..0841b66 100644 ---- a/arch/x86/kernel/entry_32.S -+++ b/arch/x86/kernel/entry_32.S -@@ -186,13 +186,146 @@ - /*CFI_REL_OFFSET gs, PT_GS*/ - .endm - .macro SET_KERNEL_GS reg -+ -+#ifdef CONFIG_CC_STACKPROTECTOR - movl $(__KERNEL_STACK_CANARY), \reg -+#elif defined(CONFIG_PAX_MEMORY_UDEREF) -+ movl $(__USER_DS), \reg -+#else -+ xorl \reg, \reg -+#endif -+ - movl \reg, %gs - .endm - - #endif /* CONFIG_X86_32_LAZY_GS */ - --.macro SAVE_ALL -+.macro pax_enter_kernel -+#ifdef CONFIG_PAX_KERNEXEC -+ call pax_enter_kernel -+#endif -+.endm -+ -+.macro pax_exit_kernel -+#ifdef CONFIG_PAX_KERNEXEC -+ call pax_exit_kernel -+#endif -+.endm -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ENTRY(pax_enter_kernel) -+#ifdef CONFIG_PARAVIRT -+ pushl %eax -+ pushl %ecx -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) -+ mov %eax, %esi -+#else -+ mov %cr0, %esi -+#endif -+ bts $16, %esi -+ jnc 1f -+ mov %cs, %esi -+ cmp $__KERNEL_CS, %esi -+ jz 3f -+ ljmp $__KERNEL_CS, $3f -+1: ljmp $__KERNEXEC_KERNEL_CS, $2f -+2: -+#ifdef CONFIG_PARAVIRT -+ mov %esi, %eax -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) -+#else -+ mov %esi, %cr0 -+#endif -+3: -+#ifdef CONFIG_PARAVIRT -+ popl %ecx -+ popl %eax -+#endif -+ ret -+ENDPROC(pax_enter_kernel) -+ -+ENTRY(pax_exit_kernel) -+#ifdef CONFIG_PARAVIRT -+ pushl %eax -+ pushl %ecx -+#endif -+ mov %cs, %esi -+ cmp $__KERNEXEC_KERNEL_CS, %esi -+ jnz 2f -+#ifdef CONFIG_PARAVIRT -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); -+ mov %eax, %esi -+#else -+ mov %cr0, %esi -+#endif -+ btr $16, %esi -+ ljmp $__KERNEL_CS, $1f -+1: -+#ifdef CONFIG_PARAVIRT -+ mov %esi, %eax -+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); -+#else -+ mov %esi, %cr0 -+#endif -+2: -+#ifdef CONFIG_PARAVIRT -+ popl %ecx -+ popl %eax -+#endif -+ ret -+ENDPROC(pax_exit_kernel) -+#endif -+ -+.macro pax_erase_kstack -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+ call pax_erase_kstack -+#endif -+.endm -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+/* -+ * ebp: thread_info -+ * ecx, edx: can be clobbered -+ */ -+ENTRY(pax_erase_kstack) -+ pushl %edi -+ pushl %eax -+ -+ mov TI_lowest_stack(%ebp), %edi -+ mov $-0xBEEF, %eax -+ std -+ -+1: mov %edi, %ecx -+ and $THREAD_SIZE_asm - 1, %ecx -+ shr $2, %ecx -+ repne scasl -+ jecxz 2f -+ -+ cmp $2*16, %ecx -+ jc 2f -+ -+ mov $2*16, %ecx -+ repe scasl -+ jecxz 2f -+ jne 1b -+ -+2: cld -+ mov %esp, %ecx -+ sub %edi, %ecx -+ shr $2, %ecx -+ rep stosl -+ -+ mov TI_task_thread_sp0(%ebp), %edi -+ sub $128, %edi -+ mov %edi, TI_lowest_stack(%ebp) -+ -+ popl %eax -+ popl %edi -+ ret -+ENDPROC(pax_erase_kstack) -+#endif -+ -+.macro __SAVE_ALL _DS - cld - PUSH_GS - pushl_cfi %fs -@@ -215,7 +348,7 @@ - CFI_REL_OFFSET ecx, 0 - pushl_cfi %ebx - CFI_REL_OFFSET ebx, 0 -- movl $(__USER_DS), %edx -+ movl $_DS, %edx - movl %edx, %ds - movl %edx, %es - movl $(__KERNEL_PERCPU), %edx -@@ -223,6 +356,15 @@ - SET_KERNEL_GS %edx - .endm - -+.macro SAVE_ALL -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ __SAVE_ALL __KERNEL_DS -+ pax_enter_kernel -+#else -+ __SAVE_ALL __USER_DS -+#endif -+.endm -+ - .macro RESTORE_INT_REGS - popl_cfi %ebx - CFI_RESTORE ebx -@@ -308,7 +450,7 @@ ENTRY(ret_from_fork) - popfl_cfi - jmp syscall_exit - CFI_ENDPROC --END(ret_from_fork) 
-+ENDPROC(ret_from_fork) - - /* - * Interrupt exit functions should be protected against kprobes -@@ -333,7 +475,15 @@ check_userspace: - movb PT_CS(%esp), %al - andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax - cmpl $USER_RPL, %eax -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ jae resume_userspace -+ -+ PAX_EXIT_KERNEL -+ jmp resume_kernel -+#else - jb resume_kernel # not returning to v8086 or userspace -+#endif - - ENTRY(resume_userspace) - LOCKDEP_SYS_EXIT -@@ -345,8 +495,8 @@ ENTRY(resume_userspace) - andl $_TIF_WORK_MASK, %ecx # is there any work to be done on - # int/exception return? - jne work_pending -- jmp restore_all --END(ret_from_exception) -+ jmp restore_all_pax -+ENDPROC(ret_from_exception) - - #ifdef CONFIG_PREEMPT - ENTRY(resume_kernel) -@@ -361,7 +511,7 @@ need_resched: - jz restore_all - call preempt_schedule_irq - jmp need_resched --END(resume_kernel) -+ENDPROC(resume_kernel) - #endif - CFI_ENDPROC - /* -@@ -395,23 +545,34 @@ sysenter_past_esp: - /*CFI_REL_OFFSET cs, 0*/ - /* - * Push current_thread_info()->sysenter_return to the stack. -- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words -- * pushed above; +8 corresponds to copy_thread's esp0 setting. - */ -- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) -+ pushl_cfi $0 - CFI_REL_OFFSET eip, 0 - - pushl_cfi %eax - SAVE_ALL -+ GET_THREAD_INFO(%ebp) -+ movl TI_sysenter_return(%ebp),%ebp -+ movl %ebp,PT_EIP(%esp) - ENABLE_INTERRUPTS(CLBR_NONE) - - /* - * Load the potential sixth argument from user stack. - * Careful about security. - */ -+ movl PT_OLDESP(%esp),%ebp -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ mov PT_OLDSS(%esp),%ds -+1: movl %ds:(%ebp),%ebp -+ push %ss -+ pop %ds -+#else - cmpl $__PAGE_OFFSET-3,%ebp - jae syscall_fault - 1: movl (%ebp),%ebp -+#endif -+ - movl %ebp,PT_EBP(%esp) - .section __ex_table,"a" - .align 4 -@@ -434,12 +595,24 @@ sysenter_do_call: - testl $_TIF_ALLWORK_MASK, %ecx - jne sysexit_audit - sysenter_exit: -+ -+#ifdef CONFIG_PAX_RANDKSTACK -+ pushl_cfi %eax -+ movl %esp, %eax -+ call pax_randomize_kstack -+ popl_cfi %eax -+#endif -+ -+ pax_erase_kstack -+ - /* if something modifies registers it must also disable sysexit */ - movl PT_EIP(%esp), %edx - movl PT_OLDESP(%esp), %ecx - xorl %ebp,%ebp - TRACE_IRQS_ON - 1: mov PT_FS(%esp), %fs -+2: mov PT_DS(%esp), %ds -+3: mov PT_ES(%esp), %es - PTGS_TO_GS - ENABLE_INTERRUPTS_SYSEXIT - -@@ -456,6 +629,9 @@ sysenter_audit: - movl %eax,%edx /* 2nd arg: syscall number */ - movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ - call audit_syscall_entry -+ -+ pax_erase_kstack -+ - pushl_cfi %ebx - movl PT_EAX(%esp),%eax /* reload syscall number */ - jmp sysenter_do_call -@@ -482,11 +658,17 @@ sysexit_audit: - - CFI_ENDPROC - .pushsection .fixup,"ax" --2: movl $0,PT_FS(%esp) -+4: movl $0,PT_FS(%esp) -+ jmp 1b -+5: movl $0,PT_DS(%esp) -+ jmp 1b -+6: movl $0,PT_ES(%esp) - jmp 1b - .section __ex_table,"a" - .align 4 -- .long 1b,2b -+ .long 1b,4b -+ .long 2b,5b -+ .long 3b,6b - .popsection - PTGS_TO_GS_EX - ENDPROC(ia32_sysenter_target) -@@ -519,6 +701,15 @@ syscall_exit: - testl $_TIF_ALLWORK_MASK, %ecx # current->work - jne syscall_exit_work - -+restore_all_pax: -+ -+#ifdef CONFIG_PAX_RANDKSTACK -+ movl %esp, %eax -+ call pax_randomize_kstack -+#endif -+ -+ pax_erase_kstack -+ - restore_all: - TRACE_IRQS_IRET - restore_all_notrace: -@@ -578,14 +769,34 @@ ldt_ss: - * compensating for the offset by changing to the ESPFIX segment with - * a base address that matches for the difference. 
- */ --#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) -+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) - mov %esp, %edx /* load kernel esp */ - mov PT_OLDESP(%esp), %eax /* load userspace esp */ - mov %dx, %ax /* eax: new kernel esp */ - sub %eax, %edx /* offset (low word is 0) */ -+#ifdef CONFIG_SMP -+ movl PER_CPU_VAR(cpu_number), %ebx -+ shll $PAGE_SHIFT_asm, %ebx -+ addl $cpu_gdt_table, %ebx -+#else -+ movl $cpu_gdt_table, %ebx -+#endif - shr $16, %edx -- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ -- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ mov %cr0, %esi -+ btr $16, %esi -+ mov %esi, %cr0 -+#endif -+ -+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ -+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ bts $16, %esi -+ mov %esi, %cr0 -+#endif -+ - pushl_cfi $__ESPFIX_SS - pushl_cfi %eax /* new kernel esp */ - /* Disable interrupts, but do not irqtrace this section: we -@@ -614,34 +825,28 @@ work_resched: - movl TI_flags(%ebp), %ecx - andl $_TIF_WORK_MASK, %ecx # is there any work to be done other - # than syscall tracing? -- jz restore_all -+ jz restore_all_pax - testb $_TIF_NEED_RESCHED, %cl - jnz work_resched - - work_notifysig: # deal with pending signals and - # notify-resume requests -+ movl %esp, %eax - #ifdef CONFIG_VM86 - testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) -- movl %esp, %eax -- jne work_notifysig_v86 # returning to kernel-space or -+ jz 1f # returning to kernel-space or - # vm86-space -- xorl %edx, %edx -- call do_notify_resume -- jmp resume_userspace_sig - -- ALIGN --work_notifysig_v86: - pushl_cfi %ecx # save ti_flags for do_notify_resume - call save_v86_state # %eax contains pt_regs pointer - popl_cfi %ecx - movl %eax, %esp --#else -- movl %esp, %eax -+1: - #endif - xorl %edx, %edx - call do_notify_resume - jmp resume_userspace_sig --END(work_pending) -+ENDPROC(work_pending) - - # perform syscall exit tracing - ALIGN -@@ -649,11 +854,14 @@ syscall_trace_entry: - movl $-ENOSYS,PT_EAX(%esp) - movl %esp, %eax - call syscall_trace_enter -+ -+ pax_erase_kstack -+ - /* What it returned is what we'll actually use. 
*/ - cmpl $(nr_syscalls), %eax - jnae syscall_call - jmp syscall_exit --END(syscall_trace_entry) -+ENDPROC(syscall_trace_entry) - - # perform syscall exit tracing - ALIGN -@@ -666,20 +874,24 @@ syscall_exit_work: - movl %esp, %eax - call syscall_trace_leave - jmp resume_userspace --END(syscall_exit_work) -+ENDPROC(syscall_exit_work) - CFI_ENDPROC - - RING0_INT_FRAME # can't unwind into user space anyway - syscall_fault: -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ push %ss -+ pop %ds -+#endif - GET_THREAD_INFO(%ebp) - movl $-EFAULT,PT_EAX(%esp) - jmp resume_userspace --END(syscall_fault) -+ENDPROC(syscall_fault) - - syscall_badsys: - movl $-ENOSYS,PT_EAX(%esp) - jmp resume_userspace --END(syscall_badsys) -+ENDPROC(syscall_badsys) - CFI_ENDPROC - /* - * End of kprobes section -@@ -753,6 +965,36 @@ ptregs_clone: - CFI_ENDPROC - ENDPROC(ptregs_clone) - -+ ALIGN; -+ENTRY(kernel_execve) -+ CFI_STARTPROC -+ pushl_cfi %ebp -+ sub $PT_OLDSS+4,%esp -+ pushl_cfi %edi -+ pushl_cfi %ecx -+ pushl_cfi %eax -+ lea 3*4(%esp),%edi -+ mov $PT_OLDSS/4+1,%ecx -+ xorl %eax,%eax -+ rep stosl -+ popl_cfi %eax -+ popl_cfi %ecx -+ popl_cfi %edi -+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp) -+ pushl_cfi %esp -+ call sys_execve -+ add $4,%esp -+ CFI_ADJUST_CFA_OFFSET -4 -+ GET_THREAD_INFO(%ebp) -+ test %eax,%eax -+ jz syscall_exit -+ add $PT_OLDSS+4,%esp -+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4 -+ popl_cfi %ebp -+ ret -+ CFI_ENDPROC -+ENDPROC(kernel_execve) -+ - .macro FIXUP_ESPFIX_STACK - /* - * Switch back for ESPFIX stack to the normal zerobased stack -@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone) - * normal stack and adjusts ESP with the matching offset. - */ - /* fixup the stack */ -- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ -- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ -+#ifdef CONFIG_SMP -+ movl PER_CPU_VAR(cpu_number), %ebx -+ shll $PAGE_SHIFT_asm, %ebx -+ addl $cpu_gdt_table, %ebx -+#else -+ movl $cpu_gdt_table, %ebx -+#endif -+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ -+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ - shl $16, %eax - addl %esp, %eax /* the adjusted stack pointer */ - pushl_cfi $__KERNEL_DS -@@ -816,7 +1065,7 @@ vector=vector+1 - .endr - 2: jmp common_interrupt - .endr --END(irq_entries_start) -+ENDPROC(irq_entries_start) - - .previous - END(interrupt) -@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error) - pushl_cfi $do_coprocessor_error - jmp error_code - CFI_ENDPROC --END(coprocessor_error) -+ENDPROC(coprocessor_error) - - ENTRY(simd_coprocessor_error) - RING0_INT_FRAME -@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error) - #endif - jmp error_code - CFI_ENDPROC --END(simd_coprocessor_error) -+ENDPROC(simd_coprocessor_error) - - ENTRY(device_not_available) - RING0_INT_FRAME -@@ -893,7 +1142,7 @@ ENTRY(device_not_available) - pushl_cfi $do_device_not_available - jmp error_code - CFI_ENDPROC --END(device_not_available) -+ENDPROC(device_not_available) - - #ifdef CONFIG_PARAVIRT - ENTRY(native_iret) -@@ -902,12 +1151,12 @@ ENTRY(native_iret) - .align 4 - .long native_iret, iret_exc - .previous --END(native_iret) -+ENDPROC(native_iret) - - ENTRY(native_irq_enable_sysexit) - sti - sysexit --END(native_irq_enable_sysexit) -+ENDPROC(native_irq_enable_sysexit) - #endif - - ENTRY(overflow) -@@ -916,7 +1165,7 @@ ENTRY(overflow) - pushl_cfi $do_overflow - jmp error_code - CFI_ENDPROC --END(overflow) -+ENDPROC(overflow) - - ENTRY(bounds) - RING0_INT_FRAME -@@ -924,7 +1173,7 @@ ENTRY(bounds) - pushl_cfi $do_bounds - jmp error_code - CFI_ENDPROC --END(bounds) -+ENDPROC(bounds) - - ENTRY(invalid_op) - RING0_INT_FRAME -@@ 
-932,7 +1181,7 @@ ENTRY(invalid_op) - pushl_cfi $do_invalid_op - jmp error_code - CFI_ENDPROC --END(invalid_op) -+ENDPROC(invalid_op) - - ENTRY(coprocessor_segment_overrun) - RING0_INT_FRAME -@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun) - pushl_cfi $do_coprocessor_segment_overrun - jmp error_code - CFI_ENDPROC --END(coprocessor_segment_overrun) -+ENDPROC(coprocessor_segment_overrun) - - ENTRY(invalid_TSS) - RING0_EC_FRAME - pushl_cfi $do_invalid_TSS - jmp error_code - CFI_ENDPROC --END(invalid_TSS) -+ENDPROC(invalid_TSS) - - ENTRY(segment_not_present) - RING0_EC_FRAME - pushl_cfi $do_segment_not_present - jmp error_code - CFI_ENDPROC --END(segment_not_present) -+ENDPROC(segment_not_present) - - ENTRY(stack_segment) - RING0_EC_FRAME - pushl_cfi $do_stack_segment - jmp error_code - CFI_ENDPROC --END(stack_segment) -+ENDPROC(stack_segment) - - ENTRY(alignment_check) - RING0_EC_FRAME - pushl_cfi $do_alignment_check - jmp error_code - CFI_ENDPROC --END(alignment_check) -+ENDPROC(alignment_check) - - ENTRY(divide_error) - RING0_INT_FRAME -@@ -976,7 +1225,7 @@ ENTRY(divide_error) - pushl_cfi $do_divide_error - jmp error_code - CFI_ENDPROC --END(divide_error) -+ENDPROC(divide_error) - - #ifdef CONFIG_X86_MCE - ENTRY(machine_check) -@@ -985,7 +1234,7 @@ ENTRY(machine_check) - pushl_cfi machine_check_vector - jmp error_code - CFI_ENDPROC --END(machine_check) -+ENDPROC(machine_check) - #endif - - ENTRY(spurious_interrupt_bug) -@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug) - pushl_cfi $do_spurious_interrupt_bug - jmp error_code - CFI_ENDPROC --END(spurious_interrupt_bug) -+ENDPROC(spurious_interrupt_bug) - /* - * End of kprobes section - */ -@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, - - ENTRY(mcount) - ret --END(mcount) -+ENDPROC(mcount) - - ENTRY(ftrace_caller) - cmpl $0, function_trace_stop -@@ -1138,7 +1387,7 @@ ftrace_graph_call: - .globl ftrace_stub - ftrace_stub: - ret --END(ftrace_caller) -+ENDPROC(ftrace_caller) - - #else /* ! CONFIG_DYNAMIC_FTRACE */ - -@@ -1174,7 +1423,7 @@ trace: - popl %ecx - popl %eax - jmp ftrace_stub --END(mcount) -+ENDPROC(mcount) - #endif /* CONFIG_DYNAMIC_FTRACE */ - #endif /* CONFIG_FUNCTION_TRACER */ - -@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller) - popl %ecx - popl %eax - ret --END(ftrace_graph_caller) -+ENDPROC(ftrace_graph_caller) - - .globl return_to_handler - return_to_handler: -@@ -1209,7 +1458,6 @@ return_to_handler: - jmp *%ecx - #endif - --.section .rodata,"a" - #include "syscall_table_32.S" - - syscall_table_size=(.-sys_call_table) -@@ -1255,15 +1503,18 @@ error_code: - movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart - REG_TO_PTGS %ecx - SET_KERNEL_GS %ecx -- movl $(__USER_DS), %ecx -+ movl $(__KERNEL_DS), %ecx - movl %ecx, %ds - movl %ecx, %es -+ -+ pax_enter_kernel -+ - TRACE_IRQS_OFF - movl %esp,%eax # pt_regs pointer - call *%edi - jmp ret_from_exception - CFI_ENDPROC --END(page_fault) -+ENDPROC(page_fault) - - /* - * Debug traps and NMI can happen at the one SYSENTER instruction -@@ -1305,7 +1556,7 @@ debug_stack_correct: - call do_debug - jmp ret_from_exception - CFI_ENDPROC --END(debug) -+ENDPROC(debug) - - /* - * NMI is doubly nasty. 
It can happen _while_ we're handling -@@ -1342,6 +1593,9 @@ nmi_stack_correct: - xorl %edx,%edx # zero error code - movl %esp,%eax # pt_regs pointer - call do_nmi -+ -+ pax_exit_kernel -+ - jmp restore_all_notrace - CFI_ENDPROC - -@@ -1378,12 +1632,15 @@ nmi_espfix_stack: - FIXUP_ESPFIX_STACK # %eax == %esp - xorl %edx,%edx # zero error code - call do_nmi -+ -+ pax_exit_kernel -+ - RESTORE_REGS - lss 12+4(%esp), %esp # back to espfix stack - CFI_ADJUST_CFA_OFFSET -24 - jmp irq_return - CFI_ENDPROC --END(nmi) -+ENDPROC(nmi) - - ENTRY(int3) - RING0_INT_FRAME -@@ -1395,14 +1652,14 @@ ENTRY(int3) - call do_int3 - jmp ret_from_exception - CFI_ENDPROC --END(int3) -+ENDPROC(int3) - - ENTRY(general_protection) - RING0_EC_FRAME - pushl_cfi $do_general_protection - jmp error_code - CFI_ENDPROC --END(general_protection) -+ENDPROC(general_protection) - - #ifdef CONFIG_KVM_GUEST - ENTRY(async_page_fault) -@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault) - pushl_cfi $do_async_page_fault - jmp error_code - CFI_ENDPROC --END(async_page_fault) -+ENDPROC(async_page_fault) - #endif - - /* -diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S -index 6419bb0..00440bf 100644 ---- a/arch/x86/kernel/entry_64.S -+++ b/arch/x86/kernel/entry_64.S -@@ -55,6 +55,8 @@ - #include <asm/paravirt.h> - #include <asm/ftrace.h> - #include <asm/percpu.h> -+#include <asm/pgtable.h> -+#include <asm/alternative-asm.h> - - /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ - #include <linux/elf-em.h> -@@ -68,8 +70,9 @@ - #ifdef CONFIG_FUNCTION_TRACER - #ifdef CONFIG_DYNAMIC_FTRACE - ENTRY(mcount) -+ pax_force_retaddr - retq --END(mcount) -+ENDPROC(mcount) - - ENTRY(ftrace_caller) - cmpl $0, function_trace_stop -@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call) - #endif - - GLOBAL(ftrace_stub) -+ pax_force_retaddr - retq --END(ftrace_caller) -+ENDPROC(ftrace_caller) - - #else /* ! 
CONFIG_DYNAMIC_FTRACE */ - ENTRY(mcount) -@@ -112,6 +116,7 @@ ENTRY(mcount) - #endif - - GLOBAL(ftrace_stub) -+ pax_force_retaddr - retq - - trace: -@@ -121,12 +126,13 @@ trace: - movq 8(%rbp), %rsi - subq $MCOUNT_INSN_SIZE, %rdi - -+ pax_force_fptr ftrace_trace_function - call *ftrace_trace_function - - MCOUNT_RESTORE_FRAME - - jmp ftrace_stub --END(mcount) -+ENDPROC(mcount) - #endif /* CONFIG_DYNAMIC_FTRACE */ - #endif /* CONFIG_FUNCTION_TRACER */ - -@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller) - - MCOUNT_RESTORE_FRAME - -+ pax_force_retaddr - retq --END(ftrace_graph_caller) -+ENDPROC(ftrace_graph_caller) - - GLOBAL(return_to_handler) - subq $24, %rsp -@@ -163,6 +170,7 @@ GLOBAL(return_to_handler) - movq 8(%rsp), %rdx - movq (%rsp), %rax - addq $24, %rsp -+ pax_force_fptr %rdi - jmp *%rdi - #endif - -@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64) - ENDPROC(native_usergs_sysret64) - #endif /* CONFIG_PARAVIRT */ - -+ .macro ljmpq sel, off -+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) -+ .byte 0x48; ljmp *1234f(%rip) -+ .pushsection .rodata -+ .align 16 -+ 1234: .quad \off; .word \sel -+ .popsection -+#else -+ pushq $\sel -+ pushq $\off -+ lretq -+#endif -+ .endm -+ -+ .macro pax_enter_kernel -+ pax_set_fptr_mask -+#ifdef CONFIG_PAX_KERNEXEC -+ call pax_enter_kernel -+#endif -+ .endm -+ -+ .macro pax_exit_kernel -+#ifdef CONFIG_PAX_KERNEXEC -+ call pax_exit_kernel -+#endif -+ .endm -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ENTRY(pax_enter_kernel) -+ pushq %rdi -+ -+#ifdef CONFIG_PARAVIRT -+ PV_SAVE_REGS(CLBR_RDI) -+#endif -+ -+ GET_CR0_INTO_RDI -+ bts $16,%rdi -+ jnc 3f -+ mov %cs,%edi -+ cmp $__KERNEL_CS,%edi -+ jnz 2f -+1: -+ -+#ifdef CONFIG_PARAVIRT -+ PV_RESTORE_REGS(CLBR_RDI) -+#endif -+ -+ popq %rdi -+ pax_force_retaddr -+ retq -+ -+2: ljmpq __KERNEL_CS,1f -+3: ljmpq __KERNEXEC_KERNEL_CS,4f -+4: SET_RDI_INTO_CR0 -+ jmp 1b -+ENDPROC(pax_enter_kernel) -+ -+ENTRY(pax_exit_kernel) -+ pushq %rdi -+ -+#ifdef CONFIG_PARAVIRT -+ PV_SAVE_REGS(CLBR_RDI) -+#endif -+ -+ mov %cs,%rdi -+ cmp $__KERNEXEC_KERNEL_CS,%edi -+ jz 2f -+1: -+ -+#ifdef CONFIG_PARAVIRT -+ PV_RESTORE_REGS(CLBR_RDI); -+#endif -+ -+ popq %rdi -+ pax_force_retaddr -+ retq -+ -+2: GET_CR0_INTO_RDI -+ btr $16,%rdi -+ ljmpq __KERNEL_CS,3f -+3: SET_RDI_INTO_CR0 -+ jmp 1b -+#ifdef CONFIG_PARAVIRT -+ PV_RESTORE_REGS(CLBR_RDI); -+#endif -+ -+ popq %rdi -+ pax_force_retaddr -+ retq -+ENDPROC(pax_exit_kernel) -+#endif -+ -+ .macro pax_enter_kernel_user -+ pax_set_fptr_mask -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_enter_kernel_user -+#endif -+ .endm -+ -+ .macro pax_exit_kernel_user -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_exit_kernel_user -+#endif -+#ifdef CONFIG_PAX_RANDKSTACK -+ push %rax -+ call pax_randomize_kstack -+ pop %rax -+#endif -+ .endm -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ENTRY(pax_enter_kernel_user) -+ pushq %rdi -+ pushq %rbx -+ -+#ifdef CONFIG_PARAVIRT -+ PV_SAVE_REGS(CLBR_RDI) -+#endif -+ -+ GET_CR3_INTO_RDI -+ mov %rdi,%rbx -+ add $__START_KERNEL_map,%rbx -+ sub phys_base(%rip),%rbx -+ -+#ifdef CONFIG_PARAVIRT -+ pushq %rdi -+ cmpl $0, pv_info+PARAVIRT_enabled -+ jz 1f -+ i = 0 -+ .rept USER_PGD_PTRS -+ mov i*8(%rbx),%rsi -+ mov $0,%sil -+ lea i*8(%rbx),%rdi -+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) -+ i = i + 1 -+ .endr -+ jmp 2f -+1: -+#endif -+ -+ i = 0 -+ .rept USER_PGD_PTRS -+ movb $0,i*8(%rbx) -+ i = i + 1 -+ .endr -+ -+#ifdef CONFIG_PARAVIRT -+2: popq %rdi -+#endif -+ SET_RDI_INTO_CR3 -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ GET_CR0_INTO_RDI -+ bts $16,%rdi -+ 
SET_RDI_INTO_CR0 -+#endif -+ -+#ifdef CONFIG_PARAVIRT -+ PV_RESTORE_REGS(CLBR_RDI) -+#endif -+ -+ popq %rbx -+ popq %rdi -+ pax_force_retaddr -+ retq -+ENDPROC(pax_enter_kernel_user) -+ -+ENTRY(pax_exit_kernel_user) -+ push %rdi -+ -+#ifdef CONFIG_PARAVIRT -+ pushq %rbx -+ PV_SAVE_REGS(CLBR_RDI) -+#endif -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ GET_CR0_INTO_RDI -+ btr $16,%rdi -+ SET_RDI_INTO_CR0 -+#endif -+ -+ GET_CR3_INTO_RDI -+ add $__START_KERNEL_map,%rdi -+ sub phys_base(%rip),%rdi -+ -+#ifdef CONFIG_PARAVIRT -+ cmpl $0, pv_info+PARAVIRT_enabled -+ jz 1f -+ mov %rdi,%rbx -+ i = 0 -+ .rept USER_PGD_PTRS -+ mov i*8(%rbx),%rsi -+ mov $0x67,%sil -+ lea i*8(%rbx),%rdi -+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) -+ i = i + 1 -+ .endr -+ jmp 2f -+1: -+#endif -+ -+ i = 0 -+ .rept USER_PGD_PTRS -+ movb $0x67,i*8(%rdi) -+ i = i + 1 -+ .endr -+ -+#ifdef CONFIG_PARAVIRT -+2: PV_RESTORE_REGS(CLBR_RDI) -+ popq %rbx -+#endif -+ -+ popq %rdi -+ pax_force_retaddr -+ retq -+ENDPROC(pax_exit_kernel_user) -+#endif -+ -+.macro pax_erase_kstack -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+ call pax_erase_kstack -+#endif -+.endm -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+/* -+ * r11: thread_info -+ * rcx, rdx: can be clobbered -+ */ -+ENTRY(pax_erase_kstack) -+ pushq %rdi -+ pushq %rax -+ pushq %r11 -+ -+ GET_THREAD_INFO(%r11) -+ mov TI_lowest_stack(%r11), %rdi -+ mov $-0xBEEF, %rax -+ std -+ -+1: mov %edi, %ecx -+ and $THREAD_SIZE_asm - 1, %ecx -+ shr $3, %ecx -+ repne scasq -+ jecxz 2f -+ -+ cmp $2*8, %ecx -+ jc 2f -+ -+ mov $2*8, %ecx -+ repe scasq -+ jecxz 2f -+ jne 1b -+ -+2: cld -+ mov %esp, %ecx -+ sub %edi, %ecx -+ -+ cmp $THREAD_SIZE_asm, %rcx -+ jb 3f -+ ud2 -+3: -+ -+ shr $3, %ecx -+ rep stosq -+ -+ mov TI_task_thread_sp0(%r11), %rdi -+ sub $256, %rdi -+ mov %rdi, TI_lowest_stack(%r11) -+ -+ popq %r11 -+ popq %rax -+ popq %rdi -+ pax_force_retaddr -+ ret -+ENDPROC(pax_erase_kstack) -+#endif - - .macro TRACE_IRQS_IRETQ offset=ARGOFFSET - #ifdef CONFIG_TRACE_IRQFLAGS -@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64) - .endm - - .macro UNFAKE_STACK_FRAME -- addq $8*6, %rsp -- CFI_ADJUST_CFA_OFFSET -(6*8) -+ addq $8*6 + ARG_SKIP, %rsp -+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP) - .endm - - /* -@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64) - movq %rsp, %rsi - - leaq -RBP(%rsp),%rdi /* arg1 for handler */ -- testl $3, CS(%rdi) -+ testb $3, CS(%rdi) - je 1f - SWAPGS - /* -@@ -350,9 +634,10 @@ ENTRY(save_rest) - movq_cfi r15, R15+16 - movq %r11, 8(%rsp) /* return address */ - FIXUP_TOP_OF_STACK %r11, 16 -+ pax_force_retaddr - ret - CFI_ENDPROC --END(save_rest) -+ENDPROC(save_rest) - - /* save complete stack frame */ - .pushsection .kprobes.text, "ax" -@@ -381,9 +666,10 @@ ENTRY(save_paranoid) - js 1f /* negative -> in kernel */ - SWAPGS - xorl %ebx,%ebx --1: ret -+1: pax_force_retaddr_bts -+ ret - CFI_ENDPROC --END(save_paranoid) -+ENDPROC(save_paranoid) - .popsection - - /* -@@ -405,7 +691,7 @@ ENTRY(ret_from_fork) - - RESTORE_REST - -- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? -+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? - je int_ret_from_sys_call - - testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET -@@ -415,7 +701,7 @@ ENTRY(ret_from_fork) - jmp ret_from_sys_call # go to the SYSRET fastpath - - CFI_ENDPROC --END(ret_from_fork) -+ENDPROC(ret_from_fork) - - /* - * System call entry. Up to 6 arguments in registers are supported. 
-@@ -451,7 +737,7 @@ END(ret_from_fork) - ENTRY(system_call) - CFI_STARTPROC simple - CFI_SIGNAL_FRAME -- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET -+ CFI_DEF_CFA rsp,0 - CFI_REGISTER rip,rcx - /*CFI_REGISTER rflags,r11*/ - SWAPGS_UNSAFE_STACK -@@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs) - - movq %rsp,PER_CPU_VAR(old_rsp) - movq PER_CPU_VAR(kernel_stack),%rsp -+ SAVE_ARGS 8*6,0 -+ pax_enter_kernel_user - /* - * No need to follow this irqs off/on section - it's straight - * and short: - */ - ENABLE_INTERRUPTS(CLBR_NONE) -- SAVE_ARGS 8,0 - movq %rax,ORIG_RAX-ARGOFFSET(%rsp) - movq %rcx,RIP-ARGOFFSET(%rsp) - CFI_REL_OFFSET rip,RIP-ARGOFFSET -@@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs) - system_call_fastpath: - cmpq $__NR_syscall_max,%rax - ja badsys -- movq %r10,%rcx -+ movq R10-ARGOFFSET(%rsp),%rcx - call *sys_call_table(,%rax,8) # XXX: rip relative - movq %rax,RAX-ARGOFFSET(%rsp) - /* -@@ -498,6 +785,8 @@ sysret_check: - andl %edi,%edx - jnz sysret_careful - CFI_REMEMBER_STATE -+ pax_exit_kernel_user -+ pax_erase_kstack - /* - * sysretq will re-enable interrupts: - */ -@@ -549,14 +838,18 @@ badsys: - * jump back to the normal fast path. - */ - auditsys: -- movq %r10,%r9 /* 6th arg: 4th syscall arg */ -+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */ - movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ - movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ - movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ - movq %rax,%rsi /* 2nd arg: syscall number */ - movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ - call audit_syscall_entry -+ -+ pax_erase_kstack -+ - LOAD_ARGS 0 /* reload call-clobbered registers */ -+ pax_set_fptr_mask - jmp system_call_fastpath - - /* -@@ -586,16 +879,20 @@ tracesys: - FIXUP_TOP_OF_STACK %rdi - movq %rsp,%rdi - call syscall_trace_enter -+ -+ pax_erase_kstack -+ - /* - * Reload arg registers from stack in case ptrace changed them. - * We don't reload %rax because syscall_trace_enter() returned - * the value it wants us to use in the table lookup. - */ - LOAD_ARGS ARGOFFSET, 1 -+ pax_set_fptr_mask - RESTORE_REST - cmpq $__NR_syscall_max,%rax - ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ -- movq %r10,%rcx /* fixup for C */ -+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */ - call *sys_call_table(,%rax,8) - movq %rax,RAX-ARGOFFSET(%rsp) - /* Use IRET because user could have changed frame */ -@@ -607,7 +904,7 @@ tracesys: - GLOBAL(int_ret_from_sys_call) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl $3,CS-ARGOFFSET(%rsp) -+ testb $3,CS-ARGOFFSET(%rsp) - je retint_restore_args - movl $_TIF_ALLWORK_MASK,%edi - /* edi: mask to check */ -@@ -664,7 +961,7 @@ int_restore_rest: - TRACE_IRQS_OFF - jmp int_with_check - CFI_ENDPROC --END(system_call) -+ENDPROC(system_call) - - /* - * Certain special system calls that need to save a complete full stack frame. 
-@@ -680,7 +977,7 @@ ENTRY(\label) - call \func - jmp ptregscall_common - CFI_ENDPROC --END(\label) -+ENDPROC(\label) - .endm - - PTREGSCALL stub_clone, sys_clone, %r8 -@@ -698,9 +995,10 @@ ENTRY(ptregscall_common) - movq_cfi_restore R12+8, r12 - movq_cfi_restore RBP+8, rbp - movq_cfi_restore RBX+8, rbx -+ pax_force_retaddr - ret $REST_SKIP /* pop extended registers */ - CFI_ENDPROC --END(ptregscall_common) -+ENDPROC(ptregscall_common) - - ENTRY(stub_execve) - CFI_STARTPROC -@@ -715,7 +1013,7 @@ ENTRY(stub_execve) - RESTORE_REST - jmp int_ret_from_sys_call - CFI_ENDPROC --END(stub_execve) -+ENDPROC(stub_execve) - - /* - * sigreturn is special because it needs to restore all registers on return. -@@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn) - RESTORE_REST - jmp int_ret_from_sys_call - CFI_ENDPROC --END(stub_rt_sigreturn) -+ENDPROC(stub_rt_sigreturn) - - /* - * Build the entry stubs and pointer table with some assembler magic. -@@ -768,7 +1066,7 @@ vector=vector+1 - 2: jmp common_interrupt - .endr - CFI_ENDPROC --END(irq_entries_start) -+ENDPROC(irq_entries_start) - - .previous - END(interrupt) -@@ -789,6 +1087,16 @@ END(interrupt) - CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP - SAVE_ARGS_IRQ - PARTIAL_FRAME 0 -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rdi) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - call \func - .endm - -@@ -820,7 +1128,7 @@ ret_from_intr: - - exit_intr: - GET_THREAD_INFO(%rcx) -- testl $3,CS-ARGOFFSET(%rsp) -+ testb $3,CS-ARGOFFSET(%rsp) - je retint_kernel - - /* Interrupt came from user space */ -@@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */ - * The iretq could re-enable interrupts: - */ - DISABLE_INTERRUPTS(CLBR_ANY) -+ pax_exit_kernel_user -+ pax_erase_kstack - TRACE_IRQS_IRETQ - SWAPGS - jmp restore_args - - retint_restore_args: /* return to kernel space */ - DISABLE_INTERRUPTS(CLBR_ANY) -+ pax_exit_kernel -+ pax_force_retaddr RIP-ARGOFFSET - /* - * The iretq could re-enable interrupts: - */ -@@ -936,7 +1248,7 @@ ENTRY(retint_kernel) - #endif - - CFI_ENDPROC --END(common_interrupt) -+ENDPROC(common_interrupt) - /* - * End of kprobes section - */ -@@ -952,7 +1264,7 @@ ENTRY(\sym) - interrupt \do_sym - jmp ret_from_intr - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - - #ifdef CONFIG_SMP -@@ -1017,12 +1329,22 @@ ENTRY(\sym) - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call error_entry - DEFAULT_FRAME 0 -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ - call \do_sym - jmp error_exit /* %ebx: no swapgs flag */ - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - - .macro paranoidzeroentry sym do_sym -@@ -1034,15 +1356,25 @@ ENTRY(\sym) - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid - TRACE_IRQS_OFF -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ - call \do_sym - jmp paranoid_exit /* %ebx: no swapgs flag */ - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - --#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) -+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12) - .macro paranoidzeroentry_ist sym do_sym ist - ENTRY(\sym) - INTR_FRAME -@@ -1052,14 +1384,30 @@ ENTRY(\sym) - 
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid - TRACE_IRQS_OFF -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - movq %rsp,%rdi /* pt_regs pointer */ - xorl %esi,%esi /* no error code */ -+#ifdef CONFIG_SMP -+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d -+ lea init_tss(%r12), %r12 -+#else -+ lea init_tss(%rip), %r12 -+#endif - subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) - call \do_sym - addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) - jmp paranoid_exit /* %ebx: no swapgs flag */ - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - - .macro errorentry sym do_sym -@@ -1070,13 +1418,23 @@ ENTRY(\sym) - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call error_entry - DEFAULT_FRAME 0 -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - movq %rsp,%rdi /* pt_regs pointer */ - movq ORIG_RAX(%rsp),%rsi /* get error code */ - movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ - call \do_sym - jmp error_exit /* %ebx: no swapgs flag */ - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - - /* error code is on the stack already */ -@@ -1089,13 +1447,23 @@ ENTRY(\sym) - call save_paranoid - DEFAULT_FRAME 0 - TRACE_IRQS_OFF -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - movq %rsp,%rdi /* pt_regs pointer */ - movq ORIG_RAX(%rsp),%rsi /* get error code */ - movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ - call \do_sym - jmp paranoid_exit /* %ebx: no swapgs flag */ - CFI_ENDPROC --END(\sym) -+ENDPROC(\sym) - .endm - - zeroentry divide_error do_divide_error -@@ -1125,9 +1493,10 @@ gs_change: - 2: mfence /* workaround */ - SWAPGS - popfq_cfi -+ pax_force_retaddr - ret - CFI_ENDPROC --END(native_load_gs_index) -+ENDPROC(native_load_gs_index) - - .section __ex_table,"a" - .align 8 -@@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper) - * Here we are in the child and the registers are set as they were - * at kernel_thread() invocation in the parent. - */ -+ pax_force_fptr %rsi - call *%rsi - # exit - mov %eax, %edi - call do_exit - ud2 # padding for call trace - CFI_ENDPROC --END(kernel_thread_helper) -+ENDPROC(kernel_thread_helper) - - /* - * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. -@@ -1182,11 +1552,11 @@ ENTRY(kernel_execve) - RESTORE_REST - testq %rax,%rax - je int_ret_from_sys_call -- RESTORE_ARGS - UNFAKE_STACK_FRAME -+ pax_force_retaddr - ret - CFI_ENDPROC --END(kernel_execve) -+ENDPROC(kernel_execve) - - /* Call softirq on interrupt stack. Interrupts are off. */ - ENTRY(call_softirq) -@@ -1204,9 +1574,10 @@ ENTRY(call_softirq) - CFI_DEF_CFA_REGISTER rsp - CFI_ADJUST_CFA_OFFSET -8 - decl PER_CPU_VAR(irq_count) -+ pax_force_retaddr - ret - CFI_ENDPROC --END(call_softirq) -+ENDPROC(call_softirq) - - #ifdef CONFIG_XEN - zeroentry xen_hypervisor_callback xen_do_hypervisor_callback -@@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) - decl PER_CPU_VAR(irq_count) - jmp error_exit - CFI_ENDPROC --END(xen_do_hypervisor_callback) -+ENDPROC(xen_do_hypervisor_callback) - - /* - * Hypervisor uses this for application faults while it executes. 
-@@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback) - SAVE_ALL - jmp error_exit - CFI_ENDPROC --END(xen_failsafe_callback) -+ENDPROC(xen_failsafe_callback) - - apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ - xen_hvm_callback_vector xen_evtchn_do_upcall -@@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit) - TRACE_IRQS_OFF - testl %ebx,%ebx /* swapgs needed? */ - jnz paranoid_restore -- testl $3,CS(%rsp) -+ testb $3,CS(%rsp) - jnz paranoid_userspace -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pax_exit_kernel -+ TRACE_IRQS_IRETQ 0 -+ SWAPGS_UNSAFE_STACK -+ RESTORE_ALL 8 -+ pax_force_retaddr_bts -+ jmp irq_return -+#endif - paranoid_swapgs: -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pax_exit_kernel_user -+#else -+ pax_exit_kernel -+#endif - TRACE_IRQS_IRETQ 0 - SWAPGS_UNSAFE_STACK - RESTORE_ALL 8 - jmp irq_return - paranoid_restore: -+ pax_exit_kernel - TRACE_IRQS_IRETQ 0 - RESTORE_ALL 8 -+ pax_force_retaddr_bts - jmp irq_return - paranoid_userspace: - GET_THREAD_INFO(%rcx) -@@ -1390,7 +1776,7 @@ paranoid_schedule: - TRACE_IRQS_OFF - jmp paranoid_userspace - CFI_ENDPROC --END(paranoid_exit) -+ENDPROC(paranoid_exit) - - /* - * Exception entry point. This expects an error code/orig_rax on the stack. -@@ -1417,12 +1803,13 @@ ENTRY(error_entry) - movq_cfi r14, R14+8 - movq_cfi r15, R15+8 - xorl %ebx,%ebx -- testl $3,CS+8(%rsp) -+ testb $3,CS+8(%rsp) - je error_kernelspace - error_swapgs: - SWAPGS - error_sti: - TRACE_IRQS_OFF -+ pax_force_retaddr_bts - ret - - /* -@@ -1449,7 +1836,7 @@ bstep_iret: - movq %rcx,RIP+8(%rsp) - jmp error_swapgs - CFI_ENDPROC --END(error_entry) -+ENDPROC(error_entry) - - - /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ -@@ -1469,7 +1856,7 @@ ENTRY(error_exit) - jnz retint_careful - jmp retint_swapgs - CFI_ENDPROC --END(error_exit) -+ENDPROC(error_exit) - - - /* runs on exception stack */ -@@ -1481,6 +1868,16 @@ ENTRY(nmi) - CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 - call save_paranoid - DEFAULT_FRAME 0 -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ testb $3, CS(%rsp) -+ jnz 1f -+ pax_enter_kernel -+ jmp 2f -+1: pax_enter_kernel_user -+2: -+#else -+ pax_enter_kernel -+#endif - /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ - movq %rsp,%rdi - movq $-1,%rsi -@@ -1491,12 +1888,28 @@ ENTRY(nmi) - DISABLE_INTERRUPTS(CLBR_NONE) - testl %ebx,%ebx /* swapgs needed? 
*/ - jnz nmi_restore -- testl $3,CS(%rsp) -+ testb $3,CS(%rsp) - jnz nmi_userspace -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pax_exit_kernel -+ SWAPGS_UNSAFE_STACK -+ RESTORE_ALL 8 -+ pax_force_retaddr_bts -+ jmp irq_return -+#endif - nmi_swapgs: -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pax_exit_kernel_user -+#else -+ pax_exit_kernel -+#endif - SWAPGS_UNSAFE_STACK -+ RESTORE_ALL 8 -+ jmp irq_return - nmi_restore: -+ pax_exit_kernel - RESTORE_ALL 8 -+ pax_force_retaddr_bts - jmp irq_return - nmi_userspace: - GET_THREAD_INFO(%rcx) -@@ -1525,14 +1938,14 @@ nmi_schedule: - jmp paranoid_exit - CFI_ENDPROC - #endif --END(nmi) -+ENDPROC(nmi) - - ENTRY(ignore_sysret) - CFI_STARTPROC - mov $-ENOSYS,%eax - sysret - CFI_ENDPROC --END(ignore_sysret) -+ENDPROC(ignore_sysret) - - /* - * End of kprobes section -diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c -index c9a281f..ce2f317 100644 ---- a/arch/x86/kernel/ftrace.c -+++ b/arch/x86/kernel/ftrace.c -@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */ - static const void *mod_code_newcode; /* holds the text to write to the IP */ - - static unsigned nmi_wait_count; --static atomic_t nmi_update_count = ATOMIC_INIT(0); -+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0); - - int ftrace_arch_read_dyn_info(char *buf, int size) - { -@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size) - - r = snprintf(buf, size, "%u %u", - nmi_wait_count, -- atomic_read(&nmi_update_count)); -+ atomic_read_unchecked(&nmi_update_count)); - return r; - } - -@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void) - - if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { - smp_rmb(); -+ pax_open_kernel(); - ftrace_mod_code(); -- atomic_inc(&nmi_update_count); -+ pax_close_kernel(); -+ atomic_inc_unchecked(&nmi_update_count); - } - /* Must have previous changes seen before executions */ - smp_mb(); -@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code, - { - unsigned char replaced[MCOUNT_INSN_SIZE]; - -+ ip = ktla_ktva(ip); -+ - /* - * Note: Due to modules and __init, code can - * disappear and change, we need to protect against faulting -@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) - unsigned char old[MCOUNT_INSN_SIZE], *new; - int ret; - -- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); -+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, (unsigned long)func); - ret = ftrace_modify_code(ip, old, new); - -@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip, - { - unsigned char code[MCOUNT_INSN_SIZE]; - -+ ip = ktla_ktva(ip); -+ - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; - -diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c -index 3bb0850..55a56f4 100644 ---- a/arch/x86/kernel/head32.c -+++ b/arch/x86/kernel/head32.c -@@ -19,6 +19,7 @@ - #include <asm/io_apic.h> - #include <asm/bios_ebda.h> - #include <asm/tlbflush.h> -+#include <asm/boot.h> - - static void __init i386_default_early_setup(void) - { -@@ -33,7 +34,7 @@ void __init i386_start_kernel(void) - { - memblock_init(); - -- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); -+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS"); - - #ifdef CONFIG_BLK_DEV_INITRD - /* Reserve INITRD */ -diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S -index ce0be7c..c41476e 100644 ---- a/arch/x86/kernel/head_32.S 
-+++ b/arch/x86/kernel/head_32.S -@@ -25,6 +25,12 @@ - /* Physical address */ - #define pa(X) ((X) - __PAGE_OFFSET) - -+#ifdef CONFIG_PAX_KERNEXEC -+#define ta(X) (X) -+#else -+#define ta(X) ((X) - __PAGE_OFFSET) -+#endif -+ - /* - * References to members of the new_cpu_data structure. - */ -@@ -54,11 +60,7 @@ - * and small than max_low_pfn, otherwise will waste some page table entries - */ - --#if PTRS_PER_PMD > 1 --#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) --#else --#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) --#endif -+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) - - /* Number of possible pages in the lowmem region */ - LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) -@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE - RESERVE_BRK(pagetables, INIT_MAP_SIZE) - - /* -+ * Real beginning of normal "text" segment -+ */ -+ENTRY(stext) -+ENTRY(_stext) -+ -+/* - * 32-bit kernel entrypoint; only used by the boot CPU. On entry, - * %esi points to the real-mode code as a 32-bit pointer. - * CS and DS must be 4 GB flat segments, but we don't depend on -@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) - * can. - */ - __HEAD -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ jmp startup_32 -+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ -+.fill PAGE_SIZE-5,1,0xcc -+#endif -+ - ENTRY(startup_32) - movl pa(stack_start),%ecx - -@@ -105,6 +120,57 @@ ENTRY(startup_32) - 2: - leal -__PAGE_OFFSET(%ecx),%esp - -+#ifdef CONFIG_SMP -+ movl $pa(cpu_gdt_table),%edi -+ movl $__per_cpu_load,%eax -+ movw %ax,__KERNEL_PERCPU + 2(%edi) -+ rorl $16,%eax -+ movb %al,__KERNEL_PERCPU + 4(%edi) -+ movb %ah,__KERNEL_PERCPU + 7(%edi) -+ movl $__per_cpu_end - 1,%eax -+ subl $__per_cpu_start,%eax -+ movw %ax,__KERNEL_PERCPU + 0(%edi) -+#endif -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ movl $NR_CPUS,%ecx -+ movl $pa(cpu_gdt_table),%edi -+1: -+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) -+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi) -+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi) -+ addl $PAGE_SIZE_asm,%edi -+ loop 1b -+#endif -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ movl $pa(boot_gdt),%edi -+ movl $__LOAD_PHYSICAL_ADDR,%eax -+ movw %ax,__BOOT_CS + 2(%edi) -+ rorl $16,%eax -+ movb %al,__BOOT_CS + 4(%edi) -+ movb %ah,__BOOT_CS + 7(%edi) -+ rorl $16,%eax -+ -+ ljmp $(__BOOT_CS),$1f -+1: -+ -+ movl $NR_CPUS,%ecx -+ movl $pa(cpu_gdt_table),%edi -+ addl $__PAGE_OFFSET,%eax -+1: -+ movw %ax,__KERNEL_CS + 2(%edi) -+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi) -+ rorl $16,%eax -+ movb %al,__KERNEL_CS + 4(%edi) -+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi) -+ movb %ah,__KERNEL_CS + 7(%edi) -+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi) -+ rorl $16,%eax -+ addl $PAGE_SIZE_asm,%edi -+ loop 1b -+#endif -+ - /* - * Clear BSS first so that there are no surprises... 
- */ -@@ -195,8 +261,11 @@ ENTRY(startup_32) - movl %eax, pa(max_pfn_mapped) - - /* Do early initialization of the fixmap area */ -- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax -- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) -+#ifdef CONFIG_COMPAT_VDSO -+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8) -+#else -+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8) -+#endif - #else /* Not PAE */ - - page_pde_offset = (__PAGE_OFFSET >> 20); -@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); - movl %eax, pa(max_pfn_mapped) - - /* Do early initialization of the fixmap area */ -- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax -- movl %eax,pa(initial_page_table+0xffc) -+#ifdef CONFIG_COMPAT_VDSO -+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc) -+#else -+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc) -+#endif - #endif - - #ifdef CONFIG_PARAVIRT -@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); - cmpl $num_subarch_entries, %eax - jae bad_subarch - -- movl pa(subarch_entries)(,%eax,4), %eax -- subl $__PAGE_OFFSET, %eax -- jmp *%eax -+ jmp *pa(subarch_entries)(,%eax,4) - - bad_subarch: - WEAK(lguest_entry) -@@ -255,10 +325,10 @@ WEAK(xen_entry) - __INITDATA - - subarch_entries: -- .long default_entry /* normal x86/PC */ -- .long lguest_entry /* lguest hypervisor */ -- .long xen_entry /* Xen hypervisor */ -- .long default_entry /* Moorestown MID */ -+ .long ta(default_entry) /* normal x86/PC */ -+ .long ta(lguest_entry) /* lguest hypervisor */ -+ .long ta(xen_entry) /* Xen hypervisor */ -+ .long ta(default_entry) /* Moorestown MID */ - num_subarch_entries = (. - subarch_entries) / 4 - .previous - #else -@@ -312,6 +382,7 @@ default_entry: - orl %edx,%eax - movl %eax,%cr4 - -+#ifdef CONFIG_X86_PAE - testb $X86_CR4_PAE, %al # check if PAE is enabled - jz 6f - -@@ -340,6 +411,9 @@ default_entry: - /* Make changes effective */ - wrmsr - -+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) -+#endif -+ - 6: - - /* -@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP - 1: movl $(__KERNEL_DS),%eax # reload all the segment registers - movl %eax,%ss # after changing gdt. 
- -- movl $(__USER_DS),%eax # DS/ES contains default USER segment -+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment - movl %eax,%ds - movl %eax,%es - -@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP - */ - cmpb $0,ready - jne 1f -- movl $gdt_page,%eax -+ movl $cpu_gdt_table,%eax - movl $stack_canary,%ecx -+#ifdef CONFIG_SMP -+ addl $__per_cpu_load,%ecx -+#endif - movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) - shrl $16, %ecx - movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) - movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) - 1: --#endif - movl $(__KERNEL_STACK_CANARY),%eax -+#elif defined(CONFIG_PAX_MEMORY_UDEREF) -+ movl $(__USER_DS),%eax -+#else -+ xorl %eax,%eax -+#endif - movl %eax,%gs - - xorl %eax,%eax # Clear LDT -@@ -558,22 +639,22 @@ early_page_fault: - jmp early_fault - - early_fault: -- cld - #ifdef CONFIG_PRINTK -+ cmpl $1,%ss:early_recursion_flag -+ je hlt_loop -+ incl %ss:early_recursion_flag -+ cld - pusha - movl $(__KERNEL_DS),%eax - movl %eax,%ds - movl %eax,%es -- cmpl $2,early_recursion_flag -- je hlt_loop -- incl early_recursion_flag - movl %cr2,%eax - pushl %eax - pushl %edx /* trapno */ - pushl $fault_msg - call printk -+; call dump_stack - #endif -- call dump_stack - hlt_loop: - hlt - jmp hlt_loop -@@ -581,8 +662,11 @@ hlt_loop: - /* This is the default interrupt "handler" :-) */ - ALIGN - ignore_int: -- cld - #ifdef CONFIG_PRINTK -+ cmpl $2,%ss:early_recursion_flag -+ je hlt_loop -+ incl %ss:early_recursion_flag -+ cld - pushl %eax - pushl %ecx - pushl %edx -@@ -591,9 +675,6 @@ ignore_int: - movl $(__KERNEL_DS),%eax - movl %eax,%ds - movl %eax,%es -- cmpl $2,early_recursion_flag -- je hlt_loop -- incl early_recursion_flag - pushl 16(%esp) - pushl 24(%esp) - pushl 32(%esp) -@@ -622,29 +703,43 @@ ENTRY(initial_code) - /* - * BSS section - */ --__PAGE_ALIGNED_BSS -- .align PAGE_SIZE - #ifdef CONFIG_X86_PAE -+.section .initial_pg_pmd,"a",@progbits - initial_pg_pmd: - .fill 1024*KPMDS,4,0 - #else -+.section .initial_page_table,"a",@progbits - ENTRY(initial_page_table) - .fill 1024,4,0 - #endif -+.section .initial_pg_fixmap,"a",@progbits - initial_pg_fixmap: - .fill 1024,4,0 -+.section .empty_zero_page,"a",@progbits - ENTRY(empty_zero_page) - .fill 4096,1,0 -+.section .swapper_pg_dir,"a",@progbits - ENTRY(swapper_pg_dir) -+#ifdef CONFIG_X86_PAE -+ .fill 4,8,0 -+#else - .fill 1024,4,0 -+#endif -+ -+/* -+ * The IDT has to be page-aligned to simplify the Pentium -+ * F0 0F bug workaround.. We have a special link segment -+ * for this. -+ */ -+.section .idt,"a",@progbits -+ENTRY(idt_table) -+ .fill 256,8,0 - - /* - * This starts the data section. - */ - #ifdef CONFIG_X86_PAE --__PAGE_ALIGNED_DATA -- /* Page-aligned for the benefit of paravirt? 
*/ -- .align PAGE_SIZE -+.section .initial_page_table,"a",@progbits - ENTRY(initial_page_table) - .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ - # if KPMDS == 3 -@@ -663,18 +758,27 @@ ENTRY(initial_page_table) - # error "Kernel PMDs should be 1, 2 or 3" - # endif - .align PAGE_SIZE /* needs to be page-sized too */ -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ENTRY(cpu_pgd) -+ .rept NR_CPUS -+ .fill 4,8,0 -+ .endr -+#endif -+ - #endif - - .data - .balign 4 - ENTRY(stack_start) -- .long init_thread_union+THREAD_SIZE -+ .long init_thread_union+THREAD_SIZE-8 - -+ready: .byte 0 -+ -+.section .rodata,"a",@progbits - early_recursion_flag: - .long 0 - --ready: .byte 0 -- - int_msg: - .asciz "Unknown interrupt or fault at: %p %p %p\n" - -@@ -707,7 +811,7 @@ fault_msg: - .word 0 # 32 bit align gdt_desc.address - boot_gdt_descr: - .word __BOOT_DS+7 -- .long boot_gdt - __PAGE_OFFSET -+ .long pa(boot_gdt) - - .word 0 # 32-bit align idt_desc.address - idt_descr: -@@ -718,7 +822,7 @@ idt_descr: - .word 0 # 32 bit align gdt_desc.address - ENTRY(early_gdt_descr) - .word GDT_ENTRIES*8-1 -- .long gdt_page /* Overwritten for secondary CPUs */ -+ .long cpu_gdt_table /* Overwritten for secondary CPUs */ - - /* - * The boot_gdt must mirror the equivalent in setup.S and is -@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr) - .align L1_CACHE_BYTES - ENTRY(boot_gdt) - .fill GDT_ENTRY_BOOT_CS,8,0 -- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ -- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ -+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ -+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ -+ -+ .align PAGE_SIZE_asm -+ENTRY(cpu_gdt_table) -+ .rept NR_CPUS -+ .quad 0x0000000000000000 /* NULL descriptor */ -+ .quad 0x0000000000000000 /* 0x0b reserved */ -+ .quad 0x0000000000000000 /* 0x13 reserved */ -+ .quad 0x0000000000000000 /* 0x1b reserved */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ -+#else -+ .quad 0x0000000000000000 /* 0x20 unused */ -+#endif -+ -+ .quad 0x0000000000000000 /* 0x28 unused */ -+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ -+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ -+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ -+ .quad 0x0000000000000000 /* 0x4b reserved */ -+ .quad 0x0000000000000000 /* 0x53 reserved */ -+ .quad 0x0000000000000000 /* 0x5b reserved */ -+ -+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ -+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ -+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ -+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ -+ -+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */ -+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ -+ -+ /* -+ * Segments used for calling PnP BIOS have byte granularity. -+ * The code segments and data segments have fixed 64k limits, -+ * the transfer segment sizes are set at run time. -+ */ -+ .quad 0x00409b000000ffff /* 0x90 32-bit code */ -+ .quad 0x00009b000000ffff /* 0x98 16-bit code */ -+ .quad 0x000093000000ffff /* 0xa0 16-bit data */ -+ .quad 0x0000930000000000 /* 0xa8 16-bit data */ -+ .quad 0x0000930000000000 /* 0xb0 16-bit data */ -+ -+ /* -+ * The APM segments have byte granularity and their bases -+ * are set at run time. All have 64k limits. 
-+ */ -+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */ -+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ -+ .quad 0x004093000000ffff /* 0xc8 APM DS data */ -+ -+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ -+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */ -+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */ -+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ -+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ -+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ -+ -+ /* Be sure this is zeroed to avoid false validations in Xen */ -+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0 -+ .endr -diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S -index e11e394..9aebc5d 100644 ---- a/arch/x86/kernel/head_64.S -+++ b/arch/x86/kernel/head_64.S -@@ -19,6 +19,8 @@ - #include <asm/cache.h> - #include <asm/processor-flags.h> - #include <asm/percpu.h> -+#include <asm/cpufeature.h> -+#include <asm/alternative-asm.h> - - #ifdef CONFIG_PARAVIRT - #include <asm/asm-offsets.h> -@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) - L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) - L4_START_KERNEL = pgd_index(__START_KERNEL_map) - L3_START_KERNEL = pud_index(__START_KERNEL_map) -+L4_VMALLOC_START = pgd_index(VMALLOC_START) -+L3_VMALLOC_START = pud_index(VMALLOC_START) -+L4_VMALLOC_END = pgd_index(VMALLOC_END) -+L3_VMALLOC_END = pud_index(VMALLOC_END) -+L4_VMEMMAP_START = pgd_index(VMEMMAP_START) -+L3_VMEMMAP_START = pud_index(VMEMMAP_START) - - .text - __HEAD -@@ -85,35 +93,23 @@ startup_64: - */ - addq %rbp, init_level4_pgt + 0(%rip) - addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) -+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) -+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip) -+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) - addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) - - addq %rbp, level3_ident_pgt + 0(%rip) -+#ifndef CONFIG_XEN -+ addq %rbp, level3_ident_pgt + 8(%rip) -+#endif - -- addq %rbp, level3_kernel_pgt + (510*8)(%rip) -- addq %rbp, level3_kernel_pgt + (511*8)(%rip) -+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) -+ -+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) -+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) - - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) -- -- /* Add an Identity mapping if I am above 1G */ -- leaq _text(%rip), %rdi -- andq $PMD_PAGE_MASK, %rdi -- -- movq %rdi, %rax -- shrq $PUD_SHIFT, %rax -- andq $(PTRS_PER_PUD - 1), %rax -- jz ident_complete -- -- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx -- leaq level3_ident_pgt(%rip), %rbx -- movq %rdx, 0(%rbx, %rax, 8) -- -- movq %rdi, %rax -- shrq $PMD_SHIFT, %rax -- andq $(PTRS_PER_PMD - 1), %rax -- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx -- leaq level2_spare_pgt(%rip), %rbx -- movq %rdx, 0(%rbx, %rax, 8) --ident_complete: -+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip) - - /* - * Fixup the kernel text+data virtual addresses. Note that -@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64) - * after the boot processor executes this code. - */ - -- /* Enable PAE mode and PGE */ -- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax -+ /* Enable PAE mode and PSE/PGE */ -+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax - movq %rax, %cr4 - - /* Setup early boot stage 4 level pagetables. */ -@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64) - movl $MSR_EFER, %ecx - rdmsr - btsl $_EFER_SCE, %eax /* Enable System Call */ -- btl $20,%edi /* No Execute supported? 
*/ -+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */ - jnc 1f - btsl $_EFER_NX, %eax -+ leaq init_level4_pgt(%rip), %rdi -+#ifndef CONFIG_EFI -+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi) -+#endif -+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi) -+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi) -+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi) -+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) - 1: wrmsr /* Make changes effective */ - - /* Setup cr0 */ -@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64) - * jump. In addition we need to ensure %cs is set so we make this - * a far return. - */ -+ pax_set_fptr_mask - movq initial_code(%rip),%rax - pushq $0 # fake return address to stop unwinder - pushq $__KERNEL_CS # set correct cs -@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64) - bad_address: - jmp bad_address - -- .section ".init.text","ax" -+ __INIT - #ifdef CONFIG_EARLY_PRINTK - .globl early_idt_handlers - early_idt_handlers: -@@ -314,18 +319,23 @@ ENTRY(early_idt_handler) - #endif /* EARLY_PRINTK */ - 1: hlt - jmp 1b -+ .previous - - #ifdef CONFIG_EARLY_PRINTK -+ __INITDATA - early_recursion_flag: - .long 0 -+ .previous - -+ .section .rodata,"a",@progbits - early_idt_msg: - .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" - early_idt_ripmsg: - .asciz "RIP %s\n" -+ .previous - #endif /* CONFIG_EARLY_PRINTK */ -- .previous - -+ .section .rodata,"a",@progbits - #define NEXT_PAGE(name) \ - .balign PAGE_SIZE; \ - ENTRY(name) -@@ -338,7 +348,6 @@ ENTRY(name) - i = i + 1 ; \ - .endr - -- .data - /* - * This default setting generates an ident mapping at address 0x100000 - * and a mapping for the kernel that precisely maps virtual address -@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt) - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE - .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE -+ .org init_level4_pgt + L4_VMALLOC_START*8, 0 -+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE -+ .org init_level4_pgt + L4_VMALLOC_END*8, 0 -+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE -+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0 -+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE - .org init_level4_pgt + L4_START_KERNEL*8, 0 - /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ - .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+NEXT_PAGE(cpu_pgd) -+ .rept NR_CPUS -+ .fill 512,8,0 -+ .endr -+#endif -+ - NEXT_PAGE(level3_ident_pgt) - .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE -+#ifdef CONFIG_XEN - .fill 511,8,0 -+#else -+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE -+ .fill 510,8,0 -+#endif -+ -+NEXT_PAGE(level3_vmalloc_start_pgt) -+ .fill 512,8,0 -+ -+NEXT_PAGE(level3_vmalloc_end_pgt) -+ .fill 512,8,0 -+ -+NEXT_PAGE(level3_vmemmap_pgt) -+ .fill L3_VMEMMAP_START,8,0 -+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE - - NEXT_PAGE(level3_kernel_pgt) - .fill L3_START_KERNEL,8,0 -@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt) - .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE - .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE - -+NEXT_PAGE(level2_vmemmap_pgt) -+ .fill 512,8,0 -+ - NEXT_PAGE(level2_fixmap_pgt) -- .fill 506,8,0 -- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE -- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ -- .fill 5,8,0 -+ .fill 507,8,0 -+ .quad level1_vsyscall_pgt - __START_KERNEL_map + 
_PAGE_TABLE -+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ -+ .fill 4,8,0 - --NEXT_PAGE(level1_fixmap_pgt) -+NEXT_PAGE(level1_vsyscall_pgt) - .fill 512,8,0 - --NEXT_PAGE(level2_ident_pgt) -- /* Since I easily can, map the first 1G. -+ /* Since I easily can, map the first 2G. - * Don't set NX because code runs from these pages. - */ -- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) -+NEXT_PAGE(level2_ident_pgt) -+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) - - NEXT_PAGE(level2_kernel_pgt) - /* -@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt) - * If you want to increase this then increase MODULES_VADDR - * too.) - */ -- PMDS(0, __PAGE_KERNEL_LARGE_EXEC, -- KERNEL_IMAGE_SIZE/PMD_SIZE) -- --NEXT_PAGE(level2_spare_pgt) -- .fill 512, 8, 0 -+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) - - #undef PMDS - #undef NEXT_PAGE - -- .data -+ .align PAGE_SIZE -+ENTRY(cpu_gdt_table) -+ .rept NR_CPUS -+ .quad 0x0000000000000000 /* NULL descriptor */ -+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ -+ .quad 0x00af9b000000ffff /* __KERNEL_CS */ -+ .quad 0x00cf93000000ffff /* __KERNEL_DS */ -+ .quad 0x00cffb000000ffff /* __USER32_CS */ -+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ -+ .quad 0x00affb000000ffff /* __USER_CS */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ -+#else -+ .quad 0x0 /* unused */ -+#endif -+ -+ .quad 0,0 /* TSS */ -+ .quad 0,0 /* LDT */ -+ .quad 0,0,0 /* three TLS descriptors */ -+ .quad 0x0000f40000000000 /* node/CPU stored in limit */ -+ /* asm/segment.h:GDT_ENTRIES must match this */ -+ -+ /* zero the remaining page */ -+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 -+ .endr -+ - .align 16 - .globl early_gdt_descr - early_gdt_descr: - .word GDT_ENTRIES*8-1 - early_gdt_descr_base: -- .quad INIT_PER_CPU_VAR(gdt_page) -+ .quad cpu_gdt_table - - ENTRY(phys_base) - /* This must match the first entry in level2_kernel_pgt */ - .quad 0x0000000000000000 - - #include "../../x86/xen/xen-head.S" -- -- .section .bss, "aw", @nobits -+ -+ .section .rodata,"a",@progbits - .align L1_CACHE_BYTES - ENTRY(idt_table) -- .skip IDT_ENTRIES * 16 -+ .fill 512,8,0 - - __PAGE_ALIGNED_BSS - .align PAGE_SIZE -diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c -index 9c3bd4a..e1d9b35 100644 ---- a/arch/x86/kernel/i386_ksyms_32.c -+++ b/arch/x86/kernel/i386_ksyms_32.c -@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); - EXPORT_SYMBOL(cmpxchg8b_emu); - #endif - -+EXPORT_SYMBOL_GPL(cpu_gdt_table); -+ - /* Networking helper routines. 
*/ - EXPORT_SYMBOL(csum_partial_copy_generic); -+EXPORT_SYMBOL(csum_partial_copy_generic_to_user); -+EXPORT_SYMBOL(csum_partial_copy_generic_from_user); - - EXPORT_SYMBOL(__get_user_1); - EXPORT_SYMBOL(__get_user_2); -@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr); - - EXPORT_SYMBOL(csum_partial); - EXPORT_SYMBOL(empty_zero_page); -+ -+#ifdef CONFIG_PAX_KERNEXEC -+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); -+#endif -diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c -index 6104852..6114160 100644 ---- a/arch/x86/kernel/i8259.c -+++ b/arch/x86/kernel/i8259.c -@@ -210,7 +210,7 @@ spurious_8259A_irq: - "spurious 8259A interrupt: IRQ%d.\n", irq); - spurious_irq_mask |= irqmask; - } -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - /* - * Theoretically we do not have to handle this IRQ, - * but in Linux this does not cause problems and is -diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c -index 43e9ccf..44ccf6f 100644 ---- a/arch/x86/kernel/init_task.c -+++ b/arch/x86/kernel/init_task.c -@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); - * way process stacks are handled. This is done by having a special - * "init_task" linker map entry.. - */ --union thread_union init_thread_union __init_task_data = -- { INIT_THREAD_INFO(init_task) }; -+union thread_union init_thread_union __init_task_data; - - /* - * Initial task structure. -@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task); - * section. Since TSS's are completely CPU-local, we want them - * on exact cacheline boundaries, to eliminate cacheline ping-pong. - */ --DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; -- -+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS }; -+EXPORT_SYMBOL(init_tss); -diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c -index 8c96897..be66bfa 100644 ---- a/arch/x86/kernel/ioport.c -+++ b/arch/x86/kernel/ioport.c -@@ -6,6 +6,7 @@ - #include <linux/sched.h> - #include <linux/kernel.h> - #include <linux/capability.h> -+#include <linux/security.h> - #include <linux/errno.h> - #include <linux/types.h> - #include <linux/ioport.h> -@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) - - if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) - return -EINVAL; -+#ifdef CONFIG_GRKERNSEC_IO -+ if (turn_on && grsec_disable_privio) { -+ gr_handle_ioperm(); -+ return -EPERM; -+ } -+#endif - if (turn_on && !capable(CAP_SYS_RAWIO)) - return -EPERM; - -@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) - * because the ->io_bitmap_max value must match the bitmap - * contents: - */ -- tss = &per_cpu(init_tss, get_cpu()); -+ tss = init_tss + get_cpu(); - - if (turn_on) - bitmap_clear(t->io_bitmap_ptr, from, num); -@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs) - return -EINVAL; - /* Trying to gain more privileges? 
*/ - if (level > old) { -+#ifdef CONFIG_GRKERNSEC_IO -+ if (grsec_disable_privio) { -+ gr_handle_iopl(); -+ return -EPERM; -+ } -+#endif - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - } -diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c -index 6c0802e..bea25ae 100644 ---- a/arch/x86/kernel/irq.c -+++ b/arch/x86/kernel/irq.c -@@ -17,7 +17,7 @@ - #include <asm/mce.h> - #include <asm/hw_irq.h> - --atomic_t irq_err_count; -+atomic_unchecked_t irq_err_count; - - /* Function pointer for generic interrupt vector handling */ - void (*x86_platform_ipi_callback)(void) = NULL; -@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) - seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); - seq_printf(p, " Machine check polls\n"); - #endif -- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); -+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); - #if defined(CONFIG_X86_IO_APIC) -- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); -+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); - #endif - return 0; - } -@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu) - - u64 arch_irq_stat(void) - { -- u64 sum = atomic_read(&irq_err_count); -+ u64 sum = atomic_read_unchecked(&irq_err_count); - - #ifdef CONFIG_X86_IO_APIC -- sum += atomic_read(&irq_mis_count); -+ sum += atomic_read_unchecked(&irq_mis_count); - #endif - return sum; - } -diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c -index 7209070..cbcd71a 100644 ---- a/arch/x86/kernel/irq_32.c -+++ b/arch/x86/kernel/irq_32.c -@@ -36,7 +36,7 @@ static int check_stack_overflow(void) - __asm__ __volatile__("andl %%esp,%0" : - "=r" (sp) : "0" (THREAD_SIZE - 1)); - -- return sp < (sizeof(struct thread_info) + STACK_WARN); -+ return sp < STACK_WARN; - } - - static void print_stack_overflow(void) -@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { } - * per-CPU IRQ handling contexts (thread information and stack) - */ - union irq_ctx { -- struct thread_info tinfo; -- u32 stack[THREAD_SIZE/sizeof(u32)]; -+ unsigned long previous_esp; -+ u32 stack[THREAD_SIZE/sizeof(u32)]; - } __attribute__((aligned(THREAD_SIZE))); - - static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); -@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack) - static inline int - execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) - { -- union irq_ctx *curctx, *irqctx; -+ union irq_ctx *irqctx; - u32 *isp, arg1, arg2; - -- curctx = (union irq_ctx *) current_thread_info(); - irqctx = __this_cpu_read(hardirq_ctx); - - /* -@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) - * handler) we can't do that and just have to keep using the - * current stack (which is the irq stack already after all) - */ -- if (unlikely(curctx == irqctx)) -+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE)) - return 0; - - /* build the stack frame on the IRQ stack */ -- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); -- irqctx->tinfo.task = curctx->tinfo.task; -- irqctx->tinfo.previous_esp = current_stack_pointer; -+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); -+ irqctx->previous_esp = current_stack_pointer; - -- /* -- * Copy the softirq bits in preempt_count so that the -- * softirq checks work in the hardirq context. 
-- */ -- irqctx->tinfo.preempt_count = -- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | -- (curctx->tinfo.preempt_count & SOFTIRQ_MASK); -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ __set_fs(MAKE_MM_SEG(0)); -+#endif - - if (unlikely(overflow)) - call_on_stack(print_stack_overflow, isp); -@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) - : "0" (irq), "1" (desc), "2" (isp), - "D" (desc->handle_irq) - : "memory", "cc", "ecx"); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ __set_fs(current_thread_info()->addr_limit); -+#endif -+ - return 1; - } - -@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) - */ - void __cpuinit irq_ctx_init(int cpu) - { -- union irq_ctx *irqctx; -- - if (per_cpu(hardirq_ctx, cpu)) - return; - -- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), -- THREAD_FLAGS, -- THREAD_ORDER)); -- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); -- irqctx->tinfo.cpu = cpu; -- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; -- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); -- -- per_cpu(hardirq_ctx, cpu) = irqctx; -- -- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), -- THREAD_FLAGS, -- THREAD_ORDER)); -- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); -- irqctx->tinfo.cpu = cpu; -- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); -- -- per_cpu(softirq_ctx, cpu) = irqctx; -+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); -+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); - - printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", - cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); -@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu) - asmlinkage void do_softirq(void) - { - unsigned long flags; -- struct thread_info *curctx; - union irq_ctx *irqctx; - u32 *isp; - -@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void) - local_irq_save(flags); - - if (local_softirq_pending()) { -- curctx = current_thread_info(); - irqctx = __this_cpu_read(softirq_ctx); -- irqctx->tinfo.task = curctx->task; -- irqctx->tinfo.previous_esp = current_stack_pointer; -+ irqctx->previous_esp = current_stack_pointer; - - /* build the stack frame on the softirq stack */ -- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); -+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ __set_fs(MAKE_MM_SEG(0)); -+#endif - - call_on_stack(__do_softirq, isp); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ __set_fs(current_thread_info()->addr_limit); -+#endif -+ - /* - * Shouldn't happen, we returned above if in_interrupt(): - */ -diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c -index 00354d4..187ae44 100644 ---- a/arch/x86/kernel/kgdb.c -+++ b/arch/x86/kernel/kgdb.c -@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) - #ifdef CONFIG_X86_32 - switch (regno) { - case GDB_SS: -- if (!user_mode_vm(regs)) -+ if (!user_mode(regs)) - *(unsigned long *)mem = __KERNEL_DS; - break; - case GDB_SP: -- if (!user_mode_vm(regs)) -+ if (!user_mode(regs)) - *(unsigned long *)mem = kernel_stack_pointer(regs); - break; - case GDB_GS: -@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, - case 'k': - /* clear the trace bit */ - linux_regs->flags &= ~X86_EFLAGS_TF; -- atomic_set(&kgdb_cpu_doing_single_step, -1); -+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); - - /* set the trace bit if we're stepping */ 
- if (remcomInBuffer[0] == 's') { - linux_regs->flags |= X86_EFLAGS_TF; -- atomic_set(&kgdb_cpu_doing_single_step, -+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, - raw_smp_processor_id()); - } - -@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) - return NOTIFY_DONE; - - case DIE_DEBUG: -- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { -+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { - if (user_mode(regs)) - return single_step_cont(regs, args); - break; -diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c -index 794bc95..c6e29e9 100644 ---- a/arch/x86/kernel/kprobes.c -+++ b/arch/x86/kernel/kprobes.c -@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) - } __attribute__((packed)) *insn; - - insn = (struct __arch_relative_insn *)from; -+ -+ pax_open_kernel(); - insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); - insn->op = op; -+ pax_close_kernel(); - } - - /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ -@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes) - kprobe_opcode_t opcode; - kprobe_opcode_t *orig_opcodes = opcodes; - -- if (search_exception_tables((unsigned long)opcodes)) -+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes))) - return 0; /* Page fault may occur on this address. */ - - retry: -@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) - } - } - insn_get_length(&insn); -+ pax_open_kernel(); - memcpy(dest, insn.kaddr, insn.length); -+ pax_close_kernel(); - - #ifdef CONFIG_X86_64 - if (insn_rip_relative(&insn)) { -@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) - (u8 *) dest; - BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ - disp = (u8 *) dest + insn_offset_displacement(&insn); -+ pax_open_kernel(); - *(s32 *) disp = (s32) newdisp; -+ pax_close_kernel(); - } - #endif - return insn.length; -@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) - */ - __copy_instruction(p->ainsn.insn, p->addr, 0); - -- if (can_boost(p->addr)) -+ if (can_boost(ktla_ktva(p->addr))) - p->ainsn.boostable = 0; - else - p->ainsn.boostable = -1; - -- p->opcode = *p->addr; -+ p->opcode = *(ktla_ktva(p->addr)); - } - - int __kprobes arch_prepare_kprobe(struct kprobe *p) -@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, - * nor set current_kprobe, because it doesn't use single - * stepping. - */ -- regs->ip = (unsigned long)p->ainsn.insn; -+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); - preempt_enable_no_resched(); - return; - } -@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, - if (p->opcode == BREAKPOINT_INSTRUCTION) - regs->ip = (unsigned long)p->addr; - else -- regs->ip = (unsigned long)p->ainsn.insn; -+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); - } - - /* -@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) - setup_singlestep(p, regs, kcb, 0); - return 1; - } -- } else if (*addr != BREAKPOINT_INSTRUCTION) { -+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { - /* - * The breakpoint instruction was removed right - * after we hit it. 
Another cpu has removed -@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void) - " movq %rax, 152(%rsp)\n" - RESTORE_REGS_STRING - " popfq\n" -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN -+ " btsq $63,(%rsp)\n" -+#endif - #else - " pushf\n" - SAVE_REGS_STRING -@@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p, - struct pt_regs *regs, struct kprobe_ctlblk *kcb) - { - unsigned long *tos = stack_addr(regs); -- unsigned long copy_ip = (unsigned long)p->ainsn.insn; -+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); - unsigned long orig_ip = (unsigned long)p->addr; - kprobe_opcode_t *insn = p->ainsn.insn; - -@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - struct die_args *args = data; - int ret = NOTIFY_DONE; - -- if (args->regs && user_mode_vm(args->regs)) -+ if (args->regs && user_mode(args->regs)) - return ret; - - switch (val) { -@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) - * Verify if the address gap is in 2GB range, because this uses - * a relative jump. - */ -- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; -+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE; - if (abs(rel) > 0x7fffffff) - return -ERANGE; - -@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) - synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); - - /* Set probe function call */ -- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); -+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback)); - - /* Set returning jmp instruction at the tail of out-of-line buffer */ - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, -- (u8 *)op->kp.addr + op->optinsn.size); -+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size); - - flush_icache_range((unsigned long) buf, - (unsigned long) buf + TMPL_END_IDX + -@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, - ((long)op->kp.addr + RELATIVEJUMP_SIZE)); - - /* Backup instructions which will be replaced by jump address */ -- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, -+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE, - RELATIVE_ADDR_SIZE); - - insn_buf[0] = RELATIVEJUMP_OPCODE; -diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c -index a9c2116..a52d4fc 100644 ---- a/arch/x86/kernel/kvm.c -+++ b/arch/x86/kernel/kvm.c -@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void) - pv_mmu_ops.set_pud = kvm_set_pud; - #if PAGETABLE_LEVELS == 4 - pv_mmu_ops.set_pgd = kvm_set_pgd; -+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd; - #endif - #endif - pv_mmu_ops.flush_tlb_user = kvm_flush_tlb; -diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c -index ea69726..604d066 100644 ---- a/arch/x86/kernel/ldt.c -+++ b/arch/x86/kernel/ldt.c -@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) - if (reload) { - #ifdef CONFIG_SMP - preempt_disable(); -- load_LDT(pc); -+ load_LDT_nolock(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); - #else -- load_LDT(pc); -+ load_LDT_nolock(pc); - #endif - } - if (oldsize) { -@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) - return err; - - for (i = 0; i < old->size; i++) -- write_ldt_entry(new->ldt, i, old->ldt + i * 
LDT_ENTRY_SIZE); -+ write_ldt_entry(new->ldt, i, old->ldt + i); - return 0; - } - -@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); - } -+ -+ if (tsk == current) { -+ mm->context.vdso = 0; -+ -+#ifdef CONFIG_X86_32 -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ mm->context.user_cs_base = 0UL; -+ mm->context.user_cs_limit = ~0UL; -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) -+ cpus_clear(mm->context.cpu_user_cs_mask); -+#endif -+ -+#endif -+#endif -+ -+ } -+ - return retval; - } - -@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) - } - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { -+ error = -EINVAL; -+ goto out_unlock; -+ } -+#endif -+ - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; -diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c -index a3fa43b..8966f4c 100644 ---- a/arch/x86/kernel/machine_kexec_32.c -+++ b/arch/x86/kernel/machine_kexec_32.c -@@ -27,7 +27,7 @@ - #include <asm/cacheflush.h> - #include <asm/debugreg.h> - --static void set_idt(void *newidt, __u16 limit) -+static void set_idt(struct desc_struct *newidt, __u16 limit) - { - struct desc_ptr curidt; - -@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit) - } - - --static void set_gdt(void *newgdt, __u16 limit) -+static void set_gdt(struct desc_struct *newgdt, __u16 limit) - { - struct desc_ptr curgdt; - -@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image) - } - - control_page = page_address(image->control_code_page); -- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); -+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); - - relocate_kernel_ptr = control_page; - page_list[PA_CONTROL_PAGE] = __pa(control_page); -diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c -index 1a1b606..5c89b55 100644 ---- a/arch/x86/kernel/microcode_intel.c -+++ b/arch/x86/kernel/microcode_intel.c -@@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) - - static int get_ucode_user(void *to, const void *from, size_t n) - { -- return copy_from_user(to, from, n); -+ return copy_from_user(to, (const void __force_user *)from, n); - } - - static enum ucode_state - request_microcode_user(int cpu, const void __user *buf, size_t size) - { -- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); -+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user); - } - - static void microcode_fini_cpu(int cpu) -diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c -index 925179f..85bec6c 100644 ---- a/arch/x86/kernel/module.c -+++ b/arch/x86/kernel/module.c -@@ -36,15 +36,60 @@ - #define DEBUGP(fmt...) 
- #endif - --void *module_alloc(unsigned long size) -+static inline void *__module_alloc(unsigned long size, pgprot_t prot) - { - if (PAGE_ALIGN(size) > MODULES_LEN) - return NULL; - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, -- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, -+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot, - -1, __builtin_return_address(0)); - } - -+void *module_alloc(unsigned long size) -+{ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ return __module_alloc(size, PAGE_KERNEL); -+#else -+ return __module_alloc(size, PAGE_KERNEL_EXEC); -+#endif -+ -+} -+ -+#ifdef CONFIG_PAX_KERNEXEC -+#ifdef CONFIG_X86_32 -+void *module_alloc_exec(unsigned long size) -+{ -+ struct vm_struct *area; -+ -+ if (size == 0) -+ return NULL; -+ -+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); -+ return area ? area->addr : NULL; -+} -+EXPORT_SYMBOL(module_alloc_exec); -+ -+void module_free_exec(struct module *mod, void *module_region) -+{ -+ vunmap(module_region); -+} -+EXPORT_SYMBOL(module_free_exec); -+#else -+void module_free_exec(struct module *mod, void *module_region) -+{ -+ module_free(mod, module_region); -+} -+EXPORT_SYMBOL(module_free_exec); -+ -+void *module_alloc_exec(unsigned long size) -+{ -+ return __module_alloc(size, PAGE_KERNEL_RX); -+} -+EXPORT_SYMBOL(module_alloc_exec); -+#endif -+#endif -+ - #ifdef CONFIG_X86_32 - int apply_relocate(Elf32_Shdr *sechdrs, - const char *strtab, -@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, - unsigned int i; - Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; - Elf32_Sym *sym; -- uint32_t *location; -+ uint32_t *plocation, location; - - DEBUGP("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { - /* This is where to make the change */ -- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr -- + rel[i].r_offset; -+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; -+ location = (uint32_t)plocation; -+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) -+ plocation = ktla_ktva((void *)plocation); - /* This is the symbol it is referring to. Note that all - undefined symbols have been resolved. 
*/ - sym = (Elf32_Sym *)sechdrs[symindex].sh_addr -@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, - switch (ELF32_R_TYPE(rel[i].r_info)) { - case R_386_32: - /* We add the value into the location given */ -- *location += sym->st_value; -+ pax_open_kernel(); -+ *plocation += sym->st_value; -+ pax_close_kernel(); - break; - case R_386_PC32: - /* Add the value, subtract its postition */ -- *location += sym->st_value - (uint32_t)location; -+ pax_open_kernel(); -+ *plocation += sym->st_value - location; -+ pax_close_kernel(); - break; - default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", -@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, - case R_X86_64_NONE: - break; - case R_X86_64_64: -+ pax_open_kernel(); - *(u64 *)loc = val; -+ pax_close_kernel(); - break; - case R_X86_64_32: -+ pax_open_kernel(); - *(u32 *)loc = val; -+ pax_close_kernel(); - if (val != *(u32 *)loc) - goto overflow; - break; - case R_X86_64_32S: -+ pax_open_kernel(); - *(s32 *)loc = val; -+ pax_close_kernel(); - if ((s64)val != *(s32 *)loc) - goto overflow; - break; - case R_X86_64_PC32: - val -= (u64)loc; -+ pax_open_kernel(); - *(u32 *)loc = val; -+ pax_close_kernel(); -+ - #if 0 - if ((s64)val != *(s32 *)loc) - goto overflow; -diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c -index 676b8c7..870ba04 100644 ---- a/arch/x86/kernel/paravirt-spinlocks.c -+++ b/arch/x86/kernel/paravirt-spinlocks.c -@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) - arch_spin_lock(lock); - } - --struct pv_lock_ops pv_lock_ops = { -+struct pv_lock_ops pv_lock_ops __read_only = { - #ifdef CONFIG_SMP - .spin_is_locked = __ticket_spin_is_locked, - .spin_is_contended = __ticket_spin_is_contended, -diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c -index d90272e..2d54e8e 100644 ---- a/arch/x86/kernel/paravirt.c -+++ b/arch/x86/kernel/paravirt.c -@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x) - { - return x; - } -+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) -+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64); -+#endif - - void __init default_banner(void) - { -@@ -133,6 +136,9 @@ static void *get_call_destination(u8 type) - .pv_lock_ops = pv_lock_ops, - #endif - }; -+ -+ pax_track_stack(); -+ - return *((void **)&tmpl + type); - } - -@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, - if (opfunc == NULL) - /* If there's no function, patch it with a ud2a (BUG) */ - ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); -- else if (opfunc == _paravirt_nop) -+ else if (opfunc == (void *)_paravirt_nop) - /* If the operation is a nop, then nop the callsite */ - ret = paravirt_patch_nop(); - - /* identity functions just return their single argument */ -- else if (opfunc == _paravirt_ident_32) -+ else if (opfunc == (void *)_paravirt_ident_32) - ret = paravirt_patch_ident_32(insnbuf, len); -- else if (opfunc == _paravirt_ident_64) -+ else if (opfunc == (void *)_paravirt_ident_64) - ret = paravirt_patch_ident_64(insnbuf, len); -+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) -+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64) -+ ret = paravirt_patch_ident_64(insnbuf, len); -+#endif - - else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || - type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || -@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, - if (insn_len > len || start == NULL) - insn_len = len; - else -- 
memcpy(insnbuf, start, insn_len); -+ memcpy(insnbuf, ktla_ktva(start), insn_len); - - return insn_len; - } -@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void) - preempt_enable(); - } - --struct pv_info pv_info = { -+struct pv_info pv_info __read_only = { - .name = "bare hardware", - .paravirt_enabled = 0, - .kernel_rpl = 0, -@@ -313,16 +323,16 @@ struct pv_info pv_info = { - #endif - }; - --struct pv_init_ops pv_init_ops = { -+struct pv_init_ops pv_init_ops __read_only = { - .patch = native_patch, - }; - --struct pv_time_ops pv_time_ops = { -+struct pv_time_ops pv_time_ops __read_only = { - .sched_clock = native_sched_clock, - .steal_clock = native_steal_clock, - }; - --struct pv_irq_ops pv_irq_ops = { -+struct pv_irq_ops pv_irq_ops __read_only = { - .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), - .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), - .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), -@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = { - #endif - }; - --struct pv_cpu_ops pv_cpu_ops = { -+struct pv_cpu_ops pv_cpu_ops __read_only = { - .cpuid = native_cpuid, - .get_debugreg = native_get_debugreg, - .set_debugreg = native_set_debugreg, -@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = { - .end_context_switch = paravirt_nop, - }; - --struct pv_apic_ops pv_apic_ops = { -+struct pv_apic_ops pv_apic_ops __read_only = { - #ifdef CONFIG_X86_LOCAL_APIC - .startup_ipi_hook = paravirt_nop, - #endif - }; - --#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) -+#ifdef CONFIG_X86_32 -+#ifdef CONFIG_X86_PAE -+/* 64-bit pagetable entries */ -+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64) -+#else - /* 32-bit pagetable entries */ - #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) -+#endif - #else - /* 64-bit pagetable entries */ - #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) - #endif - --struct pv_mmu_ops pv_mmu_ops = { -+struct pv_mmu_ops pv_mmu_ops __read_only = { - - .read_cr2 = native_read_cr2, - .write_cr2 = native_write_cr2, -@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = { - .make_pud = PTE_IDENT, - - .set_pgd = native_set_pgd, -+ .set_pgd_batched = native_set_pgd_batched, - #endif - #endif /* PAGETABLE_LEVELS >= 3 */ - -@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = { - }, - - .set_fixmap = native_set_fixmap, -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ .pax_open_kernel = native_pax_open_kernel, -+ .pax_close_kernel = native_pax_close_kernel, -+#endif -+ - }; - - EXPORT_SYMBOL_GPL(pv_time_ops); -diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c -index 35ccf75..67e7d4d 100644 ---- a/arch/x86/kernel/pci-iommu_table.c -+++ b/arch/x86/kernel/pci-iommu_table.c -@@ -2,7 +2,7 @@ - #include <asm/iommu_table.h> - #include <linux/string.h> - #include <linux/kallsyms.h> -- -+#include <linux/sched.h> - - #define DEBUG 1 - -@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start, - { - struct iommu_table_entry *p, *q, *x; - -+ pax_track_stack(); -+ - /* Simple cyclic dependency checker. 
*/ - for (p = start; p < finish; p++) { - q = find_dependents_of(start, finish, p); -diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c -index e7e3b01..43c5af3 100644 ---- a/arch/x86/kernel/process.c -+++ b/arch/x86/kernel/process.c -@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk) - - void free_thread_info(struct thread_info *ti) - { -- free_thread_xstate(ti->task); - free_pages((unsigned long)ti, get_order(THREAD_SIZE)); - } - -+static struct kmem_cache *task_struct_cachep; -+ - void arch_task_cache_init(void) - { -- task_xstate_cachep = -- kmem_cache_create("task_xstate", xstate_size, -+ /* create a slab on which task_structs can be allocated */ -+ task_struct_cachep = -+ kmem_cache_create("task_struct", sizeof(struct task_struct), -+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); -+ -+ task_xstate_cachep = -+ kmem_cache_create("task_xstate", xstate_size, - __alignof__(union thread_xstate), -- SLAB_PANIC | SLAB_NOTRACK, NULL); -+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL); -+} -+ -+struct task_struct *alloc_task_struct_node(int node) -+{ -+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); -+} -+ -+void free_task_struct(struct task_struct *task) -+{ -+ free_thread_xstate(task); -+ kmem_cache_free(task_struct_cachep, task); - } - - /* -@@ -70,7 +87,7 @@ void exit_thread(void) - unsigned long *bp = t->io_bitmap_ptr; - - if (bp) { -- struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); -+ struct tss_struct *tss = init_tss + get_cpu(); - - t->io_bitmap_ptr = NULL; - clear_thread_flag(TIF_IO_BITMAP); -@@ -106,7 +123,7 @@ void show_regs_common(void) - - printk(KERN_CONT "\n"); - printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", -- current->pid, current->comm, print_tainted(), -+ task_pid_nr(current), current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); -@@ -120,6 +137,9 @@ void flush_thread(void) - { - struct task_struct *tsk = current; - -+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF) -+ loadsegment(gs, 0); -+#endif - flush_ptrace_hw_breakpoint(tsk); - memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); - /* -@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) - regs.di = (unsigned long) arg; - - #ifdef CONFIG_X86_32 -- regs.ds = __USER_DS; -- regs.es = __USER_DS; -+ regs.ds = __KERNEL_DS; -+ regs.es = __KERNEL_DS; - regs.fs = __KERNEL_PERCPU; -- regs.gs = __KERNEL_STACK_CANARY; -+ savesegment(gs, regs.gs); - #else - regs.ss = __KERNEL_DS; - #endif -@@ -403,7 +423,7 @@ void default_idle(void) - EXPORT_SYMBOL(default_idle); - #endif - --void stop_this_cpu(void *dummy) -+__noreturn void stop_this_cpu(void *dummy) - { - local_irq_disable(); - /* -@@ -645,16 +665,37 @@ static int __init idle_setup(char *str) - } - early_param("idle", idle_setup); - --unsigned long arch_align_stack(unsigned long sp) -+#ifdef CONFIG_PAX_RANDKSTACK -+void pax_randomize_kstack(struct pt_regs *regs) - { -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() % 8192; -- return sp & ~0xf; --} -+ struct thread_struct *thread = ¤t->thread; -+ unsigned long time; - --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long range_end = mm->brk + 0x02000000; -- return randomize_range(mm->brk, range_end, 0) ? 
: mm->brk; --} -+ if (!randomize_va_space) -+ return; -+ -+ if (v8086_mode(regs)) -+ return; - -+ rdtscl(time); -+ -+ /* P4 seems to return a 0 LSB, ignore it */ -+#ifdef CONFIG_MPENTIUM4 -+ time &= 0x3EUL; -+ time <<= 2; -+#elif defined(CONFIG_X86_64) -+ time &= 0xFUL; -+ time <<= 4; -+#else -+ time &= 0x1FUL; -+ time <<= 3; -+#endif -+ -+ thread->sp0 ^= time; -+ load_sp0(init_tss + smp_processor_id(), thread); -+ -+#ifdef CONFIG_X86_64 -+ percpu_write(kernel_stack, thread->sp0); -+#endif -+} -+#endif -diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c -index 7a3b651..5a946f6 100644 ---- a/arch/x86/kernel/process_32.c -+++ b/arch/x86/kernel/process_32.c -@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); - unsigned long thread_saved_pc(struct task_struct *tsk) - { - return ((unsigned long *)tsk->thread.sp)[3]; -+//XXX return tsk->thread.eip; - } - - #ifndef CONFIG_SMP -@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all) - unsigned long sp; - unsigned short ss, gs; - -- if (user_mode_vm(regs)) { -+ if (user_mode(regs)) { - sp = regs->sp; - ss = regs->ss & 0xffff; -- gs = get_user_gs(regs); - } else { - sp = kernel_stack_pointer(regs); - savesegment(ss, ss); -- savesegment(gs, gs); - } -+ gs = get_user_gs(regs); - - show_regs_common(); - -@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, - struct task_struct *tsk; - int err; - -- childregs = task_pt_regs(p); -+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; - *childregs = *regs; - childregs->ax = 0; - childregs->sp = sp; - - p->thread.sp = (unsigned long) childregs; - p->thread.sp0 = (unsigned long) (childregs+1); -+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); - - p->thread.ip = (unsigned long) ret_from_fork; - -@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - struct thread_struct *prev = &prev_p->thread, - *next = &next_p->thread; - int cpu = smp_processor_id(); -- struct tss_struct *tss = &per_cpu(init_tss, cpu); -+ struct tss_struct *tss = init_tss + cpu; - bool preload_fpu; - - /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ -@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - */ - lazy_save_gs(prev->gs); - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ __set_fs(task_thread_info(next_p)->addr_limit); -+#endif -+ - /* - * Load the per-thread Thread-Local Storage descriptor. 
- */ -@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - */ - arch_end_context_switch(next_p); - -+ percpu_write(current_task, next_p); -+ percpu_write(current_tinfo, &next_p->tinfo); -+ - if (preload_fpu) - __math_state_restore(); - -@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - if (prev->gs | next->gs) - lazy_load_gs(next->gs); - -- percpu_write(current_task, next_p); -- - return prev_p; - } - -@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p) - } while (count++ < 16); - return 0; - } -- -diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c -index f693e44..3c979b2 100644 ---- a/arch/x86/kernel/process_64.c -+++ b/arch/x86/kernel/process_64.c -@@ -88,7 +88,7 @@ static void __exit_idle(void) - void exit_idle(void) - { - /* idle loop has pid 0 */ -- if (current->pid) -+ if (task_pid_nr(current)) - return; - __exit_idle(); - } -@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, - struct pt_regs *childregs; - struct task_struct *me = current; - -- childregs = ((struct pt_regs *) -- (THREAD_SIZE + task_stack_page(p))) - 1; -+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16; - *childregs = *regs; - - childregs->ax = 0; -@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, - p->thread.sp = (unsigned long) childregs; - p->thread.sp0 = (unsigned long) (childregs+1); - p->thread.usersp = me->thread.usersp; -+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); - - set_tsk_thread_flag(p, TIF_FORK); - -@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - struct thread_struct *prev = &prev_p->thread; - struct thread_struct *next = &next_p->thread; - int cpu = smp_processor_id(); -- struct tss_struct *tss = &per_cpu(init_tss, cpu); -+ struct tss_struct *tss = init_tss + cpu; - unsigned fsindex, gsindex; - bool preload_fpu; - -@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - prev->usersp = percpu_read(old_rsp); - percpu_write(old_rsp, next->usersp); - percpu_write(current_task, next_p); -+ percpu_write(current_tinfo, &next_p->tinfo); - -- percpu_write(kernel_stack, -- (unsigned long)task_stack_page(next_p) + -- THREAD_SIZE - KERNEL_STACK_OFFSET); -+ percpu_write(kernel_stack, next->sp0); - - /* - * Now maybe reload the debug registers and handle I/O bitmaps -@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p) - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack = (unsigned long)task_stack_page(p); -- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) -+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64)) - return 0; - fp = *(u64 *)(p->thread.sp); - do { -- if (fp < (unsigned long)stack || -- fp >= (unsigned long)stack+THREAD_SIZE) -+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64)) - return 0; - ip = *(u64 *)(fp+8); - if (!in_sched_functions(ip)) -diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c -index 8252879..d3219e0 100644 ---- a/arch/x86/kernel/ptrace.c -+++ b/arch/x86/kernel/ptrace.c -@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request, - unsigned long addr, unsigned long data) - { - int ret; -- unsigned long __user *datap = (unsigned long __user *)data; -+ unsigned long __user *datap = (__force unsigned long __user *)data; - - switch (request) { - /* read the word at location addr in the 
USER area. */ -@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request, - if ((int) addr < 0) - return -EIO; - ret = do_get_thread_area(child, addr, -- (struct user_desc __user *)data); -+ (__force struct user_desc __user *) data); - break; - - case PTRACE_SET_THREAD_AREA: - if ((int) addr < 0) - return -EIO; - ret = do_set_thread_area(child, addr, -- (struct user_desc __user *)data, 0); -+ (__force struct user_desc __user *) data, 0); - break; - #endif - -@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, - memset(info, 0, sizeof(*info)); - info->si_signo = SIGTRAP; - info->si_code = si_code; -- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; -+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL; - } - - void user_single_step_siginfo(struct task_struct *tsk, -diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c -index 42eb330..139955c 100644 ---- a/arch/x86/kernel/pvclock.c -+++ b/arch/x86/kernel/pvclock.c -@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) - return pv_tsc_khz; - } - --static atomic64_t last_value = ATOMIC64_INIT(0); -+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0); - - void pvclock_resume(void) - { -- atomic64_set(&last_value, 0); -+ atomic64_set_unchecked(&last_value, 0); - } - - cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) -@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) - * updating at the same time, and one of them could be slightly behind, - * making the assumption that last_value always go forward fail to hold. - */ -- last = atomic64_read(&last_value); -+ last = atomic64_read_unchecked(&last_value); - do { - if (ret < last) - return last; -- last = atomic64_cmpxchg(&last_value, last, ret); -+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret); - } while (unlikely(last != ret)); - - return ret; -diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c -index d4a705f..ef8f1a9 100644 ---- a/arch/x86/kernel/reboot.c -+++ b/arch/x86/kernel/reboot.c -@@ -35,7 +35,7 @@ void (*pm_power_off)(void); - EXPORT_SYMBOL(pm_power_off); - - static const struct desc_ptr no_idt = {}; --static int reboot_mode; -+static unsigned short reboot_mode; - enum reboot_type reboot_type = BOOT_ACPI; - int reboot_force; - -@@ -324,13 +324,17 @@ core_initcall(reboot_init); - extern const unsigned char machine_real_restart_asm[]; - extern const u64 machine_real_restart_gdt[3]; - --void machine_real_restart(unsigned int type) -+__noreturn void machine_real_restart(unsigned int type) - { - void *restart_va; - unsigned long restart_pa; -- void (*restart_lowmem)(unsigned int); -+ void (* __noreturn restart_lowmem)(unsigned int); - u64 *lowmem_gdt; - -+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) -+ struct desc_struct *gdt; -+#endif -+ - local_irq_disable(); - - /* Write zero to CMOS register number 0x0f, which the BIOS POST -@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type) - boot)". This seems like a fairly standard thing that gets set by - REBOOT.COM programs, and the previous reset routine did this - too. 
*/ -- *((unsigned short *)0x472) = reboot_mode; -+ *(unsigned short *)(__va(0x472)) = reboot_mode; - - /* Patch the GDT in the low memory trampoline */ - lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); - - restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); - restart_pa = virt_to_phys(restart_va); -- restart_lowmem = (void (*)(unsigned int))restart_pa; -+ restart_lowmem = (void *)restart_pa; - - /* GDT[0]: GDT self-pointer */ - lowmem_gdt[0] = -@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type) - GDT_ENTRY(0x009b, restart_pa, 0xffff); - - /* Jump to the identity-mapped low memory code */ -+ -+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) -+ gdt = get_cpu_gdt_table(smp_processor_id()); -+ pax_open_kernel(); -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ gdt[GDT_ENTRY_KERNEL_DS].type = 3; -+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; -+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); -+#endif -+#ifdef CONFIG_PAX_KERNEXEC -+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0; -+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0; -+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0; -+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff; -+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf; -+ gdt[GDT_ENTRY_KERNEL_CS].g = 1; -+#endif -+ pax_close_kernel(); -+#endif -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type)); -+ unreachable(); -+#else - restart_lowmem(type); -+#endif -+ - } - #ifdef CONFIG_APM_MODULE - EXPORT_SYMBOL(machine_real_restart); -@@ -532,7 +562,7 @@ void __attribute__((weak)) mach_reboot_fixups(void) - * try to force a triple fault and then cycle between hitting the keyboard - * controller and doing that - */ --static void native_machine_emergency_restart(void) -+__noreturn static void native_machine_emergency_restart(void) - { - int i; - int attempt = 0; -@@ -656,13 +686,13 @@ void native_machine_shutdown(void) - #endif - } - --static void __machine_emergency_restart(int emergency) -+static __noreturn void __machine_emergency_restart(int emergency) - { - reboot_emergency = emergency; - machine_ops.emergency_restart(); - } - --static void native_machine_restart(char *__unused) -+static __noreturn void native_machine_restart(char *__unused) - { - printk("machine restart\n"); - -@@ -671,7 +701,7 @@ static void native_machine_restart(char *__unused) - __machine_emergency_restart(0); - } - --static void native_machine_halt(void) -+static __noreturn void native_machine_halt(void) - { - /* stop other cpus and apics */ - machine_shutdown(); -@@ -682,7 +712,7 @@ static void native_machine_halt(void) - stop_this_cpu(NULL); - } - --static void native_machine_power_off(void) -+__noreturn static void native_machine_power_off(void) - { - if (pm_power_off) { - if (!reboot_force) -@@ -691,6 +721,7 @@ static void native_machine_power_off(void) - } - /* a fallback in case there is no PM info available */ - tboot_shutdown(TB_SHUTDOWN_HALT); -+ unreachable(); - } - - struct machine_ops machine_ops = { -diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S -index 7a6f3b3..bed145d7 100644 ---- a/arch/x86/kernel/relocate_kernel_64.S -+++ b/arch/x86/kernel/relocate_kernel_64.S -@@ -11,6 +11,7 @@ - #include <asm/kexec.h> - #include <asm/processor-flags.h> - #include <asm/pgtable_types.h> -+#include <asm/alternative-asm.h> - - /* - * Must be relocatable PIC code callable as a C function -@@ -160,13 +161,14 @@ identity_mapped: - 
xorq %rbp, %rbp - xorq %r8, %r8 - xorq %r9, %r9 -- xorq %r10, %r9 -+ xorq %r10, %r10 - xorq %r11, %r11 - xorq %r12, %r12 - xorq %r13, %r13 - xorq %r14, %r14 - xorq %r15, %r15 - -+ pax_force_retaddr 0, 1 - ret - - 1: -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index afaf384..1a101fe 100644 ---- a/arch/x86/kernel/setup.c -+++ b/arch/x86/kernel/setup.c -@@ -447,7 +447,7 @@ static void __init parse_setup_data(void) - - switch (data->type) { - case SETUP_E820_EXT: -- parse_e820_ext(data); -+ parse_e820_ext((struct setup_data __force_kernel *)data); - break; - case SETUP_DTB: - add_dtb(pa_data); -@@ -650,7 +650,7 @@ static void __init trim_bios_range(void) - * area (640->1Mb) as ram even though it is not. - * take them out. - */ -- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); -+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); - sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); - } - -@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p) - - if (!boot_params.hdr.root_flags) - root_mountflags &= ~MS_RDONLY; -- init_mm.start_code = (unsigned long) _text; -- init_mm.end_code = (unsigned long) _etext; -+ init_mm.start_code = ktla_ktva((unsigned long) _text); -+ init_mm.end_code = ktla_ktva((unsigned long) _etext); - init_mm.end_data = (unsigned long) _edata; - init_mm.brk = _brk_end; - -- code_resource.start = virt_to_phys(_text); -- code_resource.end = virt_to_phys(_etext)-1; -- data_resource.start = virt_to_phys(_etext); -+ code_resource.start = virt_to_phys(ktla_ktva(_text)); -+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1; -+ data_resource.start = virt_to_phys(_sdata); - data_resource.end = virt_to_phys(_edata)-1; - bss_resource.start = virt_to_phys(&__bss_start); - bss_resource.end = virt_to_phys(&__bss_stop)-1; -diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c -index 71f4727..16dc9f7 100644 ---- a/arch/x86/kernel/setup_percpu.c -+++ b/arch/x86/kernel/setup_percpu.c -@@ -21,19 +21,17 @@ - #include <asm/cpu.h> - #include <asm/stackprotector.h> - --DEFINE_PER_CPU(int, cpu_number); -+#ifdef CONFIG_SMP -+DEFINE_PER_CPU(unsigned int, cpu_number); - EXPORT_PER_CPU_SYMBOL(cpu_number); -+#endif - --#ifdef CONFIG_X86_64 - #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) --#else --#define BOOT_PERCPU_OFFSET 0 --#endif - - DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; - EXPORT_PER_CPU_SYMBOL(this_cpu_off); - --unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { -+unsigned long __per_cpu_offset[NR_CPUS] __read_only = { - [0 ... 
NR_CPUS-1] = BOOT_PERCPU_OFFSET, - }; - EXPORT_SYMBOL(__per_cpu_offset); -@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu) - { - #ifdef CONFIG_X86_32 - struct desc_struct gdt; -+ unsigned long base = per_cpu_offset(cpu); - -- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, -- 0x2 | DESCTYPE_S, 0x8); -- gdt.s = 1; -+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, -+ 0x83 | DESCTYPE_S, 0xC); - write_gdt_entry(get_cpu_gdt_table(cpu), - GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); - #endif -@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void) - /* alrighty, percpu areas up and running */ - delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; - for_each_possible_cpu(cpu) { -+#ifdef CONFIG_CC_STACKPROTECTOR -+#ifdef CONFIG_X86_32 -+ unsigned long canary = per_cpu(stack_canary.canary, cpu); -+#endif -+#endif - per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; - per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); - per_cpu(cpu_number, cpu) = cpu; -@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void) - */ - set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); - #endif -+#ifdef CONFIG_CC_STACKPROTECTOR -+#ifdef CONFIG_X86_32 -+ if (!cpu) -+ per_cpu(stack_canary.canary, cpu) = canary; -+#endif -+#endif - /* - * Up to this point, the boot CPU has been using .init.data - * area. Reload any changed state for the boot CPU. -diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c -index 54ddaeb2..a6aa4d2 100644 ---- a/arch/x86/kernel/signal.c -+++ b/arch/x86/kernel/signal.c -@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp) - * Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. - */ -- sp = ((sp + 4) & -16ul) - 4; -+ sp = ((sp - 12) & -16ul) - 4; - #else /* !CONFIG_X86_32 */ - sp = round_down(sp, 16) - 8; - #endif -@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, - * Return an always-bogus address instead so we will die with SIGSEGV. - */ - if (onsigstack && !likely(on_sig_stack(sp))) -- return (void __user *)-1L; -+ return (__force void __user *)-1L; - - /* save i387 state */ - if (used_math() && save_i387_xstate(*fpstate) < 0) -- return (void __user *)-1L; -+ return (__force void __user *)-1L; - - return (void __user *)sp; - } -@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, - } - - if (current->mm->context.vdso) -- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); -+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); - else -- restorer = &frame->retcode; -+ restorer = (void __user *)&frame->retcode; - if (ka->sa.sa_flags & SA_RESTORER) - restorer = ka->sa.sa_restorer; - -@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, - * reasons and because gdb uses it as a signature to notice - * signal handler stack frames. - */ -- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); -+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); - - if (err) - return -EFAULT; -@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - - /* Set up to return from userspace. 
*/ -- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); -+ if (current->mm->context.vdso) -+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); -+ else -+ restorer = (void __user *)&frame->retcode; - if (ka->sa.sa_flags & SA_RESTORER) - restorer = ka->sa.sa_restorer; - put_user_ex(restorer, &frame->pretcode); -@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - * reasons and because gdb uses it as a signature to notice - * signal handler stack frames. - */ -- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); -+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); - } put_user_catch(err); - - if (err) -@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs) - siginfo_t info; - int signr; - -+ pax_track_stack(); -+ - /* - * We want the common case to go fast, which is why we may in certain - * cases get here from kernel mode. Just return without doing anything -@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs) - * X86_32: vm86 regs switched out by assembly code before reaching - * here, so testing against kernel CS suffices. - */ -- if (!user_mode(regs)) -+ if (!user_mode_novm(regs)) - return; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); -diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c -index 9f548cb..caf76f7 100644 ---- a/arch/x86/kernel/smpboot.c -+++ b/arch/x86/kernel/smpboot.c -@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) - set_idle_for_cpu(cpu, c_idle.idle); - do_rest: - per_cpu(current_task, cpu) = c_idle.idle; -+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo; - #ifdef CONFIG_X86_32 - /* Stack for startup_32 can be just as for start_secondary onwards */ - irq_ctx_init(cpu); - #else - clear_tsk_thread_flag(c_idle.idle, TIF_FORK); - initial_gs = per_cpu_offset(cpu); -- per_cpu(kernel_stack, cpu) = -- (unsigned long)task_stack_page(c_idle.idle) - -- KERNEL_STACK_OFFSET + THREAD_SIZE; -+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE; - #endif -+ -+ pax_open_kernel(); - early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); -+ pax_close_kernel(); -+ - initial_code = (unsigned long)start_secondary; - stack_start = c_idle.idle->thread.sp; - -@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) - - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - err = do_boot_cpu(apicid, cpu); - if (err) { - pr_debug("do_boot_cpu failed %d\n", err); -diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c -index c346d11..d43b163 100644 ---- a/arch/x86/kernel/step.c -+++ b/arch/x86/kernel/step.c -@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re - struct desc_struct *desc; - unsigned long base; - -- seg &= ~7UL; -+ seg >>= 3; - - mutex_lock(&child->mm->context.lock); -- if (unlikely((seg >> 3) >= child->mm->context.size)) -+ if (unlikely(seg >= child->mm->context.size)) - addr = -1L; /* bogus selector, access would fault */ - else { - desc = child->mm->context.ldt + seg; -@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re - addr += base; - } - mutex_unlock(&child->mm->context.lock); -- } -+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS) -+ addr = 
ktla_ktva(addr); - - return addr; - } -@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) - unsigned char opcode[15]; - unsigned long addr = convert_ip_to_linear(child, regs); - -+ if (addr == -EINVAL) -+ return 0; -+ - copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); - for (i = 0; i < copied; i++) { - switch (opcode[i]) { -diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c -index 0b0cb5f..db6b9ed 100644 ---- a/arch/x86/kernel/sys_i386_32.c -+++ b/arch/x86/kernel/sys_i386_32.c -@@ -24,17 +24,224 @@ - - #include <asm/syscalls.h> - --/* -- * Do a system call from kernel instead of calling sys_execve so we -- * end up with proper pt_regs. -- */ --int kernel_execve(const char *filename, -- const char *const argv[], -- const char *const envp[]) -+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) - { -- long __res; -- asm volatile ("int $0x80" -- : "=a" (__res) -- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory"); -- return __res; -+ unsigned long pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ if (len > pax_task_size || addr > pax_task_size - len) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+unsigned long -+arch_get_unmapped_area(struct file *filp, unsigned long addr, -+ unsigned long len, unsigned long pgoff, unsigned long flags) -+{ -+ struct mm_struct *mm = current->mm; -+ struct vm_area_struct *vma; -+ unsigned long start_addr, pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; -+ -+ if (len > pax_task_size) -+ return -ENOMEM; -+ -+ if (flags & MAP_FIXED) -+ return addr; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ -+ if (addr) { -+ addr = PAGE_ALIGN(addr); -+ if (pax_task_size - len >= addr) { -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ return addr; -+ } -+ } -+ if (len > mm->cached_hole_size) { -+ start_addr = addr = mm->free_area_cache; -+ } else { -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; -+ } -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { -+ start_addr = 0x00110000UL; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ start_addr += mm->delta_mmap & 0x03FFF000UL; -+#endif -+ -+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) -+ start_addr = addr = mm->mmap_base; -+ else -+ addr = start_addr; -+ } -+#endif -+ -+full_search: -+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { -+ /* At this point: (!vma || addr < vma->vm_end). */ -+ if (pax_task_size - len < addr) { -+ /* -+ * Start a new search - just in case we missed -+ * some holes. 
-+ */ -+ if (start_addr != mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; -+ goto full_search; -+ } -+ return -ENOMEM; -+ } -+ if (check_heap_stack_gap(vma, addr, len)) -+ break; -+ if (addr + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - addr; -+ addr = vma->vm_end; -+ if (mm->start_brk <= addr && addr < mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; -+ goto full_search; -+ } -+ } -+ -+ /* -+ * Remember the place where we stopped the search: -+ */ -+ mm->free_area_cache = addr + len; -+ return addr; -+} -+ -+unsigned long -+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, -+ const unsigned long len, const unsigned long pgoff, -+ const unsigned long flags) -+{ -+ struct vm_area_struct *vma; -+ struct mm_struct *mm = current->mm; -+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; -+ -+ /* requested length too big for entire address space */ -+ if (len > pax_task_size) -+ return -ENOMEM; -+ -+ if (flags & MAP_FIXED) -+ return addr; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) -+ goto bottomup; -+#endif -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ -+ /* requesting a specific address */ -+ if (addr) { -+ addr = PAGE_ALIGN(addr); -+ if (pax_task_size - len >= addr) { -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ return addr; -+ } -+ } -+ -+ /* check if free_area_cache is useful for us */ -+ if (len <= mm->cached_hole_size) { -+ mm->cached_hole_size = 0; -+ mm->free_area_cache = mm->mmap_base; -+ } -+ -+ /* either no address requested or can't fit in requested address hole */ -+ addr = mm->free_area_cache; -+ -+ /* make sure it can fit in the remaining address space */ -+ if (addr > len) { -+ vma = find_vma(mm, addr-len); -+ if (check_heap_stack_gap(vma, addr - len, len)) -+ /* remember the address as a hint for next time */ -+ return (mm->free_area_cache = addr-len); -+ } -+ -+ if (mm->mmap_base < len) -+ goto bottomup; -+ -+ addr = mm->mmap_base-len; -+ -+ do { -+ /* -+ * Lookup failure means no vma is above this address, -+ * else if new region fits below vma->vm_start, -+ * return with success: -+ */ -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ /* remember the address as a hint for next time */ -+ return (mm->free_area_cache = addr); -+ -+ /* remember the largest hole we saw so far */ -+ if (addr + mm->cached_hole_size < vma->vm_start) -+ mm->cached_hole_size = vma->vm_start - addr; -+ -+ /* try just below the current vma->vm_start */ -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); -+ -+bottomup: -+ /* -+ * A failed mmap() very likely causes application failure, -+ * so fall back to the bottom-up function here. This scenario -+ * can happen with large stack limits and large mmap() -+ * allocations. 
-+ */ -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; -+ else -+#endif -+ -+ mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ -+ mm->free_area_cache = mm->mmap_base; -+ mm->cached_hole_size = ~0UL; -+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); -+ /* -+ * Restore the topdown base: -+ */ -+ mm->mmap_base = base; -+ mm->free_area_cache = base; -+ mm->cached_hole_size = ~0UL; -+ -+ return addr; - } -diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index ff14a50..35626c3 100644 ---- a/arch/x86/kernel/sys_x86_64.c -+++ b/arch/x86/kernel/sys_x86_64.c -@@ -32,8 +32,8 @@ out: - return error; - } - --static void find_start_end(unsigned long flags, unsigned long *begin, -- unsigned long *end) -+static void find_start_end(struct mm_struct *mm, unsigned long flags, -+ unsigned long *begin, unsigned long *end) - { - if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { - unsigned long new_begin; -@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, - *begin = new_begin; - } - } else { -- *begin = TASK_UNMAPPED_BASE; -+ *begin = mm->mmap_base; - *end = TASK_SIZE; - } - } -@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - if (flags & MAP_FIXED) - return addr; - -- find_start_end(flags, &begin, &end); -+ find_start_end(mm, flags, &begin, &end); - - if (len > end) - return -ENOMEM; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); -- if (end - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) -@@ -106,7 +109,7 @@ full_search: - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* - * Remember the place where we stopped the search: - */ -@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - { - struct vm_area_struct *vma; - struct mm_struct *mm = current->mm; -- unsigned long addr = addr0; -+ unsigned long base = mm->mmap_base, addr = addr0; - - /* requested length too big for entire address space */ - if (len > TASK_SIZE) -@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) - goto bottomup; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - /* requesting a specific address */ - if (addr) { - addr = PAGE_ALIGN(addr); -- vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -- return addr; -+ if (TASK_SIZE - len >= addr) { -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ return addr; -+ } - } - - /* check if free_area_cache is useful for us */ -@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* make sure it can fit in the remaining address space */ - if (addr > len) { - vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr - len, len)) - /* remember the address as a hint for next time */ - return mm->free_area_cache = addr-len; - } -@@ 
-179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - /* remember the address as a hint for next time */ - return mm->free_area_cache = addr; - -@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -- } while (len < vma->vm_start); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - bottomup: - /* -@@ -198,13 +206,21 @@ bottomup: - * can happen with large stack limits and large mmap() - * allocations. - */ -+ mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ -+ mm->free_area_cache = mm->mmap_base; - mm->cached_hole_size = ~0UL; -- mm->free_area_cache = TASK_UNMAPPED_BASE; - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); - /* - * Restore the topdown base: - */ -- mm->free_area_cache = mm->mmap_base; -+ mm->mmap_base = base; -+ mm->free_area_cache = base; - mm->cached_hole_size = ~0UL; - - return addr; -diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S -index bc19be3..0f5fbf7 100644 ---- a/arch/x86/kernel/syscall_table_32.S -+++ b/arch/x86/kernel/syscall_table_32.S -@@ -1,3 +1,4 @@ -+.section .rodata,"a",@progbits - ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ - .long sys_exit -diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c -index e07a2fc..db0369d 100644 ---- a/arch/x86/kernel/tboot.c -+++ b/arch/x86/kernel/tboot.c -@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void) - - void tboot_shutdown(u32 shutdown_type) - { -- void (*shutdown)(void); -+ void (* __noreturn shutdown)(void); - - if (!tboot_enabled()) - return; -@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type) - - switch_to_tboot_pt(); - -- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; -+ shutdown = (void *)tboot->shutdown_entry; - shutdown(); - - /* should not reach here */ -@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) - tboot_shutdown(acpi_shutdown_map[sleep_state]); - } - --static atomic_t ap_wfs_count; -+static atomic_unchecked_t ap_wfs_count; - - static int tboot_wait_for_aps(int num_aps) - { -@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, - { - switch (action) { - case CPU_DYING: -- atomic_inc(&ap_wfs_count); -+ atomic_inc_unchecked(&ap_wfs_count); - if (num_online_cpus() == 1) -- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) -+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) - return NOTIFY_BAD; - break; - } -@@ -342,7 +342,7 @@ static __init int tboot_late_init(void) - - tboot_create_trampoline(); - -- atomic_set(&ap_wfs_count, 0); -+ atomic_set_unchecked(&ap_wfs_count, 0); - register_hotcpu_notifier(&tboot_cpu_notifier); - return 0; - } -diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c -index 5a64d05..804587b 100644 ---- a/arch/x86/kernel/time.c -+++ b/arch/x86/kernel/time.c -@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs) - { - unsigned long pc = instruction_pointer(regs); - -- if (!user_mode_vm(regs) && in_lock_functions(pc)) { -+ if (!user_mode(regs) && 
in_lock_functions(pc)) { - #ifdef CONFIG_FRAME_POINTER -- return *(unsigned long *)(regs->bp + sizeof(long)); -+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); - #else - unsigned long *sp = - (unsigned long *)kernel_stack_pointer(regs); -@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs) - * or above a saved flags. Eflags has bits 22-31 zero, - * kernel addresses don't. - */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ return ktla_ktva(sp[0]); -+#else - if (sp[0] >> 22) - return sp[0]; - if (sp[1] >> 22) - return sp[1]; - #endif -+ -+#endif - } - return pc; - } -diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c -index 6bb7b85..dd853e1 100644 ---- a/arch/x86/kernel/tls.c -+++ b/arch/x86/kernel/tls.c -@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx, - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) -+ return -EINVAL; -+#endif -+ - set_tls_desc(p, idx, &info, 1); - - return 0; -diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S -index 451c0a7..e57f551 100644 ---- a/arch/x86/kernel/trampoline_32.S -+++ b/arch/x86/kernel/trampoline_32.S -@@ -32,6 +32,12 @@ - #include <asm/segment.h> - #include <asm/page_types.h> - -+#ifdef CONFIG_PAX_KERNEXEC -+#define ta(X) (X) -+#else -+#define ta(X) ((X) - __PAGE_OFFSET) -+#endif -+ - #ifdef CONFIG_SMP - - .section ".x86_trampoline","a" -@@ -62,7 +68,7 @@ r_base = . - inc %ax # protected mode (PE) bit - lmsw %ax # into protected mode - # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S -- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET) -+ ljmpl $__BOOT_CS, $ta(startup_32_smp) - - # These need to be in the same 64K segment as the above; - # hence we don't use the boot_gdt_descr defined in head.S -diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S -index 09ff517..df19fbff 100644 ---- a/arch/x86/kernel/trampoline_64.S -+++ b/arch/x86/kernel/trampoline_64.S -@@ -90,7 +90,7 @@ startup_32: - movl $__KERNEL_DS, %eax # Initialize the %ds segment register - movl %eax, %ds - -- movl $X86_CR4_PAE, %eax -+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax - movl %eax, %cr4 # Enable PAE mode - - # Setup trampoline 4 level pagetables -@@ -138,7 +138,7 @@ tidt: - # so the kernel can live anywhere - .balign 4 - tgdt: -- .short tgdt_end - tgdt # gdt limit -+ .short tgdt_end - tgdt - 1 # gdt limit - .long tgdt - r_base - .short 0 - .quad 0x00cf9b000000ffff # __KERNEL32_CS -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index 6913369..7e7dff6 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -70,12 +70,6 @@ asmlinkage int system_call(void); - - /* Do we ignore FPU interrupts ? */ - char ignore_fpu_irq; -- --/* -- * The IDT has to be page-aligned to simplify the Pentium -- * F0 0F bug workaround. 
-- */ --gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; - #endif - - DECLARE_BITMAP(used_vectors, NR_VECTORS); -@@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) - } - - static void __kprobes --do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, -+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, - long error_code, siginfo_t *info) - { - struct task_struct *tsk = current; - - #ifdef CONFIG_X86_32 -- if (regs->flags & X86_VM_MASK) { -+ if (v8086_mode(regs)) { - /* - * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. - * On nmi (interrupt 2), do_trap should not be called. -@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, - } - #endif - -- if (!user_mode(regs)) -+ if (!user_mode_novm(regs)) - goto kernel_trap; - - #ifdef CONFIG_X86_32 -@@ -157,7 +151,7 @@ trap_signal: - printk_ratelimit()) { - printk(KERN_INFO - "%s[%d] trap %s ip:%lx sp:%lx error:%lx", -- tsk->comm, tsk->pid, str, -+ tsk->comm, task_pid_nr(tsk), str, - regs->ip, regs->sp, error_code); - print_vma_addr(" in ", regs->ip); - printk("\n"); -@@ -174,8 +168,20 @@ kernel_trap: - if (!fixup_exception(regs)) { - tsk->thread.error_code = error_code; - tsk->thread.trap_no = trapnr; -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) -+ str = "PAX: suspicious stack segment fault"; -+#endif -+ - die(str, regs, error_code); - } -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ if (trapnr == 4) -+ pax_report_refcount_overflow(regs); -+#endif -+ - return; - - #ifdef CONFIG_X86_32 -@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code) - conditional_sti(regs); - - #ifdef CONFIG_X86_32 -- if (regs->flags & X86_VM_MASK) -+ if (v8086_mode(regs)) - goto gp_in_vm86; - #endif - - tsk = current; -- if (!user_mode(regs)) -+ if (!user_mode_novm(regs)) - goto gp_in_kernel; - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) -+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { -+ struct mm_struct *mm = tsk->mm; -+ unsigned long limit; -+ -+ down_write(&mm->mmap_sem); -+ limit = mm->context.user_cs_limit; -+ if (limit < TASK_SIZE) { -+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); -+ up_write(&mm->mmap_sem); -+ return; -+ } -+ up_write(&mm->mmap_sem); -+ } -+#endif -+ - tsk->thread.error_code = error_code; - tsk->thread.trap_no = 13; - -@@ -304,6 +326,13 @@ gp_in_kernel: - if (notify_die(DIE_GPF, "general protection fault", regs, - error_code, 13, SIGSEGV) == NOTIFY_STOP) - return; -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) -+ die("PAX: suspicious general protection fault", regs, error_code); -+ else -+#endif -+ - die("general protection fault", regs, error_code); - } - -@@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs) - dotraplinkage notrace __kprobes void - do_nmi(struct pt_regs *regs, long error_code) - { -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (!user_mode(regs)) { -+ unsigned long cs = regs->cs & 0xFFFF; -+ unsigned long ip = ktva_ktla(regs->ip); -+ -+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext) -+ regs->ip = ip; -+ } -+#endif -+ - nmi_enter(); - - inc_irq_stat(__nmi_count); -@@ -569,7 +609,7 @@ dotraplinkage 
void __kprobes do_debug(struct pt_regs *regs, long error_code) - /* It's safe to allow irq's after DR6 has been saved */ - preempt_conditional_sti(regs); - -- if (regs->flags & X86_VM_MASK) { -+ if (v8086_mode(regs)) { - handle_vm86_trap((struct kernel_vm86_regs *) regs, - error_code, 1); - preempt_conditional_cli(regs); -@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) - * We already checked v86 mode above, so we can check for kernel mode - * by just checking the CPL of CS. - */ -- if ((dr6 & DR_STEP) && !user_mode(regs)) { -+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { - tsk->thread.debugreg6 &= ~DR_STEP; - set_tsk_thread_flag(tsk, TIF_SINGLESTEP); - regs->flags &= ~X86_EFLAGS_TF; -@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) - return; - conditional_sti(regs); - -- if (!user_mode_vm(regs)) -+ if (!user_mode(regs)) - { - if (!fixup_exception(regs)) { - task->thread.error_code = error_code; -@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) - void __math_state_restore(void) - { - struct thread_info *thread = current_thread_info(); -- struct task_struct *tsk = thread->task; -+ struct task_struct *tsk = current; - - /* - * Paranoid restore. send a SIGSEGV if we fail to restore the state. -@@ -750,8 +790,7 @@ void __math_state_restore(void) - */ - asmlinkage void math_state_restore(void) - { -- struct thread_info *thread = current_thread_info(); -- struct task_struct *tsk = thread->task; -+ struct task_struct *tsk = current; - - if (!tsk_used_math(tsk)) { - local_irq_enable(); -diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S -index b9242ba..50c5edd 100644 ---- a/arch/x86/kernel/verify_cpu.S -+++ b/arch/x86/kernel/verify_cpu.S -@@ -20,6 +20,7 @@ - * arch/x86/boot/compressed/head_64.S: Boot cpu verification - * arch/x86/kernel/trampoline_64.S: secondary processor verification - * arch/x86/kernel/head_32.S: processor startup -+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume - * - * verify_cpu, returns the status of longmode and SSE in register %eax. 
- * 0: Success 1: Failure -diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c -index 863f875..4307295 100644 ---- a/arch/x86/kernel/vm86_32.c -+++ b/arch/x86/kernel/vm86_32.c -@@ -41,6 +41,7 @@ - #include <linux/ptrace.h> - #include <linux/audit.h> - #include <linux/stddef.h> -+#include <linux/grsecurity.h> - - #include <asm/uaccess.h> - #include <asm/io.h> -@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) - do_exit(SIGSEGV); - } - -- tss = &per_cpu(init_tss, get_cpu()); -+ tss = init_tss + get_cpu(); - current->thread.sp0 = current->thread.saved_sp0; - current->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &current->thread); -@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs) - struct task_struct *tsk; - int tmp, ret = -EPERM; - -+#ifdef CONFIG_GRKERNSEC_VM86 -+ if (!capable(CAP_SYS_RAWIO)) { -+ gr_handle_vm86(); -+ goto out; -+ } -+#endif -+ - tsk = current; - if (tsk->thread.saved_sp0) - goto out; -@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs) - int tmp, ret; - struct vm86plus_struct __user *v86; - -+#ifdef CONFIG_GRKERNSEC_VM86 -+ if (!capable(CAP_SYS_RAWIO)) { -+ gr_handle_vm86(); -+ ret = -EPERM; -+ goto out; -+ } -+#endif -+ - tsk = current; - switch (cmd) { - case VM86_REQUEST_IRQ: -@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk - tsk->thread.saved_fs = info->regs32->fs; - tsk->thread.saved_gs = get_user_gs(info->regs32); - -- tss = &per_cpu(init_tss, get_cpu()); -+ tss = init_tss + get_cpu(); - tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; - if (cpu_has_sep) - tsk->thread.sysenter_cs = 0; -@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, - goto cannot_handle; - if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) - goto cannot_handle; -- intr_ptr = (unsigned long __user *) (i << 2); -+ intr_ptr = (__force unsigned long __user *) (i << 2); - if (get_user(segoffs, intr_ptr)) - goto cannot_handle; - if ((segoffs >> 16) == BIOSSEG) -diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S -index 0f703f1..9e15f64 100644 ---- a/arch/x86/kernel/vmlinux.lds.S -+++ b/arch/x86/kernel/vmlinux.lds.S -@@ -26,6 +26,13 @@ - #include <asm/page_types.h> - #include <asm/cache.h> - #include <asm/boot.h> -+#include <asm/segment.h> -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) -+#else -+#define __KERNEL_TEXT_OFFSET 0 -+#endif - - #undef i386 /* in case the preprocessor is a 32bit one */ - -@@ -69,30 +76,43 @@ jiffies_64 = jiffies; - - PHDRS { - text PT_LOAD FLAGS(5); /* R_E */ -+#ifdef CONFIG_X86_32 -+ module PT_LOAD FLAGS(5); /* R_E */ -+#endif -+#ifdef CONFIG_XEN -+ rodata PT_LOAD FLAGS(5); /* R_E */ -+#else -+ rodata PT_LOAD FLAGS(4); /* R__ */ -+#endif - data PT_LOAD FLAGS(6); /* RW_ */ --#ifdef CONFIG_X86_64 -+ init.begin PT_LOAD FLAGS(6); /* RW_ */ - #ifdef CONFIG_SMP - percpu PT_LOAD FLAGS(6); /* RW_ */ - #endif -+ text.init PT_LOAD FLAGS(5); /* R_E */ -+ text.exit PT_LOAD FLAGS(5); /* R_E */ - init PT_LOAD FLAGS(7); /* RWE */ --#endif - note PT_NOTE FLAGS(0); /* ___ */ - } - - SECTIONS - { - #ifdef CONFIG_X86_32 -- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; -- phys_startup_32 = startup_32 - LOAD_OFFSET; -+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; - #else -- . = __START_KERNEL; -- phys_startup_64 = startup_64 - LOAD_OFFSET; -+ . 
= __START_KERNEL; - #endif - - /* Text and read-only data */ -- .text : AT(ADDR(.text) - LOAD_OFFSET) { -- _text = .; -+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { - /* bootstrapping code */ -+#ifdef CONFIG_X86_32 -+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; -+#else -+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; -+#endif -+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; -+ _text = .; - HEAD_TEXT - #ifdef CONFIG_X86_32 - . = ALIGN(PAGE_SIZE); -@@ -108,13 +128,47 @@ SECTIONS - IRQENTRY_TEXT - *(.fixup) - *(.gnu.warning) -- /* End of text section */ -- _etext = .; - } :text = 0x9090 - -- NOTES :text :note -+ . += __KERNEL_TEXT_OFFSET; - -- EXCEPTION_TABLE(16) :text = 0x9090 -+#ifdef CONFIG_X86_32 -+ . = ALIGN(PAGE_SIZE); -+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { -+ -+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES) -+ MODULES_EXEC_VADDR = .; -+ BYTE(0) -+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); -+ . = ALIGN(HPAGE_SIZE); -+ MODULES_EXEC_END = . - 1; -+#endif -+ -+ } :module -+#endif -+ -+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { -+ /* End of text section */ -+ _etext = . - __KERNEL_TEXT_OFFSET; -+ } -+ -+#ifdef CONFIG_X86_32 -+ . = ALIGN(PAGE_SIZE); -+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { -+ *(.idt) -+ . = ALIGN(PAGE_SIZE); -+ *(.empty_zero_page) -+ *(.initial_pg_fixmap) -+ *(.initial_pg_pmd) -+ *(.initial_page_table) -+ *(.swapper_pg_dir) -+ } :rodata -+#endif -+ -+ . = ALIGN(PAGE_SIZE); -+ NOTES :rodata :note -+ -+ EXCEPTION_TABLE(16) :rodata - - #if defined(CONFIG_DEBUG_RODATA) - /* .text should occupy whole number of pages */ -@@ -126,16 +180,20 @@ SECTIONS - - /* Data */ - .data : AT(ADDR(.data) - LOAD_OFFSET) { -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ . = ALIGN(HPAGE_SIZE); -+#else -+ . = ALIGN(PAGE_SIZE); -+#endif -+ - /* Start of data section */ - _sdata = .; - - /* init_task */ - INIT_TASK_DATA(THREAD_SIZE) - --#ifdef CONFIG_X86_32 -- /* 32 bit has nosave before _edata */ - NOSAVE_DATA --#endif - - PAGE_ALIGNED_DATA(PAGE_SIZE) - -@@ -176,12 +234,19 @@ SECTIONS - #endif /* CONFIG_X86_64 */ - - /* Init code and data - will be freed after init */ -- . = ALIGN(PAGE_SIZE); - .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { -+ BYTE(0) -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ . = ALIGN(HPAGE_SIZE); -+#else -+ . = ALIGN(PAGE_SIZE); -+#endif -+ - __init_begin = .; /* paired with __init_end */ -- } -+ } :init.begin - --#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) -+#ifdef CONFIG_SMP - /* - * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the - * output PHDR, so the next output section - .init.text - should -@@ -190,12 +255,27 @@ SECTIONS - PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) - #endif - -- INIT_TEXT_SECTION(PAGE_SIZE) --#ifdef CONFIG_X86_64 -- :init --#endif -+ . = ALIGN(PAGE_SIZE); -+ init_begin = .; -+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { -+ VMLINUX_SYMBOL(_sinittext) = .; -+ INIT_TEXT -+ VMLINUX_SYMBOL(_einittext) = .; -+ . = ALIGN(PAGE_SIZE); -+ } :text.init - -- INIT_DATA_SECTION(16) -+ /* -+ * .exit.text is discard at runtime, not link time, to deal with -+ * references from .altinstructions and .eh_frame -+ */ -+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { -+ EXIT_TEXT -+ . = ALIGN(16); -+ } :text.exit -+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); -+ -+ . 
= ALIGN(PAGE_SIZE); -+ INIT_DATA_SECTION(16) :init - - /* - * Code and data for a variety of lowlevel trampolines, to be -@@ -269,19 +349,12 @@ SECTIONS - } - - . = ALIGN(8); -- /* -- * .exit.text is discard at runtime, not link time, to deal with -- * references from .altinstructions and .eh_frame -- */ -- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { -- EXIT_TEXT -- } - - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { - EXIT_DATA - } - --#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) -+#ifndef CONFIG_SMP - PERCPU_SECTION(INTERNODE_CACHE_BYTES) - #endif - -@@ -300,16 +373,10 @@ SECTIONS - .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { - __smp_locks = .; - *(.smp_locks) -- . = ALIGN(PAGE_SIZE); - __smp_locks_end = .; -+ . = ALIGN(PAGE_SIZE); - } - --#ifdef CONFIG_X86_64 -- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { -- NOSAVE_DATA -- } --#endif -- - /* BSS */ - . = ALIGN(PAGE_SIZE); - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { -@@ -325,6 +392,7 @@ SECTIONS - __brk_base = .; - . += 64 * 1024; /* 64k alignment slop space */ - *(.brk_reservation) /* areas brk users have reserved */ -+ . = ALIGN(HPAGE_SIZE); - __brk_limit = .; - } - -@@ -351,13 +419,12 @@ SECTIONS - * for the boot processor. - */ - #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load --INIT_PER_CPU(gdt_page); - INIT_PER_CPU(irq_stack_union); - - /* - * Build-time check on the image size: - */ --. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), -+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE"); - - #ifdef CONFIG_SMP -diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c -index b56c65de..561a55b 100644 ---- a/arch/x86/kernel/vsyscall_64.c -+++ b/arch/x86/kernel/vsyscall_64.c -@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = - .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), - }; - --static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; -+static enum { EMULATE, NONE } vsyscall_mode = EMULATE; - - static int __init vsyscall_setup(char *str) - { - if (str) { - if (!strcmp("emulate", str)) - vsyscall_mode = EMULATE; -- else if (!strcmp("native", str)) -- vsyscall_mode = NATIVE; - else if (!strcmp("none", str)) - vsyscall_mode = NONE; - else -@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) - - tsk = current; - if (seccomp_mode(&tsk->seccomp)) -- do_exit(SIGKILL); -+ do_group_exit(SIGKILL); - - switch (vsyscall_nr) { - case 0: -@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) - return true; - - sigsegv: -- force_sig(SIGSEGV, current); -- return true; -+ do_group_exit(SIGKILL); - } - - /* -@@ -273,10 +270,7 @@ void __init map_vsyscall(void) - extern char __vvar_page; - unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); - -- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, -- vsyscall_mode == NATIVE -- ? 
PAGE_KERNEL_VSYSCALL -- : PAGE_KERNEL_VVAR); -+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR); - BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) != - (unsigned long)VSYSCALL_START); - -diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c -index 9796c2f..f686fbf 100644 ---- a/arch/x86/kernel/x8664_ksyms_64.c -+++ b/arch/x86/kernel/x8664_ksyms_64.c -@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8); - EXPORT_SYMBOL(copy_user_generic_string); - EXPORT_SYMBOL(copy_user_generic_unrolled); - EXPORT_SYMBOL(__copy_user_nocache); --EXPORT_SYMBOL(_copy_from_user); --EXPORT_SYMBOL(_copy_to_user); - - EXPORT_SYMBOL(copy_page); - EXPORT_SYMBOL(clear_page); -diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c -index a391134..d0b63b6e 100644 ---- a/arch/x86/kernel/xsave.c -+++ b/arch/x86/kernel/xsave.c -@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf, - fx_sw_user->xstate_size > fx_sw_user->extended_size) - return -EINVAL; - -- err = __get_user(magic2, (__u32 *) (((void *)fpstate) + -+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) + - fx_sw_user->extended_size - - FP_XSTATE_MAGIC2_SIZE)); - if (err) -@@ -267,7 +267,7 @@ fx_only: - * the other extended state. - */ - xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); -- return fxrstor_checking((__force struct i387_fxsave_struct *)buf); -+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf); - } - - /* -@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf) - if (use_xsave()) - err = restore_user_xstate(buf); - else -- err = fxrstor_checking((__force struct i387_fxsave_struct *) -+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *) - buf); - if (unlikely(err)) { - /* -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 8b4cc5f..f086b5b 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -96,7 +96,7 @@ - #define Src2ImmByte (2<<29) - #define Src2One (3<<29) - #define Src2Imm (4<<29) --#define Src2Mask (7<<29) -+#define Src2Mask (7U<<29) - - #define X2(x...) x, x - #define X3(x...) X2(x), x -@@ -207,6 +207,7 @@ struct gprefix { - - #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \ - do { \ -+ unsigned long _tmp; \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "4", "2") \ - _op _suffix " %"_x"3,%1; " \ -@@ -220,8 +221,6 @@ struct gprefix { - /* Raw emulation: instruction has two explicit operands. */ - #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ - do { \ -- unsigned long _tmp; \ -- \ - switch ((_dst).bytes) { \ - case 2: \ - ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\ -@@ -237,7 +236,6 @@ struct gprefix { - - #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ - do { \ -- unsigned long _tmp; \ - switch ((_dst).bytes) { \ - case 1: \ - ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \ -diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 57dcbd4..79aba9b 100644 ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -53,7 +53,7 @@ - #define APIC_BUS_CYCLE_NS 1 - - /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ --#define apic_debug(fmt, arg...) -+#define apic_debug(fmt, arg...) 
do {} while (0) - - #define APIC_LVT_NUM 6 - /* 14 is the version for Xeon and Pentium 8.4.8*/ -diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c -index 8e8da79..13bc641 100644 ---- a/arch/x86/kvm/mmu.c -+++ b/arch/x86/kvm/mmu.c -@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - - pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - -- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); -+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter); - - /* - * Assume that the pte write on a page table of the same type -@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - } - - spin_lock(&vcpu->kvm->mmu_lock); -- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) -+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) - gentry = 0; - kvm_mmu_free_some_pages(vcpu); - ++vcpu->kvm->stat.mmu_pte_write; -diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h -index 507e2b8..fc55f89 100644 ---- a/arch/x86/kvm/paging_tmpl.h -+++ b/arch/x86/kvm/paging_tmpl.h -@@ -197,7 +197,7 @@ retry_walk: - if (unlikely(kvm_is_error_hva(host_addr))) - goto error; - -- ptep_user = (pt_element_t __user *)((void *)host_addr + offset); -+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset); - if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) - goto error; - -@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, - unsigned long mmu_seq; - bool map_writable; - -+ pax_track_stack(); -+ - pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); - - if (unlikely(error_code & PFERR_RSVD_MASK)) -@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) - if (need_flush) - kvm_flush_remote_tlbs(vcpu->kvm); - -- atomic_inc(&vcpu->kvm->arch.invlpg_counter); -+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter); - - spin_unlock(&vcpu->kvm->mmu_lock); - -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c -index 475d1c9..33658ff 100644 ---- a/arch/x86/kvm/svm.c -+++ b/arch/x86/kvm/svm.c -@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu) - int cpu = raw_smp_processor_id(); - - struct svm_cpu_data *sd = per_cpu(svm_data, cpu); -+ -+ pax_open_kernel(); - sd->tss_desc->type = 9; /* available 32/64-bit TSS */ -+ pax_close_kernel(); -+ - load_TR_desc(); - } - -@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) - #endif - #endif - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ __set_fs(current_thread_info()->addr_limit); -+#endif -+ - reload_tss(vcpu); - - local_irq_disable(); -diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c -index e65a158..656dc24 100644 ---- a/arch/x86/kvm/vmx.c -+++ b/arch/x86/kvm/vmx.c -@@ -1251,7 +1251,11 @@ static void reload_tss(void) - struct desc_struct *descs; - - descs = (void *)gdt->address; -+ -+ pax_open_kernel(); - descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ -+ pax_close_kernel(); -+ - load_TR_desc(); - } - -@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void) - if (!cpu_has_vmx_flexpriority()) - flexpriority_enabled = 0; - -- if (!cpu_has_vmx_tpr_shadow()) -- kvm_x86_ops->update_cr8_intercept = NULL; -+ if (!cpu_has_vmx_tpr_shadow()) { -+ pax_open_kernel(); -+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; -+ pax_close_kernel(); -+ } - - if (enable_ept && !cpu_has_vmx_ept_2m_page()) - kvm_disable_largepages(); -@@ -3535,7 +3542,7 @@ static void 
vmx_set_constant_host_state(void) - vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ - - asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl)); -- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */ -+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */ - - rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); - vmcs_write32(HOST_IA32_SYSENTER_CS, low32); -@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) - "jmp .Lkvm_vmx_return \n\t" - ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" - ".Lkvm_vmx_return: " -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t" -+ ".Lkvm_vmx_return2: " -+#endif -+ - /* Save guest registers, load host registers, keep flags */ - "mov %0, %c[wordsize](%%"R"sp) \n\t" - "pop %0 \n\t" -@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) - #endif - [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), - [wordsize]"i"(sizeof(ulong)) -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ ,[cs]"i"(__KERNEL_CS) -+#endif -+ - : "cc", "memory" - , R"ax", R"bx", R"di", R"si" - #ifdef CONFIG_X86_64 -@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) - } - } - -- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); -+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS)); -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ loadsegment(fs, __KERNEL_PERCPU); -+#endif -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ __set_fs(current_thread_info()->addr_limit); -+#endif -+ - vmx->loaded_vmcs->launched = 1; - - vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 84a28ea..9326501 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) - { - struct kvm *kvm = vcpu->kvm; - int lm = is_long_mode(vcpu); -- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 -- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; -+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64 -+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32; - u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 - : kvm->arch.xen_hvm_config.blob_size_32; - u32 page_num = data & ~PAGE_MASK; -@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp, - if (n < msr_list.nmsrs) - goto out; - r = -EFAULT; -+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) -+ goto out; - if (copy_to_user(user_msr_list->indices, &msrs_to_save, - num_msrs_to_save * sizeof(u32))) - goto out; -@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) - { -- int r; -+ int r, i; - - r = -E2BIG; - if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) - goto out; - r = -EFAULT; -- if (copy_from_user(&vcpu->arch.cpuid_entries, entries, -- cpuid->nent * sizeof(struct kvm_cpuid_entry2))) -+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) - goto out; -+ for (i = 0; i < cpuid->nent; ++i) { -+ struct kvm_cpuid_entry2 cpuid_entry; -+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry))) -+ goto out; -+ vcpu->arch.cpuid_entries[i] = cpuid_entry; -+ } - vcpu->arch.cpuid_nent = cpuid->nent; - kvm_apic_set_version(vcpu); - kvm_x86_ops->cpuid_update(vcpu); -@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) - { -- int r; -+ int r, i; - - r = -E2BIG; - if (cpuid->nent < vcpu->arch.cpuid_nent) - goto out; - r = -EFAULT; -- if (copy_to_user(entries, &vcpu->arch.cpuid_entries, -- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) -+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) - goto out; -+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { -+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i]; -+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry))) -+ goto out; -+ } - return 0; - - out: -@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, - static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, - struct kvm_interrupt *irq) - { -- if (irq->irq < 0 || irq->irq >= 256) -+ if (irq->irq >= 256) - return -EINVAL; - if (irqchip_in_kernel(vcpu->kvm)) - return -ENXIO; -@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void) - kvm_mmu_set_mmio_spte_mask(mask); - } - --int kvm_arch_init(void *opaque) -+int kvm_arch_init(const void *opaque) - { - int r; - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; -diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c -index 13ee258..b9632f6 100644 ---- a/arch/x86/lguest/boot.c -+++ b/arch/x86/lguest/boot.c -@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) - * Rebooting also tells the Host we're finished, but the RESTART flag tells the - * Launcher to reboot us. 
- */ --static void lguest_restart(char *reason) -+static __noreturn void lguest_restart(char *reason) - { - hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); -+ BUG(); - } - - /*G:050 -diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c -index 042f682..c92afb6 100644 ---- a/arch/x86/lib/atomic64_32.c -+++ b/arch/x86/lib/atomic64_32.c -@@ -8,18 +8,30 @@ - - long long atomic64_read_cx8(long long, const atomic64_t *v); - EXPORT_SYMBOL(atomic64_read_cx8); -+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_read_unchecked_cx8); - long long atomic64_set_cx8(long long, const atomic64_t *v); - EXPORT_SYMBOL(atomic64_set_cx8); -+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_set_unchecked_cx8); - long long atomic64_xchg_cx8(long long, unsigned high); - EXPORT_SYMBOL(atomic64_xchg_cx8); - long long atomic64_add_return_cx8(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_add_return_cx8); -+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8); - long long atomic64_sub_return_cx8(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_sub_return_cx8); -+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8); - long long atomic64_inc_return_cx8(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_inc_return_cx8); -+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8); - long long atomic64_dec_return_cx8(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_dec_return_cx8); -+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8); - long long atomic64_dec_if_positive_cx8(atomic64_t *v); - EXPORT_SYMBOL(atomic64_dec_if_positive_cx8); - int atomic64_inc_not_zero_cx8(atomic64_t *v); -@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8); - #ifndef CONFIG_X86_CMPXCHG64 - long long atomic64_read_386(long long, const atomic64_t *v); - EXPORT_SYMBOL(atomic64_read_386); -+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_read_unchecked_386); - long long atomic64_set_386(long long, const atomic64_t *v); - EXPORT_SYMBOL(atomic64_set_386); -+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_set_unchecked_386); - long long atomic64_xchg_386(long long, unsigned high); - EXPORT_SYMBOL(atomic64_xchg_386); - long long atomic64_add_return_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_add_return_386); -+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_add_return_unchecked_386); - long long atomic64_sub_return_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_sub_return_386); -+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386); - long long atomic64_inc_return_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_inc_return_386); -+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386); - long long atomic64_dec_return_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_dec_return_386); -+long long 
atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386); - long long atomic64_add_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_add_386); -+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_add_unchecked_386); - long long atomic64_sub_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_sub_386); -+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_sub_unchecked_386); - long long atomic64_inc_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_inc_386); -+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_inc_unchecked_386); - long long atomic64_dec_386(long long a, atomic64_t *v); - EXPORT_SYMBOL(atomic64_dec_386); -+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v); -+EXPORT_SYMBOL(atomic64_dec_unchecked_386); - long long atomic64_dec_if_positive_386(atomic64_t *v); - EXPORT_SYMBOL(atomic64_dec_if_positive_386); - int atomic64_inc_not_zero_386(atomic64_t *v); -diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S -index e8e7e0d..56fd1b0 100644 ---- a/arch/x86/lib/atomic64_386_32.S -+++ b/arch/x86/lib/atomic64_386_32.S -@@ -48,6 +48,10 @@ BEGIN(read) - movl (v), %eax - movl 4(v), %edx - RET_ENDP -+BEGIN(read_unchecked) -+ movl (v), %eax -+ movl 4(v), %edx -+RET_ENDP - #undef v - - #define v %esi -@@ -55,6 +59,10 @@ BEGIN(set) - movl %ebx, (v) - movl %ecx, 4(v) - RET_ENDP -+BEGIN(set_unchecked) -+ movl %ebx, (v) -+ movl %ecx, 4(v) -+RET_ENDP - #undef v - - #define v %esi -@@ -70,6 +78,20 @@ RET_ENDP - BEGIN(add) - addl %eax, (v) - adcl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 0f -+ subl %eax, (v) -+ sbbl %edx, 4(v) -+ int $4 -+0: -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+RET_ENDP -+BEGIN(add_unchecked) -+ addl %eax, (v) -+ adcl %edx, 4(v) - RET_ENDP - #undef v - -@@ -77,6 +99,24 @@ RET_ENDP - BEGIN(add_return) - addl (v), %eax - adcl 4(v), %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ -+ movl %eax, (v) -+ movl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+2: -+#endif -+ -+RET_ENDP -+BEGIN(add_return_unchecked) -+ addl (v), %eax -+ adcl 4(v), %edx - movl %eax, (v) - movl %edx, 4(v) - RET_ENDP -@@ -86,6 +126,20 @@ RET_ENDP - BEGIN(sub) - subl %eax, (v) - sbbl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 0f -+ addl %eax, (v) -+ adcl %edx, 4(v) -+ int $4 -+0: -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+RET_ENDP -+BEGIN(sub_unchecked) -+ subl %eax, (v) -+ sbbl %edx, 4(v) - RET_ENDP - #undef v - -@@ -96,6 +150,27 @@ BEGIN(sub_return) - sbbl $0, %edx - addl (v), %eax - adcl 4(v), %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ -+ movl %eax, (v) -+ movl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+2: -+#endif -+ -+RET_ENDP -+BEGIN(sub_return_unchecked) -+ negl %edx -+ negl %eax -+ sbbl $0, %edx -+ addl (v), %eax -+ adcl 4(v), %edx - movl %eax, (v) - movl %edx, 4(v) - RET_ENDP -@@ -105,6 +180,20 @@ RET_ENDP - BEGIN(inc) - addl $1, (v) - adcl $0, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 0f -+ subl $1, (v) -+ sbbl $0, 4(v) -+ int $4 -+0: -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+RET_ENDP -+BEGIN(inc_unchecked) -+ addl $1, (v) -+ adcl $0, 4(v) - RET_ENDP - #undef v - -@@ -114,6 +203,26 @@ BEGIN(inc_return) - movl 4(v), %edx - addl $1, %eax - adcl $0, %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 
2f) -+#endif -+ -+ movl %eax, (v) -+ movl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+2: -+#endif -+ -+RET_ENDP -+BEGIN(inc_return_unchecked) -+ movl (v), %eax -+ movl 4(v), %edx -+ addl $1, %eax -+ adcl $0, %edx - movl %eax, (v) - movl %edx, 4(v) - RET_ENDP -@@ -123,6 +232,20 @@ RET_ENDP - BEGIN(dec) - subl $1, (v) - sbbl $0, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 0f -+ addl $1, (v) -+ adcl $0, 4(v) -+ int $4 -+0: -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+RET_ENDP -+BEGIN(dec_unchecked) -+ subl $1, (v) -+ sbbl $0, 4(v) - RET_ENDP - #undef v - -@@ -132,6 +255,26 @@ BEGIN(dec_return) - movl 4(v), %edx - subl $1, %eax - sbbl $0, %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ -+ movl %eax, (v) -+ movl %edx, 4(v) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+2: -+#endif -+ -+RET_ENDP -+BEGIN(dec_return_unchecked) -+ movl (v), %eax -+ movl 4(v), %edx -+ subl $1, %eax -+ sbbl $0, %edx - movl %eax, (v) - movl %edx, 4(v) - RET_ENDP -@@ -143,6 +286,13 @@ BEGIN(add_unless) - adcl %edx, %edi - addl (v), %eax - adcl 4(v), %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ - cmpl %eax, %esi - je 3f - 1: -@@ -168,6 +318,13 @@ BEGIN(inc_not_zero) - 1: - addl $1, %eax - adcl $0, %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ - movl %eax, (v) - movl %edx, 4(v) - movl $1, %eax -@@ -186,6 +343,13 @@ BEGIN(dec_if_positive) - movl 4(v), %edx - subl $1, %eax - sbbl $0, %edx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 1f) -+#endif -+ - js 1f - movl %eax, (v) - movl %edx, 4(v) -diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S -index 391a083..d658e9f 100644 ---- a/arch/x86/lib/atomic64_cx8_32.S -+++ b/arch/x86/lib/atomic64_cx8_32.S -@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8) - CFI_STARTPROC - - read64 %ecx -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(atomic64_read_cx8) - -+ENTRY(atomic64_read_unchecked_cx8) -+ CFI_STARTPROC -+ -+ read64 %ecx -+ pax_force_retaddr -+ ret -+ CFI_ENDPROC -+ENDPROC(atomic64_read_unchecked_cx8) -+ - ENTRY(atomic64_set_cx8) - CFI_STARTPROC - -@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8) - cmpxchg8b (%esi) - jne 1b - -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(atomic64_set_cx8) - -+ENTRY(atomic64_set_unchecked_cx8) -+ CFI_STARTPROC -+ -+1: -+/* we don't need LOCK_PREFIX since aligned 64-bit writes -+ * are atomic on 586 and newer */ -+ cmpxchg8b (%esi) -+ jne 1b -+ -+ pax_force_retaddr -+ ret -+ CFI_ENDPROC -+ENDPROC(atomic64_set_unchecked_cx8) -+ - ENTRY(atomic64_xchg_cx8) - CFI_STARTPROC - -@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8) - cmpxchg8b (%esi) - jne 1b - -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(atomic64_xchg_cx8) - --.macro addsub_return func ins insc --ENTRY(atomic64_\func()_return_cx8) -+.macro addsub_return func ins insc unchecked="" -+ENTRY(atomic64_\func()_return\unchecked()_cx8) - CFI_STARTPROC - SAVE ebp - SAVE ebx -@@ -84,27 +110,44 @@ ENTRY(atomic64_\func()_return_cx8) - movl %edx, %ecx - \ins()l %esi, %ebx - \insc()l %edi, %ecx -+ -+.ifb \unchecked -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+2: -+ _ASM_EXTABLE(2b, 3f) -+#endif -+.endif -+ - LOCK_PREFIX - cmpxchg8b (%ebp) - jne 1b -- --10: - movl %ebx, %eax - movl %ecx, %edx -+ -+.ifb \unchecked -+#ifdef CONFIG_PAX_REFCOUNT -+3: -+#endif -+.endif -+ - RESTORE edi - RESTORE esi - RESTORE ebx - RESTORE ebp -+ pax_force_retaddr - ret - CFI_ENDPROC --ENDPROC(atomic64_\func()_return_cx8) 
-+ENDPROC(atomic64_\func()_return\unchecked()_cx8) - .endm - - addsub_return add add adc - addsub_return sub sub sbb -+addsub_return add add adc _unchecked -+addsub_return sub sub sbb _unchecked - --.macro incdec_return func ins insc --ENTRY(atomic64_\func()_return_cx8) -+.macro incdec_return func ins insc unchecked -+ENTRY(atomic64_\func()_return\unchecked()_cx8) - CFI_STARTPROC - SAVE ebx - -@@ -114,21 +157,39 @@ ENTRY(atomic64_\func()_return_cx8) - movl %edx, %ecx - \ins()l $1, %ebx - \insc()l $0, %ecx -+ -+.ifb \unchecked -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+2: -+ _ASM_EXTABLE(2b, 3f) -+#endif -+.endif -+ - LOCK_PREFIX - cmpxchg8b (%esi) - jne 1b - --10: - movl %ebx, %eax - movl %ecx, %edx -+ -+.ifb \unchecked -+#ifdef CONFIG_PAX_REFCOUNT -+3: -+#endif -+.endif -+ - RESTORE ebx -+ pax_force_retaddr - ret - CFI_ENDPROC --ENDPROC(atomic64_\func()_return_cx8) -+ENDPROC(atomic64_\func()_return\unchecked()_cx8) - .endm - - incdec_return inc add adc - incdec_return dec sub sbb -+incdec_return inc add adc _unchecked -+incdec_return dec sub sbb _unchecked - - ENTRY(atomic64_dec_if_positive_cx8) - CFI_STARTPROC -@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8) - movl %edx, %ecx - subl $1, %ebx - sbb $0, %ecx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 2f) -+#endif -+ - js 2f - LOCK_PREFIX - cmpxchg8b (%esi) -@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8) - movl %ebx, %eax - movl %ecx, %edx - RESTORE ebx -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(atomic64_dec_if_positive_cx8) -@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8) - movl %edx, %ecx - addl %esi, %ebx - adcl %edi, %ecx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 3f) -+#endif -+ - LOCK_PREFIX - cmpxchg8b (%ebp) - jne 1b -@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8) - CFI_ADJUST_CFA_OFFSET -8 - RESTORE ebx - RESTORE ebp -+ pax_force_retaddr - ret - 4: - cmpl %edx, 4(%esp) -@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8) - movl %edx, %ecx - addl $1, %ebx - adcl $0, %ecx -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ into -+1234: -+ _ASM_EXTABLE(1234b, 3f) -+#endif -+ - LOCK_PREFIX - cmpxchg8b (%esi) - jne 1b -@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8) - movl $1, %eax - 3: - RESTORE ebx -+ pax_force_retaddr - ret - 4: - testl %edx, %edx -diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S -index 78d16a5..fbcf666 100644 ---- a/arch/x86/lib/checksum_32.S -+++ b/arch/x86/lib/checksum_32.S -@@ -28,7 +28,8 @@ - #include <linux/linkage.h> - #include <asm/dwarf2.h> - #include <asm/errno.h> -- -+#include <asm/segment.h> -+ - /* - * computes a partial checksum, e.g. 
for TCP/UDP fragments - */ -@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, - - #define ARGBASE 16 - #define FP 12 -- --ENTRY(csum_partial_copy_generic) -+ -+ENTRY(csum_partial_copy_generic_to_user) - CFI_STARTPROC -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pushl_cfi %gs -+ popl_cfi %es -+ jmp csum_partial_copy_generic -+#endif -+ -+ENTRY(csum_partial_copy_generic_from_user) -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pushl_cfi %gs -+ popl_cfi %ds -+#endif -+ -+ENTRY(csum_partial_copy_generic) - subl $4,%esp - CFI_ADJUST_CFA_OFFSET 4 - pushl_cfi %edi -@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic) - jmp 4f - SRC(1: movw (%esi), %bx ) - addl $2, %esi --DST( movw %bx, (%edi) ) -+DST( movw %bx, %es:(%edi) ) - addl $2, %edi - addw %bx, %ax - adcl $0, %eax -@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) ) - SRC(1: movl (%esi), %ebx ) - SRC( movl 4(%esi), %edx ) - adcl %ebx, %eax --DST( movl %ebx, (%edi) ) -+DST( movl %ebx, %es:(%edi) ) - adcl %edx, %eax --DST( movl %edx, 4(%edi) ) -+DST( movl %edx, %es:4(%edi) ) - - SRC( movl 8(%esi), %ebx ) - SRC( movl 12(%esi), %edx ) - adcl %ebx, %eax --DST( movl %ebx, 8(%edi) ) -+DST( movl %ebx, %es:8(%edi) ) - adcl %edx, %eax --DST( movl %edx, 12(%edi) ) -+DST( movl %edx, %es:12(%edi) ) - - SRC( movl 16(%esi), %ebx ) - SRC( movl 20(%esi), %edx ) - adcl %ebx, %eax --DST( movl %ebx, 16(%edi) ) -+DST( movl %ebx, %es:16(%edi) ) - adcl %edx, %eax --DST( movl %edx, 20(%edi) ) -+DST( movl %edx, %es:20(%edi) ) - - SRC( movl 24(%esi), %ebx ) - SRC( movl 28(%esi), %edx ) - adcl %ebx, %eax --DST( movl %ebx, 24(%edi) ) -+DST( movl %ebx, %es:24(%edi) ) - adcl %edx, %eax --DST( movl %edx, 28(%edi) ) -+DST( movl %edx, %es:28(%edi) ) - - lea 32(%esi), %esi - lea 32(%edi), %edi -@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) ) - shrl $2, %edx # This clears CF - SRC(3: movl (%esi), %ebx ) - adcl %ebx, %eax --DST( movl %ebx, (%edi) ) -+DST( movl %ebx, %es:(%edi) ) - lea 4(%esi), %esi - lea 4(%edi), %edi - dec %edx -@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) ) - jb 5f - SRC( movw (%esi), %cx ) - leal 2(%esi), %esi --DST( movw %cx, (%edi) ) -+DST( movw %cx, %es:(%edi) ) - leal 2(%edi), %edi - je 6f - shll $16,%ecx - SRC(5: movb (%esi), %cl ) --DST( movb %cl, (%edi) ) -+DST( movb %cl, %es:(%edi) ) - 6: addl %ecx, %eax - adcl $0, %eax - 7: -@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) ) - - 6001: - movl ARGBASE+20(%esp), %ebx # src_err_ptr -- movl $-EFAULT, (%ebx) -+ movl $-EFAULT, %ss:(%ebx) - - # zero the complete destination - computing the rest - # is too much work -@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) ) - - 6002: - movl ARGBASE+24(%esp), %ebx # dst_err_ptr -- movl $-EFAULT,(%ebx) -+ movl $-EFAULT,%ss:(%ebx) - jmp 5000b - - .previous - -+ pushl_cfi %ss -+ popl_cfi %ds -+ pushl_cfi %ss -+ popl_cfi %es - popl_cfi %ebx - CFI_RESTORE ebx - popl_cfi %esi -@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) ) - popl_cfi %ecx # equivalent to addl $4,%esp - ret - CFI_ENDPROC --ENDPROC(csum_partial_copy_generic) -+ENDPROC(csum_partial_copy_generic_to_user) - - #else - - /* Version for PentiumII/PPro */ - - #define ROUND1(x) \ -+ nop; nop; nop; \ - SRC(movl x(%esi), %ebx ) ; \ - addl %ebx, %eax ; \ -- DST(movl %ebx, x(%edi) ) ; -+ DST(movl %ebx, %es:x(%edi)) ; - - #define ROUND(x) \ -+ nop; nop; nop; \ - SRC(movl x(%esi), %ebx ) ; \ - adcl %ebx, %eax ; \ -- DST(movl %ebx, x(%edi) ) ; -+ DST(movl %ebx, %es:x(%edi)) ; - - #define ARGBASE 12 -- --ENTRY(csum_partial_copy_generic) -+ -+ENTRY(csum_partial_copy_generic_to_user) - CFI_STARTPROC -+ -+#ifdef 
CONFIG_PAX_MEMORY_UDEREF -+ pushl_cfi %gs -+ popl_cfi %es -+ jmp csum_partial_copy_generic -+#endif -+ -+ENTRY(csum_partial_copy_generic_from_user) -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pushl_cfi %gs -+ popl_cfi %ds -+#endif -+ -+ENTRY(csum_partial_copy_generic) - pushl_cfi %ebx - CFI_REL_OFFSET ebx, 0 - pushl_cfi %edi -@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic) - subl %ebx, %edi - lea -1(%esi),%edx - andl $-32,%edx -- lea 3f(%ebx,%ebx), %ebx -+ lea 3f(%ebx,%ebx,2), %ebx - testl %esi, %esi - jmp *%ebx - 1: addl $64,%esi -@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic) - jb 5f - SRC( movw (%esi), %dx ) - leal 2(%esi), %esi --DST( movw %dx, (%edi) ) -+DST( movw %dx, %es:(%edi) ) - leal 2(%edi), %edi - je 6f - shll $16,%edx - 5: - SRC( movb (%esi), %dl ) --DST( movb %dl, (%edi) ) -+DST( movb %dl, %es:(%edi) ) - 6: addl %edx, %eax - adcl $0, %eax - 7: - .section .fixup, "ax" - 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr -- movl $-EFAULT, (%ebx) -+ movl $-EFAULT, %ss:(%ebx) - # zero the complete destination (computing the rest is too much work) - movl ARGBASE+8(%esp),%edi # dst - movl ARGBASE+12(%esp),%ecx # len -@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) ) - rep; stosb - jmp 7b - 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr -- movl $-EFAULT, (%ebx) -+ movl $-EFAULT, %ss:(%ebx) - jmp 7b - .previous - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ pushl_cfi %ss -+ popl_cfi %ds -+ pushl_cfi %ss -+ popl_cfi %es -+#endif -+ - popl_cfi %esi - CFI_RESTORE esi - popl_cfi %edi -@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) ) - CFI_RESTORE ebx - ret - CFI_ENDPROC --ENDPROC(csum_partial_copy_generic) -+ENDPROC(csum_partial_copy_generic_to_user) - - #undef ROUND - #undef ROUND1 -diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S -index f2145cf..cea889d 100644 ---- a/arch/x86/lib/clear_page_64.S -+++ b/arch/x86/lib/clear_page_64.S -@@ -11,6 +11,7 @@ ENTRY(clear_page_c) - movl $4096/8,%ecx - xorl %eax,%eax - rep stosq -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(clear_page_c) -@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e) - movl $4096,%ecx - xorl %eax,%eax - rep stosb -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(clear_page_c_e) -@@ -43,6 +45,7 @@ ENTRY(clear_page) - leaq 64(%rdi),%rdi - jnz .Lloop - nop -+ pax_force_retaddr - ret - CFI_ENDPROC - .Lclear_page_end: -@@ -58,7 +61,7 @@ ENDPROC(clear_page) - - #include <asm/cpufeature.h> - -- .section .altinstr_replacement,"ax" -+ .section .altinstr_replacement,"a" - 1: .byte 0xeb /* jmp <disp8> */ - .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ - 2: .byte 0xeb /* jmp <disp8> */ -diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S -index 1e572c5..2a162cd 100644 ---- a/arch/x86/lib/cmpxchg16b_emu.S -+++ b/arch/x86/lib/cmpxchg16b_emu.S -@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu: - - popf - mov $1, %al -+ pax_force_retaddr - ret - - not_same: - popf - xor %al,%al -+ pax_force_retaddr - ret - - CFI_ENDPROC -diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S -index 01c805b..dccb07f 100644 ---- a/arch/x86/lib/copy_page_64.S -+++ b/arch/x86/lib/copy_page_64.S -@@ -9,6 +9,7 @@ copy_page_c: - CFI_STARTPROC - movl $4096/8,%ecx - rep movsq -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(copy_page_c) -@@ -39,7 +40,7 @@ ENTRY(copy_page) - movq 16 (%rsi), %rdx - movq 24 (%rsi), %r8 - movq 32 (%rsi), %r9 -- movq 40 (%rsi), %r10 -+ movq 40 (%rsi), %r13 - movq 48 (%rsi), %r11 - movq 56 (%rsi), %r12 - -@@ -50,7 +51,7 @@ ENTRY(copy_page) - movq %rdx, 16 (%rdi) - movq %r8, 24 
(%rdi) - movq %r9, 32 (%rdi) -- movq %r10, 40 (%rdi) -+ movq %r13, 40 (%rdi) - movq %r11, 48 (%rdi) - movq %r12, 56 (%rdi) - -@@ -69,7 +70,7 @@ ENTRY(copy_page) - movq 16 (%rsi), %rdx - movq 24 (%rsi), %r8 - movq 32 (%rsi), %r9 -- movq 40 (%rsi), %r10 -+ movq 40 (%rsi), %r13 - movq 48 (%rsi), %r11 - movq 56 (%rsi), %r12 - -@@ -78,7 +79,7 @@ ENTRY(copy_page) - movq %rdx, 16 (%rdi) - movq %r8, 24 (%rdi) - movq %r9, 32 (%rdi) -- movq %r10, 40 (%rdi) -+ movq %r13, 40 (%rdi) - movq %r11, 48 (%rdi) - movq %r12, 56 (%rdi) - -@@ -95,6 +96,7 @@ ENTRY(copy_page) - CFI_RESTORE r13 - addq $3*8,%rsp - CFI_ADJUST_CFA_OFFSET -3*8 -+ pax_force_retaddr - ret - .Lcopy_page_end: - CFI_ENDPROC -@@ -105,7 +107,7 @@ ENDPROC(copy_page) - - #include <asm/cpufeature.h> - -- .section .altinstr_replacement,"ax" -+ .section .altinstr_replacement,"a" - 1: .byte 0xeb /* jmp <disp8> */ - .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */ - 2: -diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S -index 0248402..821c786 100644 ---- a/arch/x86/lib/copy_user_64.S -+++ b/arch/x86/lib/copy_user_64.S -@@ -16,6 +16,7 @@ - #include <asm/thread_info.h> - #include <asm/cpufeature.h> - #include <asm/alternative-asm.h> -+#include <asm/pgtable.h> - - /* - * By placing feature2 after feature1 in altinstructions section, we logically -@@ -29,7 +30,7 @@ - .byte 0xe9 /* 32bit jump */ - .long \orig-1f /* by default jump to orig */ - 1: -- .section .altinstr_replacement,"ax" -+ .section .altinstr_replacement,"a" - 2: .byte 0xe9 /* near jump with 32bit immediate */ - .long \alt1-1b /* offset */ /* or alternatively to alt1 */ - 3: .byte 0xe9 /* near jump with 32bit immediate */ -@@ -71,47 +72,20 @@ - #endif - .endm - --/* Standard copy_to_user with segment limit checking */ --ENTRY(_copy_to_user) -- CFI_STARTPROC -- GET_THREAD_INFO(%rax) -- movq %rdi,%rcx -- addq %rdx,%rcx -- jc bad_to_user -- cmpq TI_addr_limit(%rax),%rcx -- ja bad_to_user -- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ -- copy_user_generic_unrolled,copy_user_generic_string, \ -- copy_user_enhanced_fast_string -- CFI_ENDPROC --ENDPROC(_copy_to_user) -- --/* Standard copy_from_user with segment limit checking */ --ENTRY(_copy_from_user) -- CFI_STARTPROC -- GET_THREAD_INFO(%rax) -- movq %rsi,%rcx -- addq %rdx,%rcx -- jc bad_from_user -- cmpq TI_addr_limit(%rax),%rcx -- ja bad_from_user -- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ -- copy_user_generic_unrolled,copy_user_generic_string, \ -- copy_user_enhanced_fast_string -- CFI_ENDPROC --ENDPROC(_copy_from_user) -- - .section .fixup,"ax" - /* must zero dest */ - ENTRY(bad_from_user) - bad_from_user: - CFI_STARTPROC -+ testl %edx,%edx -+ js bad_to_user - movl %edx,%ecx - xorl %eax,%eax - rep - stosb - bad_to_user: - movl %edx,%eax -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(bad_from_user) -@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled) - jz 17f - 1: movq (%rsi),%r8 - 2: movq 1*8(%rsi),%r9 --3: movq 2*8(%rsi),%r10 -+3: movq 2*8(%rsi),%rax - 4: movq 3*8(%rsi),%r11 - 5: movq %r8,(%rdi) - 6: movq %r9,1*8(%rdi) --7: movq %r10,2*8(%rdi) -+7: movq %rax,2*8(%rdi) - 8: movq %r11,3*8(%rdi) - 9: movq 4*8(%rsi),%r8 - 10: movq 5*8(%rsi),%r9 --11: movq 6*8(%rsi),%r10 -+11: movq 6*8(%rsi),%rax - 12: movq 7*8(%rsi),%r11 - 13: movq %r8,4*8(%rdi) - 14: movq %r9,5*8(%rdi) --15: movq %r10,6*8(%rdi) -+15: movq %rax,6*8(%rdi) - 16: movq %r11,7*8(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi -@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled) - decl %ecx - jnz 21b - 23: xor 
%eax,%eax -+ pax_force_retaddr - ret - - .section .fixup,"ax" -@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string) - 3: rep - movsb - 4: xorl %eax,%eax -+ pax_force_retaddr - ret - - .section .fixup,"ax" -@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string) - 1: rep - movsb - 2: xorl %eax,%eax -+ pax_force_retaddr - ret - - .section .fixup,"ax" -diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S -index cb0c112..e3a6895 100644 ---- a/arch/x86/lib/copy_user_nocache_64.S -+++ b/arch/x86/lib/copy_user_nocache_64.S -@@ -8,12 +8,14 @@ - - #include <linux/linkage.h> - #include <asm/dwarf2.h> -+#include <asm/alternative-asm.h> - - #define FIX_ALIGNMENT 1 - - #include <asm/current.h> - #include <asm/asm-offsets.h> - #include <asm/thread_info.h> -+#include <asm/pgtable.h> - - .macro ALIGN_DESTINATION - #ifdef FIX_ALIGNMENT -@@ -50,6 +52,15 @@ - */ - ENTRY(__copy_user_nocache) - CFI_STARTPROC -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ mov $PAX_USER_SHADOW_BASE,%rcx -+ cmp %rcx,%rsi -+ jae 1f -+ add %rcx,%rsi -+1: -+#endif -+ - cmpl $8,%edx - jb 20f /* less then 8 bytes, go to byte copy loop */ - ALIGN_DESTINATION -@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache) - jz 17f - 1: movq (%rsi),%r8 - 2: movq 1*8(%rsi),%r9 --3: movq 2*8(%rsi),%r10 -+3: movq 2*8(%rsi),%rax - 4: movq 3*8(%rsi),%r11 - 5: movnti %r8,(%rdi) - 6: movnti %r9,1*8(%rdi) --7: movnti %r10,2*8(%rdi) -+7: movnti %rax,2*8(%rdi) - 8: movnti %r11,3*8(%rdi) - 9: movq 4*8(%rsi),%r8 - 10: movq 5*8(%rsi),%r9 --11: movq 6*8(%rsi),%r10 -+11: movq 6*8(%rsi),%rax - 12: movq 7*8(%rsi),%r11 - 13: movnti %r8,4*8(%rdi) - 14: movnti %r9,5*8(%rdi) --15: movnti %r10,6*8(%rdi) -+15: movnti %rax,6*8(%rdi) - 16: movnti %r11,7*8(%rdi) - leaq 64(%rsi),%rsi - leaq 64(%rdi),%rdi -@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache) - jnz 21b - 23: xorl %eax,%eax - sfence -+ pax_force_retaddr - ret - - .section .fixup,"ax" -diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S -index fb903b7..c92b7f7 100644 ---- a/arch/x86/lib/csum-copy_64.S -+++ b/arch/x86/lib/csum-copy_64.S -@@ -8,6 +8,7 @@ - #include <linux/linkage.h> - #include <asm/dwarf2.h> - #include <asm/errno.h> -+#include <asm/alternative-asm.h> - - /* - * Checksum copy with exception handling. 
-@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic) - CFI_RESTORE rbp - addq $7*8, %rsp - CFI_ADJUST_CFA_OFFSET -7*8 -+ pax_force_retaddr 0, 1 - ret - CFI_RESTORE_STATE - -diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c -index 459b58a..9570bc7 100644 ---- a/arch/x86/lib/csum-wrappers_64.c -+++ b/arch/x86/lib/csum-wrappers_64.c -@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst, - len -= 2; - } - } -- isum = csum_partial_copy_generic((__force const void *)src, -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+#endif -+ -+ isum = csum_partial_copy_generic((const void __force_kernel *)src, - dst, len, isum, errp, NULL); - if (unlikely(*errp)) - goto out_err; -@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst, - } - - *errp = 0; -- return csum_partial_copy_generic(src, (void __force *)dst, -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) -+ dst += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return csum_partial_copy_generic(src, (void __force_kernel *)dst, - len, isum, NULL, errp); - } - EXPORT_SYMBOL(csum_partial_copy_to_user); -diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S -index 51f1504..ddac4c1 100644 ---- a/arch/x86/lib/getuser.S -+++ b/arch/x86/lib/getuser.S -@@ -33,15 +33,38 @@ - #include <asm/asm-offsets.h> - #include <asm/thread_info.h> - #include <asm/asm.h> -+#include <asm/segment.h> -+#include <asm/pgtable.h> -+#include <asm/alternative-asm.h> -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __copyuser_seg gs; -+#else -+#define __copyuser_seg -+#endif - - .text - ENTRY(__get_user_1) - CFI_STARTPROC -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX - jae bad_get_user --1: movzb (%_ASM_AX),%edx -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX -+ cmp %_ASM_DX,%_ASM_AX -+ jae 1234f -+ add %_ASM_DX,%_ASM_AX -+1234: -+#endif -+ -+#endif -+ -+1: __copyuser_seg movzb (%_ASM_AX),%edx - xor %eax,%eax -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(__get_user_1) -@@ -49,12 +72,26 @@ ENDPROC(__get_user_1) - ENTRY(__get_user_2) - CFI_STARTPROC - add $1,%_ASM_AX -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) - jc bad_get_user - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX - jae bad_get_user --2: movzwl -1(%_ASM_AX),%edx -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX -+ cmp %_ASM_DX,%_ASM_AX -+ jae 1234f -+ add %_ASM_DX,%_ASM_AX -+1234: -+#endif -+ -+#endif -+ -+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx - xor %eax,%eax -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(__get_user_2) -@@ -62,12 +99,26 @@ ENDPROC(__get_user_2) - ENTRY(__get_user_4) - CFI_STARTPROC - add $3,%_ASM_AX -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) - jc bad_get_user - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX - jae bad_get_user --3: mov -3(%_ASM_AX),%edx -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX -+ cmp %_ASM_DX,%_ASM_AX -+ jae 1234f -+ add %_ASM_DX,%_ASM_AX -+1234: -+#endif -+ -+#endif -+ -+3: __copyuser_seg mov -3(%_ASM_AX),%edx - xor %eax,%eax -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(__get_user_4) -@@ -80,8 
+131,18 @@ ENTRY(__get_user_8) - GET_THREAD_INFO(%_ASM_DX) - cmp TI_addr_limit(%_ASM_DX),%_ASM_AX - jae bad_get_user -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX -+ cmp %_ASM_DX,%_ASM_AX -+ jae 1234f -+ add %_ASM_DX,%_ASM_AX -+1234: -+#endif -+ - 4: movq -7(%_ASM_AX),%_ASM_DX - xor %eax,%eax -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(__get_user_8) -@@ -91,6 +152,7 @@ bad_get_user: - CFI_STARTPROC - xor %edx,%edx - mov $(-EFAULT),%_ASM_AX -+ pax_force_retaddr - ret - CFI_ENDPROC - END(bad_get_user) -diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c -index 9f33b98..dfc7678 100644 ---- a/arch/x86/lib/insn.c -+++ b/arch/x86/lib/insn.c -@@ -21,6 +21,11 @@ - #include <linux/string.h> - #include <asm/inat.h> - #include <asm/insn.h> -+#ifdef __KERNEL__ -+#include <asm/pgtable_types.h> -+#else -+#define ktla_ktva(addr) addr -+#endif - - #define get_next(t, insn) \ - ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) -@@ -40,8 +45,8 @@ - void insn_init(struct insn *insn, const void *kaddr, int x86_64) - { - memset(insn, 0, sizeof(*insn)); -- insn->kaddr = kaddr; -- insn->next_byte = kaddr; -+ insn->kaddr = ktla_ktva(kaddr); -+ insn->next_byte = ktla_ktva(kaddr); - insn->x86_64 = x86_64 ? 1 : 0; - insn->opnd_bytes = 4; - if (x86_64) -diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S -index 05a95e7..326f2fa 100644 ---- a/arch/x86/lib/iomap_copy_64.S -+++ b/arch/x86/lib/iomap_copy_64.S -@@ -17,6 +17,7 @@ - - #include <linux/linkage.h> - #include <asm/dwarf2.h> -+#include <asm/alternative-asm.h> - - /* - * override generic version in lib/iomap_copy.c -@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy) - CFI_STARTPROC - movl %edx,%ecx - rep movsd -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(__iowrite32_copy) -diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S -index efbf2a0..8893637 100644 ---- a/arch/x86/lib/memcpy_64.S -+++ b/arch/x86/lib/memcpy_64.S -@@ -34,6 +34,7 @@ - rep movsq - movl %edx, %ecx - rep movsb -+ pax_force_retaddr - ret - .Lmemcpy_e: - .previous -@@ -51,6 +52,7 @@ - - movl %edx, %ecx - rep movsb -+ pax_force_retaddr - ret - .Lmemcpy_e_e: - .previous -@@ -81,13 +83,13 @@ ENTRY(memcpy) - */ - movq 0*8(%rsi), %r8 - movq 1*8(%rsi), %r9 -- movq 2*8(%rsi), %r10 -+ movq 2*8(%rsi), %rcx - movq 3*8(%rsi), %r11 - leaq 4*8(%rsi), %rsi - - movq %r8, 0*8(%rdi) - movq %r9, 1*8(%rdi) -- movq %r10, 2*8(%rdi) -+ movq %rcx, 2*8(%rdi) - movq %r11, 3*8(%rdi) - leaq 4*8(%rdi), %rdi - jae .Lcopy_forward_loop -@@ -110,12 +112,12 @@ ENTRY(memcpy) - subq $0x20, %rdx - movq -1*8(%rsi), %r8 - movq -2*8(%rsi), %r9 -- movq -3*8(%rsi), %r10 -+ movq -3*8(%rsi), %rcx - movq -4*8(%rsi), %r11 - leaq -4*8(%rsi), %rsi - movq %r8, -1*8(%rdi) - movq %r9, -2*8(%rdi) -- movq %r10, -3*8(%rdi) -+ movq %rcx, -3*8(%rdi) - movq %r11, -4*8(%rdi) - leaq -4*8(%rdi), %rdi - jae .Lcopy_backward_loop -@@ -135,12 +137,13 @@ ENTRY(memcpy) - */ - movq 0*8(%rsi), %r8 - movq 1*8(%rsi), %r9 -- movq -2*8(%rsi, %rdx), %r10 -+ movq -2*8(%rsi, %rdx), %rcx - movq -1*8(%rsi, %rdx), %r11 - movq %r8, 0*8(%rdi) - movq %r9, 1*8(%rdi) -- movq %r10, -2*8(%rdi, %rdx) -+ movq %rcx, -2*8(%rdi, %rdx) - movq %r11, -1*8(%rdi, %rdx) -+ pax_force_retaddr - retq - .p2align 4 - .Lless_16bytes: -@@ -153,6 +156,7 @@ ENTRY(memcpy) - movq -1*8(%rsi, %rdx), %r9 - movq %r8, 0*8(%rdi) - movq %r9, -1*8(%rdi, %rdx) -+ pax_force_retaddr - retq - .p2align 4 - .Lless_8bytes: -@@ -166,6 +170,7 @@ ENTRY(memcpy) - movl -4(%rsi, %rdx), %r8d - movl %ecx, (%rdi) - movl %r8d, -4(%rdi, 
%rdx) -+ pax_force_retaddr - retq - .p2align 4 - .Lless_3bytes: -@@ -183,6 +188,7 @@ ENTRY(memcpy) - jnz .Lloop_1 - - .Lend: -+ pax_force_retaddr - retq - CFI_ENDPROC - ENDPROC(memcpy) -diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S -index ee16461..c39c199 100644 ---- a/arch/x86/lib/memmove_64.S -+++ b/arch/x86/lib/memmove_64.S -@@ -61,13 +61,13 @@ ENTRY(memmove) - 5: - sub $0x20, %rdx - movq 0*8(%rsi), %r11 -- movq 1*8(%rsi), %r10 -+ movq 1*8(%rsi), %rcx - movq 2*8(%rsi), %r9 - movq 3*8(%rsi), %r8 - leaq 4*8(%rsi), %rsi - - movq %r11, 0*8(%rdi) -- movq %r10, 1*8(%rdi) -+ movq %rcx, 1*8(%rdi) - movq %r9, 2*8(%rdi) - movq %r8, 3*8(%rdi) - leaq 4*8(%rdi), %rdi -@@ -81,10 +81,10 @@ ENTRY(memmove) - 4: - movq %rdx, %rcx - movq -8(%rsi, %rdx), %r11 -- lea -8(%rdi, %rdx), %r10 -+ lea -8(%rdi, %rdx), %r9 - shrq $3, %rcx - rep movsq -- movq %r11, (%r10) -+ movq %r11, (%r9) - jmp 13f - .Lmemmove_end_forward: - -@@ -95,14 +95,14 @@ ENTRY(memmove) - 7: - movq %rdx, %rcx - movq (%rsi), %r11 -- movq %rdi, %r10 -+ movq %rdi, %r9 - leaq -8(%rsi, %rdx), %rsi - leaq -8(%rdi, %rdx), %rdi - shrq $3, %rcx - std - rep movsq - cld -- movq %r11, (%r10) -+ movq %r11, (%r9) - jmp 13f - - /* -@@ -127,13 +127,13 @@ ENTRY(memmove) - 8: - subq $0x20, %rdx - movq -1*8(%rsi), %r11 -- movq -2*8(%rsi), %r10 -+ movq -2*8(%rsi), %rcx - movq -3*8(%rsi), %r9 - movq -4*8(%rsi), %r8 - leaq -4*8(%rsi), %rsi - - movq %r11, -1*8(%rdi) -- movq %r10, -2*8(%rdi) -+ movq %rcx, -2*8(%rdi) - movq %r9, -3*8(%rdi) - movq %r8, -4*8(%rdi) - leaq -4*8(%rdi), %rdi -@@ -151,11 +151,11 @@ ENTRY(memmove) - * Move data from 16 bytes to 31 bytes. - */ - movq 0*8(%rsi), %r11 -- movq 1*8(%rsi), %r10 -+ movq 1*8(%rsi), %rcx - movq -2*8(%rsi, %rdx), %r9 - movq -1*8(%rsi, %rdx), %r8 - movq %r11, 0*8(%rdi) -- movq %r10, 1*8(%rdi) -+ movq %rcx, 1*8(%rdi) - movq %r9, -2*8(%rdi, %rdx) - movq %r8, -1*8(%rdi, %rdx) - jmp 13f -@@ -167,9 +167,9 @@ ENTRY(memmove) - * Move data from 8 bytes to 15 bytes. - */ - movq 0*8(%rsi), %r11 -- movq -1*8(%rsi, %rdx), %r10 -+ movq -1*8(%rsi, %rdx), %r9 - movq %r11, 0*8(%rdi) -- movq %r10, -1*8(%rdi, %rdx) -+ movq %r9, -1*8(%rdi, %rdx) - jmp 13f - 10: - cmpq $4, %rdx -@@ -178,9 +178,9 @@ ENTRY(memmove) - * Move data from 4 bytes to 7 bytes. - */ - movl (%rsi), %r11d -- movl -4(%rsi, %rdx), %r10d -+ movl -4(%rsi, %rdx), %r9d - movl %r11d, (%rdi) -- movl %r10d, -4(%rdi, %rdx) -+ movl %r9d, -4(%rdi, %rdx) - jmp 13f - 11: - cmp $2, %rdx -@@ -189,9 +189,9 @@ ENTRY(memmove) - * Move data from 2 bytes to 3 bytes. - */ - movw (%rsi), %r11w -- movw -2(%rsi, %rdx), %r10w -+ movw -2(%rsi, %rdx), %r9w - movw %r11w, (%rdi) -- movw %r10w, -2(%rdi, %rdx) -+ movw %r9w, -2(%rdi, %rdx) - jmp 13f - 12: - cmp $1, %rdx -@@ -202,6 +202,7 @@ ENTRY(memmove) - movb (%rsi), %r11b - movb %r11b, (%rdi) - 13: -+ pax_force_retaddr - retq - CFI_ENDPROC - -@@ -210,6 +211,7 @@ ENTRY(memmove) - /* Forward moving data. 
*/ - movq %rdx, %rcx - rep movsb -+ pax_force_retaddr - retq - .Lmemmove_end_forward_efs: - .previous -diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S -index 79bd454..dff325a 100644 ---- a/arch/x86/lib/memset_64.S -+++ b/arch/x86/lib/memset_64.S -@@ -31,6 +31,7 @@ - movl %r8d,%ecx - rep stosb - movq %r9,%rax -+ pax_force_retaddr - ret - .Lmemset_e: - .previous -@@ -53,6 +54,7 @@ - movl %edx,%ecx - rep stosb - movq %r9,%rax -+ pax_force_retaddr - ret - .Lmemset_e_e: - .previous -@@ -60,13 +62,13 @@ - ENTRY(memset) - ENTRY(__memset) - CFI_STARTPROC -- movq %rdi,%r10 - movq %rdx,%r11 - - /* expand byte value */ - movzbl %sil,%ecx - movabs $0x0101010101010101,%rax - mul %rcx /* with rax, clobbers rdx */ -+ movq %rdi,%rdx - - /* align dst */ - movl %edi,%r9d -@@ -120,7 +122,8 @@ ENTRY(__memset) - jnz .Lloop_1 - - .Lende: -- movq %r10,%rax -+ movq %rdx,%rax -+ pax_force_retaddr - ret - - CFI_RESTORE_STATE -diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c -index c9f2d9b..e7fd2c0 100644 ---- a/arch/x86/lib/mmx_32.c -+++ b/arch/x86/lib/mmx_32.c -@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) - { - void *p; - int i; -+ unsigned long cr0; - - if (unlikely(in_interrupt())) - return __memcpy(to, from, len); -@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) - kernel_fpu_begin(); - - __asm__ __volatile__ ( -- "1: prefetch (%0)\n" /* This set is 28 bytes */ -- " prefetch 64(%0)\n" -- " prefetch 128(%0)\n" -- " prefetch 192(%0)\n" -- " prefetch 256(%0)\n" -+ "1: prefetch (%1)\n" /* This set is 28 bytes */ -+ " prefetch 64(%1)\n" -+ " prefetch 128(%1)\n" -+ " prefetch 192(%1)\n" -+ " prefetch 256(%1)\n" - "2: \n" - ".section .fixup, "ax"\n" -- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ "3: \n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) -- : : "r" (from)); -+ : "=&r" (cr0) : "r" (from) : "ax"); - - for ( ; i > 5; i--) { - __asm__ __volatile__ ( -- "1: prefetch 320(%0)\n" -- "2: movq (%0), %%mm0\n" -- " movq 8(%0), %%mm1\n" -- " movq 16(%0), %%mm2\n" -- " movq 24(%0), %%mm3\n" -- " movq %%mm0, (%1)\n" -- " movq %%mm1, 8(%1)\n" -- " movq %%mm2, 16(%1)\n" -- " movq %%mm3, 24(%1)\n" -- " movq 32(%0), %%mm0\n" -- " movq 40(%0), %%mm1\n" -- " movq 48(%0), %%mm2\n" -- " movq 56(%0), %%mm3\n" -- " movq %%mm0, 32(%1)\n" -- " movq %%mm1, 40(%1)\n" -- " movq %%mm2, 48(%1)\n" -- " movq %%mm3, 56(%1)\n" -+ "1: prefetch 320(%1)\n" -+ "2: movq (%1), %%mm0\n" -+ " movq 8(%1), %%mm1\n" -+ " movq 16(%1), %%mm2\n" -+ " movq 24(%1), %%mm3\n" -+ " movq %%mm0, (%2)\n" -+ " movq %%mm1, 8(%2)\n" -+ " movq %%mm2, 16(%2)\n" -+ " movq %%mm3, 24(%2)\n" -+ " movq 32(%1), %%mm0\n" -+ " movq 40(%1), %%mm1\n" -+ " movq 48(%1), %%mm2\n" -+ " movq 56(%1), %%mm3\n" -+ " movq %%mm0, 32(%2)\n" -+ " movq %%mm1, 40(%2)\n" -+ " movq %%mm2, 48(%2)\n" -+ " movq %%mm3, 56(%2)\n" - ".section .fixup, "ax"\n" -- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ "3:\n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) -- : : "r" (from), "r" 
(to) : "memory"); -+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); - - from += 64; - to += 64; -@@ -158,6 +187,7 @@ static void fast_clear_page(void *page) - static void fast_copy_page(void *to, void *from) - { - int i; -+ unsigned long cr0; - - kernel_fpu_begin(); - -@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from) - * but that is for later. -AV - */ - __asm__ __volatile__( -- "1: prefetch (%0)\n" -- " prefetch 64(%0)\n" -- " prefetch 128(%0)\n" -- " prefetch 192(%0)\n" -- " prefetch 256(%0)\n" -+ "1: prefetch (%1)\n" -+ " prefetch 64(%1)\n" -+ " prefetch 128(%1)\n" -+ " prefetch 192(%1)\n" -+ " prefetch 256(%1)\n" - "2: \n" - ".section .fixup, "ax"\n" -- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ "3: \n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" -- _ASM_EXTABLE(1b, 3b) : : "r" (from)); -+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); - - for (i = 0; i < (4096-320)/64; i++) { - __asm__ __volatile__ ( -- "1: prefetch 320(%0)\n" -- "2: movq (%0), %%mm0\n" -- " movntq %%mm0, (%1)\n" -- " movq 8(%0), %%mm1\n" -- " movntq %%mm1, 8(%1)\n" -- " movq 16(%0), %%mm2\n" -- " movntq %%mm2, 16(%1)\n" -- " movq 24(%0), %%mm3\n" -- " movntq %%mm3, 24(%1)\n" -- " movq 32(%0), %%mm4\n" -- " movntq %%mm4, 32(%1)\n" -- " movq 40(%0), %%mm5\n" -- " movntq %%mm5, 40(%1)\n" -- " movq 48(%0), %%mm6\n" -- " movntq %%mm6, 48(%1)\n" -- " movq 56(%0), %%mm7\n" -- " movntq %%mm7, 56(%1)\n" -+ "1: prefetch 320(%1)\n" -+ "2: movq (%1), %%mm0\n" -+ " movntq %%mm0, (%2)\n" -+ " movq 8(%1), %%mm1\n" -+ " movntq %%mm1, 8(%2)\n" -+ " movq 16(%1), %%mm2\n" -+ " movntq %%mm2, 16(%2)\n" -+ " movq 24(%1), %%mm3\n" -+ " movntq %%mm3, 24(%2)\n" -+ " movq 32(%1), %%mm4\n" -+ " movntq %%mm4, 32(%2)\n" -+ " movq 40(%1), %%mm5\n" -+ " movntq %%mm5, 40(%2)\n" -+ " movq 48(%1), %%mm6\n" -+ " movntq %%mm6, 48(%2)\n" -+ " movq 56(%1), %%mm7\n" -+ " movntq %%mm7, 56(%2)\n" - ".section .fixup, "ax"\n" -- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ "3:\n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" -- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); -+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); - - from += 64; - to += 64; -@@ -280,47 +338,76 @@ static void fast_clear_page(void *page) - static void fast_copy_page(void *to, void *from) - { - int i; -+ unsigned long cr0; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( -- "1: prefetch (%0)\n" -- " prefetch 64(%0)\n" -- " prefetch 128(%0)\n" -- " prefetch 192(%0)\n" -- " prefetch 256(%0)\n" -+ "1: prefetch (%1)\n" -+ " prefetch 64(%1)\n" -+ " prefetch 128(%1)\n" -+ " prefetch 192(%1)\n" -+ " prefetch 256(%1)\n" - "2: \n" - ".section .fixup, "ax"\n" -- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ "3: \n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" -- 
_ASM_EXTABLE(1b, 3b) : : "r" (from)); -+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); - - for (i = 0; i < 4096/64; i++) { - __asm__ __volatile__ ( -- "1: prefetch 320(%0)\n" -- "2: movq (%0), %%mm0\n" -- " movq 8(%0), %%mm1\n" -- " movq 16(%0), %%mm2\n" -- " movq 24(%0), %%mm3\n" -- " movq %%mm0, (%1)\n" -- " movq %%mm1, 8(%1)\n" -- " movq %%mm2, 16(%1)\n" -- " movq %%mm3, 24(%1)\n" -- " movq 32(%0), %%mm0\n" -- " movq 40(%0), %%mm1\n" -- " movq 48(%0), %%mm2\n" -- " movq 56(%0), %%mm3\n" -- " movq %%mm0, 32(%1)\n" -- " movq %%mm1, 40(%1)\n" -- " movq %%mm2, 48(%1)\n" -- " movq %%mm3, 56(%1)\n" -+ "1: prefetch 320(%1)\n" -+ "2: movq (%1), %%mm0\n" -+ " movq 8(%1), %%mm1\n" -+ " movq 16(%1), %%mm2\n" -+ " movq 24(%1), %%mm3\n" -+ " movq %%mm0, (%2)\n" -+ " movq %%mm1, 8(%2)\n" -+ " movq %%mm2, 16(%2)\n" -+ " movq %%mm3, 24(%2)\n" -+ " movq 32(%1), %%mm0\n" -+ " movq 40(%1), %%mm1\n" -+ " movq 48(%1), %%mm2\n" -+ " movq 56(%1), %%mm3\n" -+ " movq %%mm0, 32(%2)\n" -+ " movq %%mm1, 40(%2)\n" -+ " movq %%mm2, 48(%2)\n" -+ " movq %%mm3, 56(%2)\n" - ".section .fixup, "ax"\n" -- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ "3:\n" -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %%cr0, %0\n" -+ " movl %0, %%eax\n" -+ " andl $0xFFFEFFFF, %%eax\n" -+ " movl %%eax, %%cr0\n" -+#endif -+ -+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ " movl %0, %%cr0\n" -+#endif -+ - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) -- : : "r" (from), "r" (to) : "memory"); -+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); - - from += 64; - to += 64; -diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S -index 69fa106..adda88b 100644 ---- a/arch/x86/lib/msr-reg.S -+++ b/arch/x86/lib/msr-reg.S -@@ -3,6 +3,7 @@ - #include <asm/dwarf2.h> - #include <asm/asm.h> - #include <asm/msr.h> -+#include <asm/alternative-asm.h> - - #ifdef CONFIG_X86_64 - /* -@@ -16,7 +17,7 @@ ENTRY(native_\op()_safe_regs) - CFI_STARTPROC - pushq_cfi %rbx - pushq_cfi %rbp -- movq %rdi, %r10 /* Save pointer */ -+ movq %rdi, %r9 /* Save pointer */ - xorl %r11d, %r11d /* Return value */ - movl (%rdi), %eax - movl 4(%rdi), %ecx -@@ -27,16 +28,17 @@ ENTRY(native_\op()_safe_regs) - movl 28(%rdi), %edi - CFI_REMEMBER_STATE - 1: \op --2: movl %eax, (%r10) -+2: movl %eax, (%r9) - movl %r11d, %eax /* Return value */ -- movl %ecx, 4(%r10) -- movl %edx, 8(%r10) -- movl %ebx, 12(%r10) -- movl %ebp, 20(%r10) -- movl %esi, 24(%r10) -- movl %edi, 28(%r10) -+ movl %ecx, 4(%r9) -+ movl %edx, 8(%r9) -+ movl %ebx, 12(%r9) -+ movl %ebp, 20(%r9) -+ movl %esi, 24(%r9) -+ movl %edi, 28(%r9) - popq_cfi %rbp - popq_cfi %rbx -+ pax_force_retaddr - ret - 3: - CFI_RESTORE_STATE -diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S -index 36b0d15..d381858 100644 ---- a/arch/x86/lib/putuser.S -+++ b/arch/x86/lib/putuser.S -@@ -15,7 +15,9 @@ - #include <asm/thread_info.h> - #include <asm/errno.h> - #include <asm/asm.h> -- -+#include <asm/segment.h> -+#include <asm/pgtable.h> -+#include <asm/alternative-asm.h> - - /* - * __put_user_X -@@ -29,52 +31,119 @@ - * as they get called from within inline assembly. 
- */ - --#define ENTER CFI_STARTPROC ; \ -- GET_THREAD_INFO(%_ASM_BX) --#define EXIT ret ; \ -+#define ENTER CFI_STARTPROC -+#define EXIT pax_force_retaddr; ret ; \ - CFI_ENDPROC - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define _DEST %_ASM_CX,%_ASM_BX -+#else -+#define _DEST %_ASM_CX -+#endif -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __copyuser_seg gs; -+#else -+#define __copyuser_seg -+#endif -+ - .text - ENTRY(__put_user_1) - ENTER -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) -+ GET_THREAD_INFO(%_ASM_BX) - cmp TI_addr_limit(%_ASM_BX),%_ASM_CX - jae bad_put_user --1: movb %al,(%_ASM_CX) -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX -+ cmp %_ASM_BX,%_ASM_CX -+ jb 1234f -+ xor %ebx,%ebx -+1234: -+#endif -+ -+#endif -+ -+1: __copyuser_seg movb %al,(_DEST) - xor %eax,%eax - EXIT - ENDPROC(__put_user_1) - - ENTRY(__put_user_2) - ENTER -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) -+ GET_THREAD_INFO(%_ASM_BX) - mov TI_addr_limit(%_ASM_BX),%_ASM_BX - sub $1,%_ASM_BX - cmp %_ASM_BX,%_ASM_CX - jae bad_put_user --2: movw %ax,(%_ASM_CX) -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX -+ cmp %_ASM_BX,%_ASM_CX -+ jb 1234f -+ xor %ebx,%ebx -+1234: -+#endif -+ -+#endif -+ -+2: __copyuser_seg movw %ax,(_DEST) - xor %eax,%eax - EXIT - ENDPROC(__put_user_2) - - ENTRY(__put_user_4) - ENTER -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) -+ GET_THREAD_INFO(%_ASM_BX) - mov TI_addr_limit(%_ASM_BX),%_ASM_BX - sub $3,%_ASM_BX - cmp %_ASM_BX,%_ASM_CX - jae bad_put_user --3: movl %eax,(%_ASM_CX) -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX -+ cmp %_ASM_BX,%_ASM_CX -+ jb 1234f -+ xor %ebx,%ebx -+1234: -+#endif -+ -+#endif -+ -+3: __copyuser_seg movl %eax,(_DEST) - xor %eax,%eax - EXIT - ENDPROC(__put_user_4) - - ENTRY(__put_user_8) - ENTER -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) -+ GET_THREAD_INFO(%_ASM_BX) - mov TI_addr_limit(%_ASM_BX),%_ASM_BX - sub $7,%_ASM_BX - cmp %_ASM_BX,%_ASM_CX - jae bad_put_user --4: mov %_ASM_AX,(%_ASM_CX) -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX -+ cmp %_ASM_BX,%_ASM_CX -+ jb 1234f -+ xor %ebx,%ebx -+1234: -+#endif -+ -+#endif -+ -+4: __copyuser_seg mov %_ASM_AX,(_DEST) - #ifdef CONFIG_X86_32 --5: movl %edx,4(%_ASM_CX) -+5: __copyuser_seg movl %edx,4(_DEST) - #endif - xor %eax,%eax - EXIT -diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S -index 1cad221..de671ee 100644 ---- a/arch/x86/lib/rwlock.S -+++ b/arch/x86/lib/rwlock.S -@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed) - FRAME - 0: LOCK_PREFIX - WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 1234f -+ LOCK_PREFIX -+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) -+ int $4 -+1234: -+ _ASM_EXTABLE(1234b, 1234b) -+#endif -+ - 1: rep; nop - cmpl $WRITE_LOCK_CMP, (%__lock_ptr) - jne 1b - LOCK_PREFIX - WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 1234f -+ LOCK_PREFIX -+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) -+ int $4 -+1234: -+ _ASM_EXTABLE(1234b, 1234b) -+#endif -+ - jnz 0b - ENDFRAME -+ pax_force_retaddr - ret - CFI_ENDPROC - END(__write_lock_failed) -@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed) - FRAME - 0: LOCK_PREFIX - READ_LOCK_SIZE(inc) 
(%__lock_ptr) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 1234f -+ LOCK_PREFIX -+ READ_LOCK_SIZE(dec) (%__lock_ptr) -+ int $4 -+1234: -+ _ASM_EXTABLE(1234b, 1234b) -+#endif -+ - 1: rep; nop - READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) - js 1b - LOCK_PREFIX - READ_LOCK_SIZE(dec) (%__lock_ptr) -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ jno 1234f -+ LOCK_PREFIX -+ READ_LOCK_SIZE(inc) (%__lock_ptr) -+ int $4 -+1234: -+ _ASM_EXTABLE(1234b, 1234b) -+#endif -+ - js 0b - ENDFRAME -+ pax_force_retaddr - ret - CFI_ENDPROC - END(__read_lock_failed) -diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S -index 5dff5f0..cadebf4 100644 ---- a/arch/x86/lib/rwsem.S -+++ b/arch/x86/lib/rwsem.S -@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed) - __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) - CFI_RESTORE __ASM_REG(dx) - restore_common_regs -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(call_rwsem_down_read_failed) -@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed) - movq %rax,%rdi - call rwsem_down_write_failed - restore_common_regs -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(call_rwsem_down_write_failed) -@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake) - movq %rax,%rdi - call rwsem_wake - restore_common_regs --1: ret -+1: pax_force_retaddr -+ ret - CFI_ENDPROC - ENDPROC(call_rwsem_wake) - -@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake) - __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) - CFI_RESTORE __ASM_REG(dx) - restore_common_regs -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(call_rwsem_downgrade_wake) -diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S -index a63efd6..ccecad8 100644 ---- a/arch/x86/lib/thunk_64.S -+++ b/arch/x86/lib/thunk_64.S -@@ -8,6 +8,7 @@ - #include <linux/linkage.h> - #include <asm/dwarf2.h> - #include <asm/calling.h> -+#include <asm/alternative-asm.h> - - /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ - .macro THUNK name, func, put_ret_addr_in_rdi=0 -@@ -41,5 +42,6 @@ - SAVE_ARGS - restore: - RESTORE_ARGS -+ pax_force_retaddr - ret - CFI_ENDPROC -diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c -index e218d5d..35679b4 100644 ---- a/arch/x86/lib/usercopy_32.c -+++ b/arch/x86/lib/usercopy_32.c -@@ -43,7 +43,7 @@ do { \ - __asm__ __volatile__( \ - " testl %1,%1\n" \ - " jz 2f\n" \ -- "0: lodsb\n" \ -+ "0: "__copyuser_seg"lodsb\n" \ - " stosb\n" \ - " testb %%al,%%al\n" \ - " jz 1f\n" \ -@@ -128,10 +128,12 @@ do { \ - int __d0; \ - might_fault(); \ - __asm__ __volatile__( \ -+ __COPYUSER_SET_ES \ - "0: rep; stosl\n" \ - " movl %2,%0\n" \ - "1: rep; stosb\n" \ - "2:\n" \ -+ __COPYUSER_RESTORE_ES \ - ".section .fixup,"ax"\n" \ - "3: lea 0(%2,%0,4),%0\n" \ - " jmp 2b\n" \ -@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n) - might_fault(); - - __asm__ __volatile__( -+ __COPYUSER_SET_ES - " testl %0, %0\n" - " jz 3f\n" - " andl %0,%%ecx\n" -@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n) - " subl %%ecx,%0\n" - " addl %0,%%eax\n" - "1:\n" -+ __COPYUSER_RESTORE_ES - ".section .fixup,"ax"\n" - "2: xorl %%eax,%%eax\n" - " jmp 1b\n" -@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user); - - #ifdef CONFIG_X86_INTEL_USERCOPY - static unsigned long --__copy_user_intel(void __user *to, const void *from, unsigned long size) -+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) - { - int d0, d1; - __asm__ __volatile__( -@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) - " .align 2,0x90\n" - "3: movl 0(%4), %%eax\n" - "4: movl 4(%4), %%edx\n" -- "5: movl %%eax, 0(%3)\n" -- "6: movl %%edx, 4(%3)\n" -+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n" -+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n" - "7: movl 8(%4), %%eax\n" - "8: movl 12(%4),%%edx\n" -- "9: movl %%eax, 8(%3)\n" -- "10: movl %%edx, 12(%3)\n" -+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n" -+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n" - "11: movl 16(%4), %%eax\n" - "12: movl 20(%4), %%edx\n" -- "13: movl %%eax, 16(%3)\n" -- "14: movl %%edx, 20(%3)\n" -+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n" -+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n" - "15: movl 24(%4), %%eax\n" - "16: movl 28(%4), %%edx\n" -- "17: movl %%eax, 24(%3)\n" -- "18: movl %%edx, 28(%3)\n" -+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n" -+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n" - "19: movl 32(%4), %%eax\n" - "20: movl 36(%4), %%edx\n" -- "21: movl %%eax, 32(%3)\n" -- "22: movl %%edx, 36(%3)\n" -+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n" -+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n" - "23: movl 40(%4), %%eax\n" - "24: movl 44(%4), %%edx\n" -- "25: movl %%eax, 40(%3)\n" -- "26: movl %%edx, 44(%3)\n" -+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n" -+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n" - "27: movl 48(%4), %%eax\n" - "28: movl 52(%4), %%edx\n" -- "29: movl %%eax, 48(%3)\n" -- "30: movl %%edx, 52(%3)\n" -+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n" -+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n" - "31: movl 56(%4), %%eax\n" - "32: movl 60(%4), %%edx\n" -- "33: movl %%eax, 56(%3)\n" -- "34: movl %%edx, 60(%3)\n" -+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n" -+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n" - " addl $-64, %0\n" - " addl $64, %4\n" - " addl $64, %3\n" -@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" -+ __COPYUSER_SET_ES - "99: rep; 
movsl\n" - "36: movl %%eax, %0\n" - "37: rep; movsb\n" - "100:\n" -+ __COPYUSER_RESTORE_ES -+ ".section .fixup,"ax"\n" -+ "101: lea 0(%%eax,%0,4),%0\n" -+ " jmp 100b\n" -+ ".previous\n" -+ ".section __ex_table,"a"\n" -+ " .align 4\n" -+ " .long 1b,100b\n" -+ " .long 2b,100b\n" -+ " .long 3b,100b\n" -+ " .long 4b,100b\n" -+ " .long 5b,100b\n" -+ " .long 6b,100b\n" -+ " .long 7b,100b\n" -+ " .long 8b,100b\n" -+ " .long 9b,100b\n" -+ " .long 10b,100b\n" -+ " .long 11b,100b\n" -+ " .long 12b,100b\n" -+ " .long 13b,100b\n" -+ " .long 14b,100b\n" -+ " .long 15b,100b\n" -+ " .long 16b,100b\n" -+ " .long 17b,100b\n" -+ " .long 18b,100b\n" -+ " .long 19b,100b\n" -+ " .long 20b,100b\n" -+ " .long 21b,100b\n" -+ " .long 22b,100b\n" -+ " .long 23b,100b\n" -+ " .long 24b,100b\n" -+ " .long 25b,100b\n" -+ " .long 26b,100b\n" -+ " .long 27b,100b\n" -+ " .long 28b,100b\n" -+ " .long 29b,100b\n" -+ " .long 30b,100b\n" -+ " .long 31b,100b\n" -+ " .long 32b,100b\n" -+ " .long 33b,100b\n" -+ " .long 34b,100b\n" -+ " .long 35b,100b\n" -+ " .long 36b,100b\n" -+ " .long 37b,100b\n" -+ " .long 99b,101b\n" -+ ".previous" -+ : "=&c"(size), "=&D" (d0), "=&S" (d1) -+ : "1"(to), "2"(from), "0"(size) -+ : "eax", "edx", "memory"); -+ return size; -+} -+ -+static unsigned long -+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) -+{ -+ int d0, d1; -+ __asm__ __volatile__( -+ " .align 2,0x90\n" -+ "1: "__copyuser_seg" movl 32(%4), %%eax\n" -+ " cmpl $67, %0\n" -+ " jbe 3f\n" -+ "2: "__copyuser_seg" movl 64(%4), %%eax\n" -+ " .align 2,0x90\n" -+ "3: "__copyuser_seg" movl 0(%4), %%eax\n" -+ "4: "__copyuser_seg" movl 4(%4), %%edx\n" -+ "5: movl %%eax, 0(%3)\n" -+ "6: movl %%edx, 4(%3)\n" -+ "7: "__copyuser_seg" movl 8(%4), %%eax\n" -+ "8: "__copyuser_seg" movl 12(%4),%%edx\n" -+ "9: movl %%eax, 8(%3)\n" -+ "10: movl %%edx, 12(%3)\n" -+ "11: "__copyuser_seg" movl 16(%4), %%eax\n" -+ "12: "__copyuser_seg" movl 20(%4), %%edx\n" -+ "13: movl %%eax, 16(%3)\n" -+ "14: movl %%edx, 20(%3)\n" -+ "15: "__copyuser_seg" movl 24(%4), %%eax\n" -+ "16: "__copyuser_seg" movl 28(%4), %%edx\n" -+ "17: movl %%eax, 24(%3)\n" -+ "18: movl %%edx, 28(%3)\n" -+ "19: "__copyuser_seg" movl 32(%4), %%eax\n" -+ "20: "__copyuser_seg" movl 36(%4), %%edx\n" -+ "21: movl %%eax, 32(%3)\n" -+ "22: movl %%edx, 36(%3)\n" -+ "23: "__copyuser_seg" movl 40(%4), %%eax\n" -+ "24: "__copyuser_seg" movl 44(%4), %%edx\n" -+ "25: movl %%eax, 40(%3)\n" -+ "26: movl %%edx, 44(%3)\n" -+ "27: "__copyuser_seg" movl 48(%4), %%eax\n" -+ "28: "__copyuser_seg" movl 52(%4), %%edx\n" -+ "29: movl %%eax, 48(%3)\n" -+ "30: movl %%edx, 52(%3)\n" -+ "31: "__copyuser_seg" movl 56(%4), %%eax\n" -+ "32: "__copyuser_seg" movl 60(%4), %%edx\n" -+ "33: movl %%eax, 56(%3)\n" -+ "34: movl %%edx, 60(%3)\n" -+ " addl $-64, %0\n" -+ " addl $64, %4\n" -+ " addl $64, %3\n" -+ " cmpl $63, %0\n" -+ " ja 1b\n" -+ "35: movl %0, %%eax\n" -+ " shrl $2, %0\n" -+ " andl $3, %%eax\n" -+ " cld\n" -+ "99: rep; "__copyuser_seg" movsl\n" -+ "36: movl %%eax, %0\n" -+ "37: rep; "__copyuser_seg" movsb\n" -+ "100:\n" - ".section .fixup,"ax"\n" - "101: lea 0(%%eax,%0,4),%0\n" - " jmp 100b\n" -@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) - int d0, d1; - __asm__ __volatile__( - " .align 2,0x90\n" -- "0: movl 32(%4), %%eax\n" -+ "0: "__copyuser_seg" movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" -- "1: movl 64(%4), %%eax\n" -+ "1: "__copyuser_seg" movl 64(%4), %%eax\n" - " .align 2,0x90\n" -- "2: movl 0(%4), %%eax\n" 
-- "21: movl 4(%4), %%edx\n" -+ "2: "__copyuser_seg" movl 0(%4), %%eax\n" -+ "21: "__copyuser_seg" movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" -- "3: movl 8(%4), %%eax\n" -- "31: movl 12(%4),%%edx\n" -+ "3: "__copyuser_seg" movl 8(%4), %%eax\n" -+ "31: "__copyuser_seg" movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" - " movl %%edx, 12(%3)\n" -- "4: movl 16(%4), %%eax\n" -- "41: movl 20(%4), %%edx\n" -+ "4: "__copyuser_seg" movl 16(%4), %%eax\n" -+ "41: "__copyuser_seg" movl 20(%4), %%edx\n" - " movl %%eax, 16(%3)\n" - " movl %%edx, 20(%3)\n" -- "10: movl 24(%4), %%eax\n" -- "51: movl 28(%4), %%edx\n" -+ "10: "__copyuser_seg" movl 24(%4), %%eax\n" -+ "51: "__copyuser_seg" movl 28(%4), %%edx\n" - " movl %%eax, 24(%3)\n" - " movl %%edx, 28(%3)\n" -- "11: movl 32(%4), %%eax\n" -- "61: movl 36(%4), %%edx\n" -+ "11: "__copyuser_seg" movl 32(%4), %%eax\n" -+ "61: "__copyuser_seg" movl 36(%4), %%edx\n" - " movl %%eax, 32(%3)\n" - " movl %%edx, 36(%3)\n" -- "12: movl 40(%4), %%eax\n" -- "71: movl 44(%4), %%edx\n" -+ "12: "__copyuser_seg" movl 40(%4), %%eax\n" -+ "71: "__copyuser_seg" movl 44(%4), %%edx\n" - " movl %%eax, 40(%3)\n" - " movl %%edx, 44(%3)\n" -- "13: movl 48(%4), %%eax\n" -- "81: movl 52(%4), %%edx\n" -+ "13: "__copyuser_seg" movl 48(%4), %%eax\n" -+ "81: "__copyuser_seg" movl 52(%4), %%edx\n" - " movl %%eax, 48(%3)\n" - " movl %%edx, 52(%3)\n" -- "14: movl 56(%4), %%eax\n" -- "91: movl 60(%4), %%edx\n" -+ "14: "__copyuser_seg" movl 56(%4), %%eax\n" -+ "91: "__copyuser_seg" movl 60(%4), %%edx\n" - " movl %%eax, 56(%3)\n" - " movl %%edx, 60(%3)\n" - " addl $-64, %0\n" -@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" -- "6: rep; movsl\n" -+ "6: rep; "__copyuser_seg" movsl\n" - " movl %%eax,%0\n" -- "7: rep; movsb\n" -+ "7: rep; "__copyuser_seg" movsb\n" - "8:\n" - ".section .fixup,"ax"\n" - "9: lea 0(%%eax,%0,4),%0\n" -@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, - - __asm__ __volatile__( - " .align 2,0x90\n" -- "0: movl 32(%4), %%eax\n" -+ "0: "__copyuser_seg" movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" -- "1: movl 64(%4), %%eax\n" -+ "1: "__copyuser_seg" movl 64(%4), %%eax\n" - " .align 2,0x90\n" -- "2: movl 0(%4), %%eax\n" -- "21: movl 4(%4), %%edx\n" -+ "2: "__copyuser_seg" movl 0(%4), %%eax\n" -+ "21: "__copyuser_seg" movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" -- "3: movl 8(%4), %%eax\n" -- "31: movl 12(%4),%%edx\n" -+ "3: "__copyuser_seg" movl 8(%4), %%eax\n" -+ "31: "__copyuser_seg" movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" -- "4: movl 16(%4), %%eax\n" -- "41: movl 20(%4), %%edx\n" -+ "4: "__copyuser_seg" movl 16(%4), %%eax\n" -+ "41: "__copyuser_seg" movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" -- "10: movl 24(%4), %%eax\n" -- "51: movl 28(%4), %%edx\n" -+ "10: "__copyuser_seg" movl 24(%4), %%eax\n" -+ "51: "__copyuser_seg" movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" -- "11: movl 32(%4), %%eax\n" -- "61: movl 36(%4), %%edx\n" -+ "11: "__copyuser_seg" movl 32(%4), %%eax\n" -+ "61: "__copyuser_seg" movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" -- "12: movl 40(%4), %%eax\n" -- "71: movl 44(%4), %%edx\n" -+ "12: "__copyuser_seg" movl 40(%4), %%eax\n" -+ "71: "__copyuser_seg" movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti 
%%edx, 44(%3)\n" -- "13: movl 48(%4), %%eax\n" -- "81: movl 52(%4), %%edx\n" -+ "13: "__copyuser_seg" movl 48(%4), %%eax\n" -+ "81: "__copyuser_seg" movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" -- "14: movl 56(%4), %%eax\n" -- "91: movl 60(%4), %%edx\n" -+ "14: "__copyuser_seg" movl 56(%4), %%eax\n" -+ "91: "__copyuser_seg" movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" - " addl $-64, %0\n" -@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" -- "6: rep; movsl\n" -+ "6: rep; "__copyuser_seg" movsl\n" - " movl %%eax,%0\n" -- "7: rep; movsb\n" -+ "7: rep; "__copyuser_seg" movsb\n" - "8:\n" - ".section .fixup,"ax"\n" - "9: lea 0(%%eax,%0,4),%0\n" -@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to, - - __asm__ __volatile__( - " .align 2,0x90\n" -- "0: movl 32(%4), %%eax\n" -+ "0: "__copyuser_seg" movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" -- "1: movl 64(%4), %%eax\n" -+ "1: "__copyuser_seg" movl 64(%4), %%eax\n" - " .align 2,0x90\n" -- "2: movl 0(%4), %%eax\n" -- "21: movl 4(%4), %%edx\n" -+ "2: "__copyuser_seg" movl 0(%4), %%eax\n" -+ "21: "__copyuser_seg" movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" -- "3: movl 8(%4), %%eax\n" -- "31: movl 12(%4),%%edx\n" -+ "3: "__copyuser_seg" movl 8(%4), %%eax\n" -+ "31: "__copyuser_seg" movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" -- "4: movl 16(%4), %%eax\n" -- "41: movl 20(%4), %%edx\n" -+ "4: "__copyuser_seg" movl 16(%4), %%eax\n" -+ "41: "__copyuser_seg" movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" -- "10: movl 24(%4), %%eax\n" -- "51: movl 28(%4), %%edx\n" -+ "10: "__copyuser_seg" movl 24(%4), %%eax\n" -+ "51: "__copyuser_seg" movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" -- "11: movl 32(%4), %%eax\n" -- "61: movl 36(%4), %%edx\n" -+ "11: "__copyuser_seg" movl 32(%4), %%eax\n" -+ "61: "__copyuser_seg" movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" -- "12: movl 40(%4), %%eax\n" -- "71: movl 44(%4), %%edx\n" -+ "12: "__copyuser_seg" movl 40(%4), %%eax\n" -+ "71: "__copyuser_seg" movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" -- "13: movl 48(%4), %%eax\n" -- "81: movl 52(%4), %%edx\n" -+ "13: "__copyuser_seg" movl 48(%4), %%eax\n" -+ "81: "__copyuser_seg" movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" -- "14: movl 56(%4), %%eax\n" -- "91: movl 60(%4), %%edx\n" -+ "14: "__copyuser_seg" movl 56(%4), %%eax\n" -+ "91: "__copyuser_seg" movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" - " addl $-64, %0\n" -@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to, - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" -- "6: rep; movsl\n" -+ "6: rep; "__copyuser_seg" movsl\n" - " movl %%eax,%0\n" -- "7: rep; movsb\n" -+ "7: rep; "__copyuser_seg" movsb\n" - "8:\n" - ".section .fixup,"ax"\n" - "9: lea 0(%%eax,%0,4),%0\n" -@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to, - */ - unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, - unsigned long size); --unsigned long __copy_user_intel(void __user *to, const void *from, -+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, -+ unsigned long size); -+unsigned long 
__generic_copy_from_user_intel(void *to, const void __user *from, - unsigned long size); - unsigned long __copy_user_zeroing_intel_nocache(void *to, - const void __user *from, unsigned long size); - #endif /* CONFIG_X86_INTEL_USERCOPY */ - - /* Generic arbitrary sized copy. */ --#define __copy_user(to, from, size) \ -+#define __copy_user(to, from, size, prefix, set, restore) \ - do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ -+ set \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ -- "4: rep; movsb\n" \ -+ "4: rep; "prefix"movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ -- "0: rep; movsl\n" \ -+ "0: rep; "prefix"movsl\n" \ - " movl %3,%0\n" \ -- "1: rep; movsb\n" \ -+ "1: rep; "prefix"movsb\n" \ - "2:\n" \ -+ restore \ - ".section .fixup,"ax"\n" \ - "5: addl %3,%0\n" \ - " jmp 2b\n" \ -@@ -682,14 +799,14 @@ do { \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ -- "4: rep; movsb\n" \ -+ "4: rep; "__copyuser_seg"movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ -- "0: rep; movsl\n" \ -+ "0: rep; "__copyuser_seg"movsl\n" \ - " movl %3,%0\n" \ -- "1: rep; movsb\n" \ -+ "1: rep; "__copyuser_seg"movsb\n" \ - "2:\n" \ - ".section .fixup,"ax"\n" \ - "5: addl %3,%0\n" \ -@@ -775,9 +892,9 @@ survive: - } - #endif - if (movsl_is_ok(to, from, n)) -- __copy_user(to, from, n); -+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES); - else -- n = __copy_user_intel(to, from, n); -+ n = __generic_copy_to_user_intel(to, from, n); - return n; - } - EXPORT_SYMBOL(__copy_to_user_ll); -@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, - unsigned long n) - { - if (movsl_is_ok(to, from, n)) -- __copy_user(to, from, n); -+ __copy_user(to, from, n, __copyuser_seg, "", ""); - else -- n = __copy_user_intel((void __user *)to, -- (const void *)from, n); -+ n = __generic_copy_from_user_intel(to, from, n); - return n; - } - EXPORT_SYMBOL(__copy_from_user_ll_nozero); -@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr - if (n > 64 && cpu_has_xmm2) - n = __copy_user_intel_nocache(to, from, n); - else -- __copy_user(to, from, n); -+ __copy_user(to, from, n, __copyuser_seg, "", ""); - #else -- __copy_user(to, from, n); -+ __copy_user(to, from, n, __copyuser_seg, "", ""); - #endif - return n; - } - EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); - --/** -- * copy_to_user: - Copy a block of data into user space. -- * @to: Destination address, in user space. -- * @from: Source address, in kernel space. -- * @n: Number of bytes to copy. -- * -- * Context: User context only. This function may sleep. -- * -- * Copy data from kernel space to user space. -- * -- * Returns number of bytes that could not be copied. -- * On success, this will be zero. -- */ --unsigned long --copy_to_user(void __user *to, const void *from, unsigned long n) --{ -- if (access_ok(VERIFY_WRITE, to, n)) -- n = __copy_to_user(to, from, n); -- return n; --} --EXPORT_SYMBOL(copy_to_user); -- --/** -- * copy_from_user: - Copy a block of data from user space. -- * @to: Destination address, in kernel space. -- * @from: Source address, in user space. -- * @n: Number of bytes to copy. -- * -- * Context: User context only. This function may sleep. -- * -- * Copy data from user space to kernel space. -- * -- * Returns number of bytes that could not be copied. 
-- * On success, this will be zero. -- * -- * If some data could not be copied, this function will pad the copied -- * data to the requested size using zero bytes. -- */ --unsigned long --_copy_from_user(void *to, const void __user *from, unsigned long n) --{ -- if (access_ok(VERIFY_READ, from, n)) -- n = __copy_from_user(to, from, n); -- else -- memset(to, 0, n); -- return n; --} --EXPORT_SYMBOL(_copy_from_user); -- - void copy_from_user_overflow(void) - { - WARN(1, "Buffer overflow detected!\n"); - } - EXPORT_SYMBOL(copy_from_user_overflow); -+ -+void copy_to_user_overflow(void) -+{ -+ WARN(1, "Buffer overflow detected!\n"); -+} -+EXPORT_SYMBOL(copy_to_user_overflow); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+void __set_fs(mm_segment_t x) -+{ -+ switch (x.seg) { -+ case 0: -+ loadsegment(gs, 0); -+ break; -+ case TASK_SIZE_MAX: -+ loadsegment(gs, __USER_DS); -+ break; -+ case -1UL: -+ loadsegment(gs, __KERNEL_DS); -+ break; -+ default: -+ BUG(); -+ } -+ return; -+} -+EXPORT_SYMBOL(__set_fs); -+ -+void set_fs(mm_segment_t x) -+{ -+ current_thread_info()->addr_limit = x; -+ __set_fs(x); -+} -+EXPORT_SYMBOL(set_fs); -+#endif -diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c -index b7c2849..8633ad8 100644 ---- a/arch/x86/lib/usercopy_64.c -+++ b/arch/x86/lib/usercopy_64.c -@@ -42,6 +42,12 @@ long - __strncpy_from_user(char *dst, const char __user *src, long count) - { - long res; -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)src < PAX_USER_SHADOW_BASE) -+ src += PAX_USER_SHADOW_BASE; -+#endif -+ - __do_strncpy_from_user(dst, src, count, res); - return res; - } -@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size) - { - long __d0; - might_fault(); -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE) -+ addr += PAX_USER_SHADOW_BASE; -+#endif -+ - /* no memory constraint because it doesn't change any memory gcc knows - about */ - asm volatile( -@@ -149,12 +161,20 @@ long strlen_user(const char __user *s) - } - EXPORT_SYMBOL(strlen_user); - --unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) -+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len) - { -- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { -- return copy_user_generic((__force void *)to, (__force void *)from, len); -- } -- return len; -+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if ((unsigned long)to < PAX_USER_SHADOW_BASE) -+ to += PAX_USER_SHADOW_BASE; -+ if ((unsigned long)from < PAX_USER_SHADOW_BASE) -+ from += PAX_USER_SHADOW_BASE; -+#endif -+ -+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len); -+ } -+ return len; - } - EXPORT_SYMBOL(copy_in_user); - -@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user); - * it is not necessary to optimize tail handling. 
- */ - unsigned long --copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) -+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) - { - char c; - unsigned zero_len; -diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c -index d0474ad..36e9257 100644 ---- a/arch/x86/mm/extable.c -+++ b/arch/x86/mm/extable.c -@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs) - const struct exception_table_entry *fixup; - - #ifdef CONFIG_PNPBIOS -- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { -+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { - extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; - extern u32 pnp_bios_is_utter_crap; - pnp_bios_is_utter_crap = 1; -diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c -index 0d17c8c..4f4764f 100644 ---- a/arch/x86/mm/fault.c -+++ b/arch/x86/mm/fault.c -@@ -13,11 +13,18 @@ - #include <linux/perf_event.h> /* perf_sw_event */ - #include <linux/hugetlb.h> /* hstate_index_to_shift */ - #include <linux/prefetch.h> /* prefetchw */ -+#include <linux/unistd.h> -+#include <linux/compiler.h> - - #include <asm/traps.h> /* dotraplinkage, ... */ - #include <asm/pgalloc.h> /* pgd_*(), ... */ - #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ - #include <asm/vsyscall.h> -+#include <asm/tlbflush.h> -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#include <asm/stacktrace.h> -+#endif - - /* - * Page fault error code bits: -@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs) - int ret = 0; - - /* kprobe_running() needs smp_processor_id() */ -- if (kprobes_built_in() && !user_mode_vm(regs)) { -+ if (kprobes_built_in() && !user_mode(regs)) { - preempt_disable(); - if (kprobe_running() && kprobe_fault_handler(regs, 14)) - ret = 1; -@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, - return !instr_lo || (instr_lo>>1) == 1; - case 0x00: - /* Prefetch instruction is 0x0F0D or 0x0F18 */ -- if (probe_kernel_address(instr, opcode)) -+ if (user_mode(regs)) { -+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) -+ return 0; -+ } else if (probe_kernel_address(instr, opcode)) - return 0; - - *prefetch = (instr_lo == 0xF) && -@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) - while (instr < max_instr) { - unsigned char opcode; - -- if (probe_kernel_address(instr, opcode)) -+ if (user_mode(regs)) { -+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) -+ break; -+ } else if (probe_kernel_address(instr, opcode)) - break; - - instr++; -@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, - force_sig_info(si_signo, &info, tsk); - } - -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address); -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+static int pax_handle_fetch_fault(struct pt_regs *regs); -+#endif -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) -+{ -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ pgd = pgd_offset(mm, address); -+ if (!pgd_present(*pgd)) -+ return NULL; -+ pud = pud_offset(pgd, address); -+ if (!pud_present(*pud)) -+ return NULL; -+ pmd = pmd_offset(pud, address); -+ if (!pmd_present(*pmd)) -+ return NULL; -+ return pmd; -+} -+#endif -+ - 
DEFINE_SPINLOCK(pgd_lock); - LIST_HEAD(pgd_list); - -@@ -231,10 +272,22 @@ void vmalloc_sync_all(void) - for (address = VMALLOC_START & PMD_MASK; - address >= TASK_SIZE && address < FIXADDR_TOP; - address += PMD_SIZE) { -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ unsigned long cpu; -+#else - struct page *page; -+#endif - - spin_lock(&pgd_lock); -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ pgd_t *pgd = get_cpu_pgd(cpu); -+ pmd_t *ret; -+#else - list_for_each_entry(page, &pgd_list, lru) { -+ pgd_t *pgd = page_address(page); - spinlock_t *pgt_lock; - pmd_t *ret; - -@@ -242,8 +295,13 @@ void vmalloc_sync_all(void) - pgt_lock = &pgd_page_get_mm(page)->page_table_lock; - - spin_lock(pgt_lock); -- ret = vmalloc_sync_one(page_address(page), address); -+#endif -+ -+ ret = vmalloc_sync_one(pgd, address); -+ -+#ifndef CONFIG_PAX_PER_CPU_PGD - spin_unlock(pgt_lock); -+#endif - - if (!ret) - break; -@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) - * an interrupt in the middle of a task switch.. - */ - pgd_paddr = read_cr3(); -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK)); -+#endif -+ - pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); - if (!pmd_k) - return -1; -@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) - * happen within a race in page table update. In the later - * case just flush: - */ -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK)); -+ pgd = pgd_offset_cpu(smp_processor_id(), address); -+#else - pgd = pgd_offset(current->active_mm, address); -+#endif -+ - pgd_ref = pgd_offset_k(address); - if (pgd_none(*pgd_ref)) - return -1; -@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) - static int is_errata100(struct pt_regs *regs, unsigned long address) - { - #ifdef CONFIG_X86_64 -- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) -+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) - return 1; - #endif - return 0; -@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) - } - - static const char nx_warning[] = KERN_CRIT --"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; -+"kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d, task: %s, pid: %d)\n"; - - static void - show_fault_oops(struct pt_regs *regs, unsigned long error_code, -@@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, - if (!oops_may_print()) - return; - -- if (error_code & PF_INSTR) { -+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { - unsigned int level; - - pte_t *pte = lookup_address(address, &level); - - if (pte && pte_present(*pte) && !pte_exec(*pte)) -- printk(nx_warning, current_uid()); -+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current)); - } - -+#ifdef CONFIG_PAX_KERNEXEC -+ if (init_mm.start_code <= address && address < init_mm.end_code) { -+ if (current->signal->curr_ip) -+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", -+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); -+ else -+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", -+ current->comm, task_pid_nr(current), current_uid(), current_euid()); -+ } -+#endif -+ - printk(KERN_ALERT "BUG: unable to handle kernel "); - if (address < PAGE_SIZE) - printk(KERN_CONT "NULL pointer dereference"); -@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - } - #endif - -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ if (pax_is_fetch_fault(regs, error_code, address)) { -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ switch (pax_handle_fetch_fault(regs)) { -+ case 2: -+ return; -+ } -+#endif -+ -+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - if (unlikely(show_unhandled_signals)) - show_signal_msg(regs, error_code, address, tsk); - -@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { - printk(KERN_ERR - "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", -- tsk->comm, tsk->pid, address); -+ tsk->comm, task_pid_nr(tsk), address); - code = BUS_MCEERR_AR; - } - #endif -@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) - return 1; - } - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) -+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) -+{ -+ pte_t *pte; -+ pmd_t *pmd; -+ spinlock_t *ptl; -+ unsigned char pte_mask; -+ -+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || -+ !(mm->pax_flags & MF_PAX_PAGEEXEC)) -+ return 0; -+ -+ /* PaX: it's our fault, let's handle it if we can */ -+ -+ /* PaX: take a look at read faults before acquiring any locks */ -+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { -+ /* instruction fetch attempt from a protected page in user mode */ -+ up_read(&mm->mmap_sem); -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ switch (pax_handle_fetch_fault(regs)) { -+ case 2: -+ return 1; -+ } -+#endif -+ -+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); -+ do_group_exit(SIGKILL); -+ } -+ -+ pmd = pax_get_pmd(mm, address); -+ if (unlikely(!pmd)) -+ return 0; -+ -+ pte = pte_offset_map_lock(mm, pmd, address, &ptl); -+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { -+ pte_unmap_unlock(pte, ptl); -+ return 0; -+ } -+ -+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { -+ /* write attempt to a protected 
page in user mode */ -+ pte_unmap_unlock(pte, ptl); -+ return 0; -+ } -+ -+#ifdef CONFIG_SMP -+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) -+#else -+ if (likely(address > get_limit(regs->cs))) -+#endif -+ { -+ set_pte(pte, pte_mkread(*pte)); -+ __flush_tlb_one(address); -+ pte_unmap_unlock(pte, ptl); -+ up_read(&mm->mmap_sem); -+ return 1; -+ } -+ -+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); -+ -+ /* -+ * PaX: fill DTLB with user rights and retry -+ */ -+ __asm__ __volatile__ ( -+ "orb %2,(%1)\n" -+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) -+/* -+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's -+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* -+ * page fault when examined during a TLB load attempt. this is true not only -+ * for PTEs holding a non-present entry but also present entries that will -+ * raise a page fault (such as those set up by PaX, or the copy-on-write -+ * mechanism). in effect it means that we do *not* need to flush the TLBs -+ * for our target pages since their PTEs are simply not in the TLBs at all. -+ -+ * the best thing in omitting it is that we gain around 15-20% speed in the -+ * fast path of the page fault handler and can get rid of tracing since we -+ * can no longer flush unintended entries. -+ */ -+ "invlpg (%0)\n" -+#endif -+ __copyuser_seg"testb $0,(%0)\n" -+ "xorb %3,(%1)\n" -+ : -+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER) -+ : "memory", "cc"); -+ pte_unmap_unlock(pte, ptl); -+ up_read(&mm->mmap_sem); -+ return 1; -+} -+#endif -+ - /* - * Handle a spurious fault caused by a stale TLB entry. - * -@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1; - static inline int - access_error(unsigned long error_code, struct vm_area_struct *vma) - { -+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) -+ return 1; -+ - if (error_code & PF_WRITE) { - /* write, present and write, not present: */ - if (unlikely(!(vma->vm_flags & VM_WRITE))) -@@ -989,18 +1181,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) - { - struct vm_area_struct *vma; - struct task_struct *tsk; -- unsigned long address; - struct mm_struct *mm; - int fault; - int write = error_code & PF_WRITE; - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | - (write ? 
FAULT_FLAG_WRITE : 0); - -- tsk = current; -- mm = tsk->mm; -- - /* Get the faulting address: */ -- address = read_cr2(); -+ unsigned long address = read_cr2(); -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) { -+ if (!search_exception_tables(regs->ip)) { -+ bad_area_nosemaphore(regs, error_code, address); -+ return; -+ } -+ if (address < PAX_USER_SHADOW_BASE) { -+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); -+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip); -+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); -+ } else -+ address -= PAX_USER_SHADOW_BASE; -+ } -+#endif -+ -+ tsk = current; -+ mm = tsk->mm; - - /* - * Detect and handle instructions that would cause a page fault for -@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) - * User-mode registers count as a user access even for any - * potential system fault or CPU buglet: - */ -- if (user_mode_vm(regs)) { -+ if (user_mode(regs)) { - local_irq_enable(); - error_code |= PF_USER; - } else { -@@ -1116,6 +1322,11 @@ retry: - might_sleep(); - } - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) -+ if (pax_handle_pageexec_fault(regs, mm, address, error_code)) -+ return; -+#endif -+ - vma = find_vma(mm, address); - if (unlikely(!vma)) { - bad_area(regs, error_code, address); -@@ -1127,18 +1338,24 @@ retry: - bad_area(regs, error_code, address); - return; - } -- if (error_code & PF_USER) { -- /* -- * Accessing the stack below %sp is always a bug. -- * The large cushion allows instructions like enter -- * and pusha to work. ("enter $65535, $31" pushes -- * 32 pointers and then decrements %sp by 65535.) -- */ -- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { -- bad_area(regs, error_code, address); -- return; -- } -+ /* -+ * Accessing the stack below %sp is always a bug. -+ * The large cushion allows instructions like enter -+ * and pusha to work. ("enter $65535, $31" pushes -+ * 32 pointers and then decrements %sp by 65535.) 
-+ */ -+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { -+ bad_area(regs, error_code, address); -+ return; - } -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { -+ bad_area(regs, error_code, address); -+ return; -+ } -+#endif -+ - if (unlikely(expand_stack(vma, address))) { - bad_area(regs, error_code, address); - return; -@@ -1193,3 +1410,240 @@ good_area: - - up_read(&mm->mmap_sem); - } -+ -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) -+{ -+ struct mm_struct *mm = current->mm; -+ unsigned long ip = regs->ip; -+ -+ if (v8086_mode(regs)) -+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (mm->pax_flags & MF_PAX_PAGEEXEC) { -+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) -+ return true; -+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address) -+ return true; -+ return false; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) { -+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) -+ return true; -+ return false; -+ } -+#endif -+ -+ return false; -+} -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+static int pax_handle_fetch_fault_32(struct pt_regs *regs) -+{ -+ int err; -+ -+ do { /* PaX: gcc trampoline emulation #1 */ -+ unsigned char mov1, mov2; -+ unsigned short jmp; -+ unsigned int addr1, addr2; -+ -+#ifdef CONFIG_X86_64 -+ if ((regs->ip + 11) >> 32) -+ break; -+#endif -+ -+ err = get_user(mov1, (unsigned char __user *)regs->ip); -+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); -+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); -+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); -+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); -+ -+ if (err) -+ break; -+ -+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { -+ regs->cx = addr1; -+ regs->ax = addr2; -+ regs->ip = addr2; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: gcc trampoline emulation #2 */ -+ unsigned char mov, jmp; -+ unsigned int addr1, addr2; -+ -+#ifdef CONFIG_X86_64 -+ if ((regs->ip + 9) >> 32) -+ break; -+#endif -+ -+ err = get_user(mov, (unsigned char __user *)regs->ip); -+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); -+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); -+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); -+ -+ if (err) -+ break; -+ -+ if (mov == 0xB9 && jmp == 0xE9) { -+ regs->cx = addr1; -+ regs->ip = (unsigned int)(regs->ip + addr2 + 10); -+ return 2; -+ } -+ } while (0); -+ -+ return 1; /* PaX in action */ -+} -+ -+#ifdef CONFIG_X86_64 -+static int pax_handle_fetch_fault_64(struct pt_regs *regs) -+{ -+ int err; -+ -+ do { /* PaX: gcc trampoline emulation #1 */ -+ unsigned short mov1, mov2, jmp1; -+ unsigned char jmp2; -+ unsigned int addr1; -+ unsigned long addr2; -+ -+ err = get_user(mov1, (unsigned short __user *)regs->ip); -+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); -+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); -+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); -+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); -+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); -+ -+ if (err) -+ break; -+ -+ if (mov1 == 
0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { -+ regs->r11 = addr1; -+ regs->r10 = addr2; -+ regs->ip = addr1; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: gcc trampoline emulation #2 */ -+ unsigned short mov1, mov2, jmp1; -+ unsigned char jmp2; -+ unsigned long addr1, addr2; -+ -+ err = get_user(mov1, (unsigned short __user *)regs->ip); -+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); -+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); -+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); -+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); -+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); -+ -+ if (err) -+ break; -+ -+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { -+ regs->r11 = addr1; -+ regs->r10 = addr2; -+ regs->ip = addr1; -+ return 2; -+ } -+ } while (0); -+ -+ return 1; /* PaX in action */ -+} -+#endif -+ -+/* -+ * PaX: decide what to do with offenders (regs->ip = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when gcc trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ if (v8086_mode(regs)) -+ return 1; -+ -+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) -+ return 1; -+ -+#ifdef CONFIG_X86_32 -+ return pax_handle_fetch_fault_32(regs); -+#else -+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) -+ return pax_handle_fetch_fault_32(regs); -+ else -+ return pax_handle_fetch_fault_64(regs); -+#endif -+} -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 20; i++) { -+ unsigned char c; -+ if (get_user(c, (unsigned char __force_user *)pc+i)) -+ printk(KERN_CONT "?? "); -+ else -+ printk(KERN_CONT "%02x ", c); -+ } -+ printk("\n"); -+ -+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); -+ for (i = -1; i < 80 / (long)sizeof(long); i++) { -+ unsigned long c; -+ if (get_user(c, (unsigned long __force_user *)sp+i)) { -+#ifdef CONFIG_X86_32 -+ printk(KERN_CONT "???????? "); -+#else -+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) -+ printk(KERN_CONT "???????? ???????? "); -+ else -+ printk(KERN_CONT "???????????????? "); -+#endif -+ } else { -+#ifdef CONFIG_X86_64 -+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) { -+ printk(KERN_CONT "%08x ", (unsigned int)c); -+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32)); -+ } else -+#endif -+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); -+ } -+ } -+ printk("\n"); -+} -+#endif -+ -+/** -+ * probe_kernel_write(): safely attempt to write to a location -+ * @dst: address to write to -+ * @src: pointer to the data that shall be written -+ * @size: size of the data chunk -+ * -+ * Safely write to address @dst from the buffer at @src. If a kernel fault -+ * happens, handle that and return -EFAULT. -+ */ -+long notrace probe_kernel_write(void *dst, const void *src, size_t size) -+{ -+ long ret; -+ mm_segment_t old_fs = get_fs(); -+ -+ set_fs(KERNEL_DS); -+ pagefault_disable(); -+ pax_open_kernel(); -+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); -+ pax_close_kernel(); -+ pagefault_enable(); -+ set_fs(old_fs); -+ -+ return ret ? 
-EFAULT : 0; -+} -diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c -index ea30585..7d26398 100644 ---- a/arch/x86/mm/gup.c -+++ b/arch/x86/mm/gup.c -@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, - do { - VM_BUG_ON(compound_head(page) != head); - pages[*nr] = page; -+ if (PageTail(page)) -+ get_huge_page_tail(page); - (*nr)++; - page++; - refs++; -@@ -253,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, - addr = start; - len = (unsigned long) nr_pages << PAGE_SHIFT; - end = start + len; -- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, -+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ, - (void __user *)start, len))) - return 0; - -diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c -index f4f29b1..5cac4fb 100644 ---- a/arch/x86/mm/highmem_32.c -+++ b/arch/x86/mm/highmem_32.c -@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); -+ -+ pax_open_kernel(); - set_pte(kmap_pte-idx, mk_pte(page, prot)); -+ pax_close_kernel(); -+ - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c -index f581a18..29efd37 100644 ---- a/arch/x86/mm/hugetlbpage.c -+++ b/arch/x86/mm/hugetlbpage.c -@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, - struct hstate *h = hstate_file(file); - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; -- unsigned long start_addr; -+ unsigned long start_addr, pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; - - if (len > mm->cached_hole_size) { -- start_addr = mm->free_area_cache; -+ start_addr = mm->free_area_cache; - } else { -- start_addr = TASK_UNMAPPED_BASE; -- mm->cached_hole_size = 0; -+ start_addr = mm->mmap_base; -+ mm->cached_hole_size = 0; - } - - full_search: -@@ -280,26 +287,27 @@ full_search: - - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { - /* At this point: (!vma || addr < vma->vm_end). */ -- if (TASK_SIZE - len < addr) { -+ if (pax_task_size - len < addr) { - /* - * Start a new search - just in case we missed - * some holes. 
- */ -- if (start_addr != TASK_UNMAPPED_BASE) { -- start_addr = TASK_UNMAPPED_BASE; -+ if (start_addr != mm->mmap_base) { -+ start_addr = mm->mmap_base; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -- mm->free_area_cache = addr + len; -- return addr; -- } -+ if (check_heap_stack_gap(vma, addr, len)) -+ break; - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - addr = ALIGN(vma->vm_end, huge_page_size(h)); - } -+ -+ mm->free_area_cache = addr + len; -+ return addr; - } - - static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, -@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, - { - struct hstate *h = hstate_file(file); - struct mm_struct *mm = current->mm; -- struct vm_area_struct *vma, *prev_vma; -- unsigned long base = mm->mmap_base, addr = addr0; -+ struct vm_area_struct *vma; -+ unsigned long base = mm->mmap_base, addr; - unsigned long largest_hole = mm->cached_hole_size; -- int first_time = 1; - - /* don't allow allocations above current base */ - if (mm->free_area_cache > base) -@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, - largest_hole = 0; - mm->free_area_cache = base; - } --try_again: -+ - /* make sure it can fit in the remaining address space */ - if (mm->free_area_cache < len) - goto fail; - - /* either no address requested or can't fit in requested address hole */ -- addr = (mm->free_area_cache - len) & huge_page_mask(h); -+ addr = (mm->free_area_cache - len); - do { -+ addr &= huge_page_mask(h); -+ vma = find_vma(mm, addr); - /* - * Lookup failure means no vma is above this address, - * i.e. return with success: -- */ -- if (!(vma = find_vma_prev(mm, addr, &prev_vma))) -- return addr; -- -- /* - * new region fits between prev_vma->vm_end and - * vma->vm_start, use it: - */ -- if (addr + len <= vma->vm_start && -- (!prev_vma || (addr >= prev_vma->vm_end))) { -+ if (check_heap_stack_gap(vma, addr, len)) { - /* remember the address as a hint for next time */ -- mm->cached_hole_size = largest_hole; -- return (mm->free_area_cache = addr); -- } else { -- /* pull free_area_cache down to the first hole */ -- if (mm->free_area_cache == vma->vm_end) { -- mm->free_area_cache = vma->vm_start; -- mm->cached_hole_size = largest_hole; -- } -+ mm->cached_hole_size = largest_hole; -+ return (mm->free_area_cache = addr); -+ } -+ /* pull free_area_cache down to the first hole */ -+ if (mm->free_area_cache == vma->vm_end) { -+ mm->free_area_cache = vma->vm_start; -+ mm->cached_hole_size = largest_hole; - } - - /* remember the largest hole we saw so far */ - if (addr + largest_hole < vma->vm_start) -- largest_hole = vma->vm_start - addr; -+ largest_hole = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = (vma->vm_start - len) & huge_page_mask(h); -- } while (len <= vma->vm_start); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - fail: - /* -- * if hint left us with no space for the requested -- * mapping then try again: -- */ -- if (first_time) { -- mm->free_area_cache = base; -- largest_hole = 0; -- first_time = 0; -- goto try_again; -- } -- /* - * A failed mmap() very likely causes application failure, - * so fall back to the bottom-up function here. This scenario - * can happen with large stack limits and large mmap() - * allocations. 
- */ -- mm->free_area_cache = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; -+ else -+#endif -+ -+ mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ -+ mm->free_area_cache = mm->mmap_base; - mm->cached_hole_size = ~0UL; - addr = hugetlb_get_unmapped_area_bottomup(file, addr0, - len, pgoff, flags); -@@ -386,6 +392,7 @@ fail: - /* - * Restore the topdown base: - */ -+ mm->mmap_base = base; - mm->free_area_cache = base; - mm->cached_hole_size = ~0UL; - -@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - struct hstate *h = hstate_file(file); - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; -+ unsigned long pax_task_size = TASK_SIZE; - - if (len & ~huge_page_mask(h)) - return -EINVAL; -- if (len > TASK_SIZE) -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; -+ -+ if (len > pax_task_size) - return -ENOMEM; - - if (flags & MAP_FIXED) { -@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - if (addr) { - addr = ALIGN(addr, huge_page_size(h)); - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) - return addr; - } - if (mm->get_unmapped_area == arch_get_unmapped_area) -diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 87488b9..7129f32 100644 ---- a/arch/x86/mm/init.c -+++ b/arch/x86/mm/init.c -@@ -31,7 +31,7 @@ int direct_gbpages - static void __init find_early_table_space(unsigned long end, int use_pse, - int use_gbpages) - { -- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; -+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end; - phys_addr_t base; - - puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; -@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, - */ - int devmem_is_allowed(unsigned long pagenr) - { -+#ifdef CONFIG_GRKERNSEC_KMEM -+ /* allow BDA */ -+ if (!pagenr) -+ return 1; -+ /* allow EBDA */ -+ if ((0x9f000 >> PAGE_SHIFT) == pagenr) -+ return 1; -+#else -+ if (!pagenr) -+ return 1; -+#ifdef CONFIG_VM86 -+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT)) -+ return 1; -+#endif -+#endif -+ -+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) -+ return 1; -+#ifdef CONFIG_GRKERNSEC_KMEM -+ /* throw out everything else below 1MB */ - if (pagenr <= 256) -- return 1; -+ return 0; -+#endif - if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) - return 0; - if (!page_is_ram(pagenr)) -@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) - - void free_initmem(void) - { -+ -+#ifdef CONFIG_PAX_KERNEXEC -+#ifdef CONFIG_X86_32 -+ /* PaX: limit KERNEL_CS to actual size */ -+ unsigned long addr, limit; -+ struct desc_struct d; -+ int cpu; -+ -+ limit = paravirt_enabled() ? 
ktva_ktla(0xffffffff) : (unsigned long)&_etext; -+ limit = (limit - 1UL) >> PAGE_SHIFT; -+ -+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); -+ for (cpu = 0; cpu < NR_CPUS; cpu++) { -+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); -+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); -+ } -+ -+ /* PaX: make KERNEL_CS read-only */ -+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); -+ if (!paravirt_enabled()) -+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); -+/* -+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); -+ } -+*/ -+#ifdef CONFIG_X86_PAE -+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); -+/* -+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); -+ } -+*/ -+#endif -+ -+#ifdef CONFIG_MODULES -+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); -+#endif -+ -+#else -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ unsigned long addr, end; -+ -+ /* PaX: make kernel code/rodata read-only, rest non-executable */ -+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ if (!pmd_present(*pmd)) -+ continue; -+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) -+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); -+ else -+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); -+ } -+ -+ addr = (unsigned long)__va(__pa(__START_KERNEL_map)); -+ end = addr + KERNEL_IMAGE_SIZE; -+ for (; addr < end; addr += PMD_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ if (!pmd_present(*pmd)) -+ continue; -+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) -+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); -+ } -+#endif -+ -+ flush_tlb_all(); -+#endif -+ - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); -diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 29f7c6d..b46b35b 100644 ---- a/arch/x86/mm/init_32.c -+++ b/arch/x86/mm/init_32.c -@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void) - } - - /* -- * Creates a middle page table and puts a pointer to it in the -- * given global directory entry. This only returns the gd entry -- * in non-PAE compilation mode, since the middle layer is folded. 
-- */ --static pmd_t * __init one_md_table_init(pgd_t *pgd) --{ -- pud_t *pud; -- pmd_t *pmd_table; -- --#ifdef CONFIG_X86_PAE -- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { -- if (after_bootmem) -- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); -- else -- pmd_table = (pmd_t *)alloc_low_page(); -- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); -- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); -- pud = pud_offset(pgd, 0); -- BUG_ON(pmd_table != pmd_offset(pud, 0)); -- -- return pmd_table; -- } --#endif -- pud = pud_offset(pgd, 0); -- pmd_table = pmd_offset(pud, 0); -- -- return pmd_table; --} -- --/* - * Create a page table and place a pointer to it in a middle page - * directory entry: - */ -@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) - page_table = (pte_t *)alloc_low_page(); - - paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); -+#else - set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); -+#endif - BUG_ON(page_table != pte_offset_kernel(pmd, 0)); - } - - return pte_offset_kernel(pmd, 0); - } - -+static pmd_t * __init one_md_table_init(pgd_t *pgd) -+{ -+ pud_t *pud; -+ pmd_t *pmd_table; -+ -+ pud = pud_offset(pgd, 0); -+ pmd_table = pmd_offset(pud, 0); -+ -+ return pmd_table; -+} -+ - pmd_t * __init populate_extra_pmd(unsigned long vaddr) - { - int pgd_idx = pgd_index(vaddr); -@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) - int pgd_idx, pmd_idx; - unsigned long vaddr; - pgd_t *pgd; -+ pud_t *pud; - pmd_t *pmd; - pte_t *pte = NULL; - -@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) - pgd = pgd_base + pgd_idx; - - for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { -- pmd = one_md_table_init(pgd); -- pmd = pmd + pmd_index(vaddr); -+ pud = pud_offset(pgd, vaddr); -+ pmd = pmd_offset(pud, vaddr); -+ -+#ifdef CONFIG_X86_PAE -+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); -+#endif -+ - for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); - pmd++, pmd_idx++) { - pte = page_table_kmap_check(one_page_table_init(pmd), -@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) - } - } - --static inline int is_kernel_text(unsigned long addr) -+static inline int is_kernel_text(unsigned long start, unsigned long end) - { -- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) -- return 1; -- return 0; -+ if ((start > ktla_ktva((unsigned long)_etext) || -+ end <= ktla_ktva((unsigned long)_stext)) && -+ (start > ktla_ktva((unsigned long)_einittext) || -+ end <= ktla_ktva((unsigned long)_sinittext)) && -+ -+#ifdef CONFIG_ACPI_SLEEP -+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && -+#endif -+ -+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) -+ return 0; -+ return 1; - } - - /* -@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start, - unsigned long last_map_addr = end; - unsigned long start_pfn, end_pfn; - pgd_t *pgd_base = swapper_pg_dir; -- int pgd_idx, pmd_idx, pte_ofs; -+ unsigned int pgd_idx, pmd_idx, pte_ofs; - unsigned long pfn; - pgd_t *pgd; -+ pud_t *pud; - pmd_t *pmd; - pte_t *pte; - unsigned pages_2m, pages_4k; -@@ -281,8 +282,13 @@ repeat: - pfn = start_pfn; - pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + 
PAGE_OFFSET); - pgd = pgd_base + pgd_idx; -- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { -- pmd = one_md_table_init(pgd); -+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) { -+ pud = pud_offset(pgd, 0); -+ pmd = pmd_offset(pud, 0); -+ -+#ifdef CONFIG_X86_PAE -+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); -+#endif - - if (pfn >= end_pfn) - continue; -@@ -294,14 +300,13 @@ repeat: - #endif - for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; - pmd++, pmd_idx++) { -- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; -+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; - - /* - * Map with big pages if possible, otherwise - * create normal page tables: - */ - if (use_pse) { -- unsigned int addr2; - pgprot_t prot = PAGE_KERNEL_LARGE; - /* - * first pass will use the same initial -@@ -311,11 +316,7 @@ repeat: - __pgprot(PTE_IDENT_ATTR | - _PAGE_PSE); - -- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + -- PAGE_OFFSET + PAGE_SIZE-1; -- -- if (is_kernel_text(addr) || -- is_kernel_text(addr2)) -+ if (is_kernel_text(address, address + PMD_SIZE)) - prot = PAGE_KERNEL_LARGE_EXEC; - - pages_2m++; -@@ -332,7 +333,7 @@ repeat: - pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); - pte += pte_ofs; - for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; -- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { -+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) { - pgprot_t prot = PAGE_KERNEL; - /* - * first pass will use the same initial -@@ -340,7 +341,7 @@ repeat: - */ - pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); - -- if (is_kernel_text(addr)) -+ if (is_kernel_text(address, address + PAGE_SIZE)) - prot = PAGE_KERNEL_EXEC; - - pages_4k++; -@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base) - - pud = pud_offset(pgd, va); - pmd = pmd_offset(pud, va); -- if (!pmd_present(*pmd)) -+ if (!pmd_present(*pmd) || pmd_huge(*pmd)) - break; - - pte = pte_offset_kernel(pmd, va); -@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void) - - static void __init pagetable_init(void) - { -- pgd_t *pgd_base = swapper_pg_dir; -- -- permanent_kmaps_init(pgd_base); -+ permanent_kmaps_init(swapper_pg_dir); - } - --pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); -+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); - EXPORT_SYMBOL_GPL(__supported_pte_mask); - - /* user-defined highmem size */ -@@ -757,6 +756,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - #ifdef CONFIG_FLATMEM - BUG_ON(!mem_map); - #endif -@@ -774,7 +779,7 @@ void __init mem_init(void) - set_highmem_pages_init(); - - codesize = (unsigned long) &_etext - (unsigned long) &_text; -- datasize = (unsigned long) &_edata - (unsigned long) &_etext; -+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " -@@ -815,10 +820,10 @@ void __init mem_init(void) - ((unsigned long)&__init_end - - (unsigned long)&__init_begin) >> 10, - -- (unsigned long)&_etext, (unsigned long)&_edata, -- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, -+ (unsigned long)&_sdata, (unsigned long)&_edata, -+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, - -- (unsigned long)&_text, (unsigned long)&_etext, -+ 
ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), - ((unsigned long)&_etext - (unsigned long)&_text) >> 10); - - /* -@@ -896,6 +901,7 @@ void set_kernel_text_rw(void) - if (!kernel_set_to_readonly) - return; - -+ start = ktla_ktva(start); - pr_debug("Set kernel text: %lx - %lx for read write\n", - start, start+size); - -@@ -910,6 +916,7 @@ void set_kernel_text_ro(void) - if (!kernel_set_to_readonly) - return; - -+ start = ktla_ktva(start); - pr_debug("Set kernel text: %lx - %lx for read only\n", - start, start+size); - -@@ -938,6 +945,7 @@ void mark_rodata_ro(void) - unsigned long start = PFN_ALIGN(_text); - unsigned long size = PFN_ALIGN(_etext) - start; - -+ start = ktla_ktva(start); - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", - size >> 10); -diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index bbaaa00..16dffad 100644 ---- a/arch/x86/mm/init_64.c -+++ b/arch/x86/mm/init_64.c -@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on); - * around without checking the pgd every time. - */ - --pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; -+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); - EXPORT_SYMBOL_GPL(__supported_pte_mask); - - int force_personality32; -@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end) - - for (address = start; address <= end; address += PGDIR_SIZE) { - const pgd_t *pgd_ref = pgd_offset_k(address); -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ unsigned long cpu; -+#else - struct page *page; -+#endif - - if (pgd_none(*pgd_ref)) - continue; - - spin_lock(&pgd_lock); -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ pgd_t *pgd = pgd_offset_cpu(cpu, address); -+#else - list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; - spinlock_t *pgt_lock; -@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) - /* the pgt_lock only for Xen */ - pgt_lock = &pgd_page_get_mm(page)->page_table_lock; - spin_lock(pgt_lock); -+#endif - - if (pgd_none(*pgd)) - set_pgd(pgd, *pgd_ref); -@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end) - BUG_ON(pgd_page_vaddr(*pgd) - != pgd_page_vaddr(*pgd_ref)); - -+#ifndef CONFIG_PAX_PER_CPU_PGD - spin_unlock(pgt_lock); -+#endif -+ - } - spin_unlock(&pgd_lock); - } -@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) - pmd = fill_pmd(pud, vaddr); - pte = fill_pte(pmd, vaddr); - -+ pax_open_kernel(); - set_pte(pte, new_pte); -+ pax_close_kernel(); - - /* - * It's enough to flush this one mapping. 
-@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, - pgd = pgd_offset_k((unsigned long)__va(phys)); - if (pgd_none(*pgd)) { - pud = (pud_t *) spp_getpage(); -- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | -- _PAGE_USER)); -+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); - } - pud = pud_offset(pgd, (unsigned long)__va(phys)); - if (pud_none(*pud)) { - pmd = (pmd_t *) spp_getpage(); -- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | -- _PAGE_USER)); -+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); - } - pmd = pmd_offset(pud, phys); - BUG_ON(!pmd_none(*pmd)); -@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys) - if (pfn >= pgt_buf_top) - panic("alloc_low_page: ran out of memory"); - -- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); -+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); - clear_page(adr); - *phys = pfn * PAGE_SIZE; - return adr; -@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt) - - phys = __pa(virt); - left = phys & (PAGE_SIZE - 1); -- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); -+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE); - adr = (void *)(((unsigned long)adr) | left); - - return adr; -@@ -693,6 +707,12 @@ void __init mem_init(void) - - pci_iommu_alloc(); - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, -+ swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ KERNEL_PGD_PTRS); -+#endif -+ - /* clear_bss() already clear the empty_zero_page */ - - reservedpages = 0; -@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr) - static struct vm_area_struct gate_vma = { - .vm_start = VSYSCALL_START, - .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), -- .vm_page_prot = PAGE_READONLY_EXEC, -- .vm_flags = VM_READ | VM_EXEC -+ .vm_page_prot = PAGE_READONLY, -+ .vm_flags = VM_READ - }; - - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) -@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr) - - const char *arch_vma_name(struct vm_area_struct *vma) - { -- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) -+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) - return "[vdso]"; - if (vma == &gate_vma) - return "[vsyscall]"; -diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c -index 7b179b4..6bd1777 100644 ---- a/arch/x86/mm/iomap_32.c -+++ b/arch/x86/mm/iomap_32.c -@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ -+ pax_open_kernel(); - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); -+ pax_close_kernel(); -+ - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c -index be1ef57..9680edc 100644 ---- a/arch/x86/mm/ioremap.c -+++ b/arch/x86/mm/ioremap.c -@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { - int is_ram = page_is_ram(pfn); - -- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) -+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn)))) - return NULL; - WARN_ON_ONCE(is_ram); - } -@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str) - early_param("early_ioremap_debug", early_ioremap_debug_setup); - - static __initdata int after_paging_init; --static pte_t 
bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; -+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); - - static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) - { -@@ -381,8 +381,7 @@ void __init early_ioremap_init(void) - slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); - - pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); -- memset(bm_pte, 0, sizeof(bm_pte)); -- pmd_populate_kernel(&init_mm, pmd, bm_pte); -+ pmd_populate_user(&init_mm, pmd, bm_pte); - - /* - * The boot-ioremap range spans multiple pmds, for which -diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c -index d87dd6d..bf3fa66 100644 ---- a/arch/x86/mm/kmemcheck/kmemcheck.c -+++ b/arch/x86/mm/kmemcheck/kmemcheck.c -@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, - * memory (e.g. tracked pages)? For now, we need this to avoid - * invoking kmemcheck for PnP BIOS calls. - */ -- if (regs->flags & X86_VM_MASK) -+ if (v8086_mode(regs)) - return false; -- if (regs->cs != __KERNEL_CS) -+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) - return false; - - pte = kmemcheck_pte_lookup(address); -diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c -index 1dab519..60a7e5f 100644 ---- a/arch/x86/mm/mmap.c -+++ b/arch/x86/mm/mmap.c -@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void) - * Leave an at least ~128 MB hole with possible stack randomization. - */ - #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) --#define MAX_GAP (TASK_SIZE/6*5) -+#define MAX_GAP (pax_task_size/6*5) - - /* - * True on X86_32 or when emulating IA32 on X86_64 -@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void) - return rnd << PAGE_SHIFT; - } - --static unsigned long mmap_base(void) -+static unsigned long mmap_base(struct mm_struct *mm) - { - unsigned long gap = rlimit(RLIMIT_STACK); -+ unsigned long pax_task_size = TASK_SIZE; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif - - if (gap < MIN_GAP) - gap = MIN_GAP; - else if (gap > MAX_GAP) - gap = MAX_GAP; - -- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); -+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); - } - - /* - * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 - * does, but not when emulating X86_32 - */ --static unsigned long mmap_legacy_base(void) -+static unsigned long mmap_legacy_base(struct mm_struct *mm) - { -- if (mmap_is_ia32()) -+ if (mmap_is_ia32()) { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ return SEGMEXEC_TASK_UNMAPPED_BASE; -+ else -+#endif -+ - return TASK_UNMAPPED_BASE; -- else -+ } else - return TASK_UNMAPPED_BASE + mmap_rnd(); - } - -@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void) - void arch_pick_mmap_layout(struct mm_struct *mm) - { - if (mmap_is_legacy()) { -- mm->mmap_base = mmap_legacy_base(); -+ mm->mmap_base = mmap_legacy_base(mm); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } else { -- mm->mmap_base = mmap_base(); -+ mm->mmap_base = mmap_base(mm); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; - } 
-diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c -index 67421f3..8d6b107 100644 ---- a/arch/x86/mm/mmio-mod.c -+++ b/arch/x86/mm/mmio-mod.c -@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs, - break; - default: - { -- unsigned char *ip = (unsigned char *)instptr; -+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr); - my_trace->opcode = MMIO_UNKNOWN_OP; - my_trace->width = 0; - my_trace->value = (*ip) << 16 | *(ip + 1) << 8 | -@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition, - static void ioremap_trace_core(resource_size_t offset, unsigned long size, - void __iomem *addr) - { -- static atomic_t next_id; -+ static atomic_unchecked_t next_id; - struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); - /* These are page-unaligned. */ - struct mmiotrace_map map = { -@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, - .private = trace - }, - .phys = offset, -- .id = atomic_inc_return(&next_id) -+ .id = atomic_inc_return_unchecked(&next_id) - }; - map.map_id = trace->id; - -diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c -index b008656..773eac2 100644 ---- a/arch/x86/mm/pageattr-test.c -+++ b/arch/x86/mm/pageattr-test.c -@@ -36,7 +36,7 @@ enum { - - static int pte_testbit(pte_t pte) - { -- return pte_flags(pte) & _PAGE_UNUSED1; -+ return pte_flags(pte) & _PAGE_CPA_TEST; - } - - struct split_state { -diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c -index f9e5267..6f6e27f 100644 ---- a/arch/x86/mm/pageattr.c -+++ b/arch/x86/mm/pageattr.c -@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - */ - #ifdef CONFIG_PCI_BIOS - if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) -- pgprot_val(forbidden) |= _PAGE_NX; -+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; - #endif - - /* -@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - * Does not cover __inittext since that is gone later on. On - * 64bit we do not enforce !NX on the low mapping - */ -- if (within(address, (unsigned long)_text, (unsigned long)_etext)) -- pgprot_val(forbidden) |= _PAGE_NX; -+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) -+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; - -+#ifdef CONFIG_DEBUG_RODATA - /* - * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. 
-@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, - __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_RW; -+#endif - - #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) - /* -@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - } - #endif - -+#ifdef CONFIG_PAX_KERNEXEC -+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) { -+ pgprot_val(forbidden) |= _PAGE_RW; -+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; -+ } -+#endif -+ - prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); - - return prot; -@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address); - static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) - { - /* change init_mm */ -+ pax_open_kernel(); - set_pte_atomic(kpte, pte); -+ - #ifdef CONFIG_X86_32 - if (!SHARED_KERNEL_PMD) { -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ unsigned long cpu; -+#else - struct page *page; -+#endif - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ for (cpu = 0; cpu < NR_CPUS; ++cpu) { -+ pgd_t *pgd = get_cpu_pgd(cpu); -+#else - list_for_each_entry(page, &pgd_list, lru) { -- pgd_t *pgd; -+ pgd_t *pgd = (pgd_t *)page_address(page); -+#endif -+ - pud_t *pud; - pmd_t *pmd; - -- pgd = (pgd_t *)page_address(page) + pgd_index(address); -+ pgd += pgd_index(address); - pud = pud_offset(pgd, address); - pmd = pmd_offset(pud, address); - set_pte_atomic((pte_t *)pmd, pte); - } - } - #endif -+ pax_close_kernel(); - } - - static int -diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c -index f6ff57b..481690f 100644 ---- a/arch/x86/mm/pat.c -+++ b/arch/x86/mm/pat.c -@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end) - - if (!entry) { - printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", -- current->comm, current->pid, start, end); -+ current->comm, task_pid_nr(current), start, end); - return -EINVAL; - } - -@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) - while (cursor < to) { - if (!devmem_is_allowed(pfn)) { - printk(KERN_INFO -- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", -- current->comm, from, to); -+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", -+ current->comm, from, to, cursor); - return 0; - } - cursor += PAGE_SIZE; -@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) - printk(KERN_INFO - "%s:%d ioremap_change_attr failed %s " - "for %Lx-%Lx\n", -- current->comm, current->pid, -+ current->comm, task_pid_nr(current), - cattr_name(flags), - base, (unsigned long long)(base + size)); - return -EINVAL; -@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, - if (want_flags != flags) { - printk(KERN_WARNING - "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", -- current->comm, current->pid, -+ current->comm, task_pid_nr(current), - cattr_name(want_flags), - (unsigned long long)paddr, - (unsigned long long)(paddr + size), -@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, - free_memtype(paddr, paddr + size); - printk(KERN_ERR "%s:%d map pfn expected mapping type %s" - " for %Lx-%Lx, got %s\n", -- current->comm, current->pid, -+ current->comm, task_pid_nr(current), - cattr_name(want_flags), - (unsigned long long)paddr, - (unsigned long long)(paddr + size), -diff --git 
a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c -index 9f0614d..92ae64a 100644 ---- a/arch/x86/mm/pf_in.c -+++ b/arch/x86/mm/pf_in.c -@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr) - int i; - enum reason_type rv = OTHERS; - -- p = (unsigned char *)ins_addr; -+ p = (unsigned char *)ktla_ktva(ins_addr); - p += skip_prefix(p, &prf); - p += get_opcode(p, &opcode); - -@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) - struct prefix_bits prf; - int i; - -- p = (unsigned char *)ins_addr; -+ p = (unsigned char *)ktla_ktva(ins_addr); - p += skip_prefix(p, &prf); - p += get_opcode(p, &opcode); - -@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) - struct prefix_bits prf; - int i; - -- p = (unsigned char *)ins_addr; -+ p = (unsigned char *)ktla_ktva(ins_addr); - p += skip_prefix(p, &prf); - p += get_opcode(p, &opcode); - -@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) - struct prefix_bits prf; - int i; - -- p = (unsigned char *)ins_addr; -+ p = (unsigned char *)ktla_ktva(ins_addr); - p += skip_prefix(p, &prf); - p += get_opcode(p, &opcode); - for (i = 0; i < ARRAY_SIZE(reg_rop); i++) -@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr) - struct prefix_bits prf; - int i; - -- p = (unsigned char *)ins_addr; -+ p = (unsigned char *)ktla_ktva(ins_addr); - p += skip_prefix(p, &prf); - p += get_opcode(p, &opcode); - for (i = 0; i < ARRAY_SIZE(imm_wop); i++) -diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c -index 8573b83..6372501 100644 ---- a/arch/x86/mm/pgtable.c -+++ b/arch/x86/mm/pgtable.c -@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd) - list_del(&page->lru); - } - --#define UNSHARED_PTRS_PER_PGD \ -- (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; - -+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) -+{ -+ while (count--) -+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER); -+} -+#endif - -+#ifdef CONFIG_PAX_PER_CPU_PGD -+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count) -+{ -+ while (count--) -+ -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask); -+#else -+ *dst++ = *src++; -+#endif -+ -+} -+#endif -+ -+#ifdef CONFIG_X86_64 -+#define pxd_t pud_t -+#define pyd_t pgd_t -+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) -+#define pxd_free(mm, pud) pud_free((mm), (pud)) -+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) -+#define pyd_offset(mm ,address) pgd_offset((mm), (address)) -+#define PYD_SIZE PGDIR_SIZE -+#else -+#define pxd_t pmd_t -+#define pyd_t pud_t -+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) -+#define pxd_free(mm, pud) pmd_free((mm), (pud)) -+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) -+#define pyd_offset(mm ,address) pud_offset((mm), (address)) -+#define PYD_SIZE PUD_SIZE -+#endif -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {} -+static inline void pgd_dtor(pgd_t *pgd) {} -+#else - static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) - { - BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); -@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd) - pgd_list_del(pgd); - spin_unlock(&pgd_lock); - } -+#endif - - /* - * List of all pgd's needed for non-PAE so it can invalidate entries -@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd) - * -- wli - */ - --#ifdef CONFIG_X86_PAE -+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) - /* - * In PAE mode, we need to do a cr3 reload (=tlb flush) when - * updating the top-level pagetable entries to guarantee the -@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd) - * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate - * and initialize the kernel pmds here. - */ --#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD -+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) - - void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) - { -@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) - */ - flush_tlb_mm(mm); - } -+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) -+#define PREALLOCATED_PXDS USER_PGD_PTRS - #else /* !CONFIG_X86_PAE */ - - /* No need to prepopulate any pagetable entries in non-PAE modes. 
*/ --#define PREALLOCATED_PMDS 0 -+#define PREALLOCATED_PXDS 0 - - #endif /* CONFIG_X86_PAE */ - --static void free_pmds(pmd_t *pmds[]) -+static void free_pxds(pxd_t *pxds[]) - { - int i; - -- for(i = 0; i < PREALLOCATED_PMDS; i++) -- if (pmds[i]) -- free_page((unsigned long)pmds[i]); -+ for(i = 0; i < PREALLOCATED_PXDS; i++) -+ if (pxds[i]) -+ free_page((unsigned long)pxds[i]); - } - --static int preallocate_pmds(pmd_t *pmds[]) -+static int preallocate_pxds(pxd_t *pxds[]) - { - int i; - bool failed = false; - -- for(i = 0; i < PREALLOCATED_PMDS; i++) { -- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); -- if (pmd == NULL) -+ for(i = 0; i < PREALLOCATED_PXDS; i++) { -+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); -+ if (pxd == NULL) - failed = true; -- pmds[i] = pmd; -+ pxds[i] = pxd; - } - - if (failed) { -- free_pmds(pmds); -+ free_pxds(pxds); - return -ENOMEM; - } - -@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[]) - * preallocate which never got a corresponding vma will need to be - * freed manually. - */ --static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) -+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) - { - int i; - -- for(i = 0; i < PREALLOCATED_PMDS; i++) { -+ for(i = 0; i < PREALLOCATED_PXDS; i++) { - pgd_t pgd = pgdp[i]; - - if (pgd_val(pgd) != 0) { -- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); -+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); - -- pgdp[i] = native_make_pgd(0); -+ set_pgd(pgdp + i, native_make_pgd(0)); - -- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); -- pmd_free(mm, pmd); -+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); -+ pxd_free(mm, pxd); - } - } - } - --static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) -+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) - { -- pud_t *pud; -+ pyd_t *pyd; - unsigned long addr; - int i; - -- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ -+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ - return; - -- pud = pud_offset(pgd, 0); -+#ifdef CONFIG_X86_64 -+ pyd = pyd_offset(mm, 0L); -+#else -+ pyd = pyd_offset(pgd, 0L); -+#endif - -- for (addr = i = 0; i < PREALLOCATED_PMDS; -- i++, pud++, addr += PUD_SIZE) { -- pmd_t *pmd = pmds[i]; -+ for (addr = i = 0; i < PREALLOCATED_PXDS; -+ i++, pyd++, addr += PYD_SIZE) { -+ pxd_t *pxd = pxds[i]; - - if (i >= KERNEL_PGD_BOUNDARY) -- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), -- sizeof(pmd_t) * PTRS_PER_PMD); -+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), -+ sizeof(pxd_t) * PTRS_PER_PMD); - -- pud_populate(mm, pud, pmd); -+ pyd_populate(mm, pyd, pxd); - } - } - - pgd_t *pgd_alloc(struct mm_struct *mm) - { - pgd_t *pgd; -- pmd_t *pmds[PREALLOCATED_PMDS]; -+ pxd_t *pxds[PREALLOCATED_PXDS]; - - pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); - -@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) - - mm->pgd = pgd; - -- if (preallocate_pmds(pmds) != 0) -+ if (preallocate_pxds(pxds) != 0) - goto out_free_pgd; - - if (paravirt_pgd_alloc(mm) != 0) -- goto out_free_pmds; -+ goto out_free_pxds; - - /* - * Make sure that pre-populating the pmds is atomic with -@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) - spin_lock(&pgd_lock); - - pgd_ctor(mm, pgd); -- pgd_prepopulate_pmd(mm, pgd, pmds); -+ pgd_prepopulate_pxd(mm, pgd, pxds); - - spin_unlock(&pgd_lock); - - return pgd; - --out_free_pmds: -- free_pmds(pmds); -+out_free_pxds: -+ free_pxds(pxds); - out_free_pgd: - free_page((unsigned long)pgd); - out: -@@ -295,7 
+344,7 @@ out: - - void pgd_free(struct mm_struct *mm, pgd_t *pgd) - { -- pgd_mop_up_pmds(mm, pgd); -+ pgd_mop_up_pxds(mm, pgd); - pgd_dtor(pgd); - paravirt_pgd_free(mm, pgd); - free_page((unsigned long)pgd); -diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c -index cac7184..09a39fa 100644 ---- a/arch/x86/mm/pgtable_32.c -+++ b/arch/x86/mm/pgtable_32.c -@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) - return; - } - pte = pte_offset_kernel(pmd, vaddr); -+ -+ pax_open_kernel(); - if (pte_val(pteval)) - set_pte_at(&init_mm, vaddr, pte, pteval); - else - pte_clear(&init_mm, vaddr, pte); -+ pax_close_kernel(); - - /* - * It's enough to flush this one mapping. -diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c -index 410531d..0f16030 100644 ---- a/arch/x86/mm/setup_nx.c -+++ b/arch/x86/mm/setup_nx.c -@@ -5,8 +5,10 @@ - #include <asm/pgtable.h> - #include <asm/proto.h> - -+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) - static int disable_nx __cpuinitdata; - -+#ifndef CONFIG_PAX_PAGEEXEC - /* - * noexec = on|off - * -@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str) - return 0; - } - early_param("noexec", noexec_setup); -+#endif -+ -+#endif - - void __cpuinit x86_configure_nx(void) - { -+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) - if (cpu_has_nx && !disable_nx) - __supported_pte_mask |= _PAGE_NX; - else -+#endif - __supported_pte_mask &= ~_PAGE_NX; - } - -diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c -index d6c0418..06a0ad5 100644 ---- a/arch/x86/mm/tlb.c -+++ b/arch/x86/mm/tlb.c -@@ -65,7 +65,11 @@ void leave_mm(int cpu) - BUG(); - cpumask_clear_cpu(cpu, - mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); -+ -+#ifndef CONFIG_PAX_PER_CPU_PGD - load_cr3(swapper_pg_dir); -+#endif -+ - } - EXPORT_SYMBOL_GPL(leave_mm); - -diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S -index 6687022..ceabcfa 100644 ---- a/arch/x86/net/bpf_jit.S -+++ b/arch/x86/net/bpf_jit.S -@@ -9,6 +9,7 @@ - */ - #include <linux/linkage.h> - #include <asm/dwarf2.h> -+#include <asm/alternative-asm.h> - - /* - * Calling convention : -@@ -35,6 +36,7 @@ sk_load_word: - jle bpf_slow_path_word - mov (SKBDATA,%rsi),%eax - bswap %eax /* ntohl() */ -+ pax_force_retaddr - ret - - -@@ -53,6 +55,7 @@ sk_load_half: - jle bpf_slow_path_half - movzwl (SKBDATA,%rsi),%eax - rol $8,%ax # ntohs() -+ pax_force_retaddr - ret - - sk_load_byte_ind: -@@ -66,6 +69,7 @@ sk_load_byte: - cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ - jle bpf_slow_path_byte - movzbl (SKBDATA,%rsi),%eax -+ pax_force_retaddr - ret - - /** -@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh) - movzbl (SKBDATA,%rsi),%ebx - and $15,%bl - shl $2,%bl -+ pax_force_retaddr - ret - CFI_ENDPROC - ENDPROC(sk_load_byte_msh) -@@ -91,6 +96,7 @@ bpf_error: - xor %eax,%eax - mov -8(%rbp),%rbx - leaveq -+ pax_force_retaddr - ret - - /* rsi contains offset and can be scratched */ -@@ -113,6 +119,7 @@ bpf_slow_path_word: - js bpf_error - mov -12(%rbp),%eax - bswap %eax -+ pax_force_retaddr - ret - - bpf_slow_path_half: -@@ -121,12 +128,14 @@ bpf_slow_path_half: - mov -12(%rbp),%ax - rol $8,%ax - movzwl %ax,%eax -+ pax_force_retaddr - ret - - bpf_slow_path_byte: - bpf_slow_path_common(1) - js bpf_error - movzbl -12(%rbp),%eax -+ pax_force_retaddr - ret - - bpf_slow_path_byte_msh: -@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh: - and $15,%al - shl $2,%al - xchg %eax,%ebx -+ pax_force_retaddr - ret -diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c -index bfab3fa..05aac3a 
100644 ---- a/arch/x86/net/bpf_jit_comp.c -+++ b/arch/x86/net/bpf_jit_comp.c -@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end) - set_fs(old_fs); - } - -+struct bpf_jit_work { -+ struct work_struct work; -+ void *image; -+}; - - void bpf_jit_compile(struct sk_filter *fp) - { -@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp) - if (addrs == NULL) - return; - -+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL); -+ if (!fp->work) -+ goto out; -+ - /* Before first pass, make a rough estimation of addrs[] - * each bpf instruction is translated to less than 64 bytes - */ -@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - if (image) { - if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpb_jit_compile fatal error\n"); -- kfree(addrs); -- module_free(NULL, image); -- return; -+ module_free_exec(NULL, image); -+ goto out; - } -+ pax_open_kernel(); - memcpy(image + proglen, temp, ilen); -+ pax_close_kernel(); - } - proglen += ilen; - addrs[i] = proglen; -@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - break; - } - if (proglen == oldproglen) { -- image = module_alloc(max_t(unsigned int, -+ image = module_alloc_exec(max_t(unsigned int, - proglen, - sizeof(struct work_struct))); - if (!image) -@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - fp->bpf_func = (void *)image; - } - out: -+ kfree(fp->work); - kfree(addrs); - return; - } - - static void jit_free_defer(struct work_struct *arg) - { -- module_free(NULL, arg); -+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image); -+ kfree(arg); - } - - /* run from softirq, we must use a work_struct to call -- * module_free() from process context -+ * module_free_exec() from process context - */ - void bpf_jit_free(struct sk_filter *fp) - { - if (fp->bpf_func != sk_run_filter) { -- struct work_struct *work = (struct work_struct *)fp->bpf_func; -+ struct work_struct *work = &fp->work->work; - - INIT_WORK(work, jit_free_defer); -+ fp->work->image = fp->bpf_func; - schedule_work(work); - } - } -diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c -index bff89df..377758a 100644 ---- a/arch/x86/oprofile/backtrace.c -+++ b/arch/x86/oprofile/backtrace.c -@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head) - struct stack_frame_ia32 *fp; - unsigned long bytes; - -- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); -+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); - if (bytes != sizeof(bufhead)) - return NULL; - -- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); -+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame); - - oprofile_add_trace(bufhead[0].return_address); - -@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head) - struct stack_frame bufhead[2]; - unsigned long bytes; - -- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); -+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); - if (bytes != sizeof(bufhead)) - return NULL; - -@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) - { - struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); - -- if (!user_mode_vm(regs)) { -+ if (!user_mode(regs)) { - unsigned long stack = kernel_stack_pointer(regs); - if (depth) - dump_trace(NULL, regs, (unsigned long *)stack, 0, -diff --git 
a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c -index cb29191..036766d 100644 ---- a/arch/x86/pci/mrst.c -+++ b/arch/x86/pci/mrst.c -@@ -234,7 +234,9 @@ int __init pci_mrst_init(void) - printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n"); - pci_mmcfg_late_init(); - pcibios_enable_irq = mrst_pci_irq_enable; -- pci_root_ops = pci_mrst_ops; -+ pax_open_kernel(); -+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops)); -+ pax_close_kernel(); - /* Continue with standard init */ - return 1; - } -diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c -index f685535..2b76a81 100644 ---- a/arch/x86/pci/pcbios.c -+++ b/arch/x86/pci/pcbios.c -@@ -79,50 +79,93 @@ union bios32 { - static struct { - unsigned long address; - unsigned short segment; --} bios32_indirect = { 0, __KERNEL_CS }; -+} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; - - /* - * Returns the entry point for the given service, NULL on error - */ - --static unsigned long bios32_service(unsigned long service) -+static unsigned long __devinit bios32_service(unsigned long service) - { - unsigned char return_code; /* %al */ - unsigned long address; /* %ebx */ - unsigned long length; /* %ecx */ - unsigned long entry; /* %edx */ - unsigned long flags; -+ struct desc_struct d, *gdt; - - local_irq_save(flags); -- __asm__("lcall *(%%edi); cld" -+ -+ gdt = get_cpu_gdt_table(smp_processor_id()); -+ -+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); -+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); -+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); -+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); -+ -+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" - : "=a" (return_code), - "=b" (address), - "=c" (length), - "=d" (entry) - : "0" (service), - "1" (0), -- "D" (&bios32_indirect)); -+ "D" (&bios32_indirect), -+ "r"(__PCIBIOS_DS) -+ : "memory"); -+ -+ pax_open_kernel(); -+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; -+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; -+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; -+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; -+ pax_close_kernel(); -+ - local_irq_restore(flags); - - switch (return_code) { -- case 0: -- return address + entry; -- case 0x80: /* Not present */ -- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); -- return 0; -- default: /* Shouldn't happen */ -- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", -- service, return_code); -+ case 0: { -+ int cpu; -+ unsigned char flags; -+ -+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); -+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { -+ printk(KERN_WARNING "bios32_service: not valid\n"); - return 0; -+ } -+ address = address + PAGE_OFFSET; -+ length += 16UL; /* some BIOSs underreport this... 
*/ -+ flags = 4; -+ if (length >= 64*1024*1024) { -+ length >>= PAGE_SHIFT; -+ flags |= 8; -+ } -+ -+ for (cpu = 0; cpu < NR_CPUS; cpu++) { -+ gdt = get_cpu_gdt_table(cpu); -+ pack_descriptor(&d, address, length, 0x9b, flags); -+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); -+ pack_descriptor(&d, address, length, 0x93, flags); -+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); -+ } -+ return entry; -+ } -+ case 0x80: /* Not present */ -+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); -+ return 0; -+ default: /* Shouldn't happen */ -+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", -+ service, return_code); -+ return 0; - } - } - - static struct { - unsigned long address; - unsigned short segment; --} pci_indirect = { 0, __KERNEL_CS }; -+} pci_indirect __read_only = { 0, __PCIBIOS_CS }; - --static int pci_bios_present; -+static int pci_bios_present __read_only; - - static int __devinit check_pcibios(void) - { -@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void) - unsigned long flags, pcibios_entry; - - if ((pcibios_entry = bios32_service(PCI_SERVICE))) { -- pci_indirect.address = pcibios_entry + PAGE_OFFSET; -+ pci_indirect.address = pcibios_entry; - - local_irq_save(flags); -- __asm__( -- "lcall *(%%edi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%edi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void) - "=b" (ebx), - "=c" (ecx) - : "1" (PCIBIOS_PCI_BIOS_PRESENT), -- "D" (&pci_indirect) -+ "D" (&pci_indirect), -+ "r" (__PCIBIOS_DS) - : "memory"); - local_irq_restore(flags); - -@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - - switch (len) { - case 1: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - : "1" (PCIBIOS_READ_CONFIG_BYTE), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - /* - * Zero-extend the result beyond 8 bits, do not trust the - * BIOS having done it: -@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - *value &= 0xff; - break; - case 2: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - : "1" (PCIBIOS_READ_CONFIG_WORD), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - /* - * Zero-extend the result beyond 16 bits, do not trust the - * BIOS having done it: -@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - *value &= 0xffff; - break; - case 4: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, - : "1" (PCIBIOS_READ_CONFIG_DWORD), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - break; - } - -@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, - - 
switch (len) { - case 1: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, - "c" (value), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - break; - case 2: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, - "c" (value), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - break; - case 4: -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w6, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n\t" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, - "c" (value), - "b" (bx), - "D" ((long)reg), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - break; - } - -@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) - - DBG("PCI: Fetching IRQ routing table... "); - __asm__("push %%es\n\t" -+ "movw %w8, %%ds\n\t" - "push %%ds\n\t" - "pop %%es\n\t" -- "lcall *(%%esi); cld\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" - "pop %%es\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) - "1" (0), - "D" ((long) &opt), - "S" (&pci_indirect), -- "m" (opt) -+ "m" (opt), -+ "r" (__PCIBIOS_DS) - : "memory"); - DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); - if (ret & 0xff00) -@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) - { - int ret; - -- __asm__("lcall *(%%esi); cld\n\t" -+ __asm__("movw %w5, %%ds\n\t" -+ "lcall *%%ss:(%%esi); cld\n\t" -+ "push %%ss\n\t" -+ "pop %%ds\n" - "jc 1f\n\t" - "xor %%ah, %%ah\n" - "1:" -@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) - : "0" (PCIBIOS_SET_PCI_HW_INT), - "b" ((dev->bus->number << 8) | dev->devfn), - "c" ((irq << 8) | (pin + 10)), -- "S" (&pci_indirect)); -+ "S" (&pci_indirect), -+ "r" (__PCIBIOS_DS)); - return !(ret & 0xff00); - } - EXPORT_SYMBOL(pcibios_set_irq_routing); -diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c -index 5cab48e..b025f9b 100644 ---- a/arch/x86/platform/efi/efi_32.c -+++ b/arch/x86/platform/efi/efi_32.c -@@ -38,70 +38,56 @@ - */ - - static unsigned long efi_rt_eflags; --static pgd_t efi_bak_pg_dir_pointer[2]; -+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS]; - --void efi_call_phys_prelog(void) -+void __init efi_call_phys_prelog(void) - { -- unsigned long cr4; -- unsigned long temp; - struct desc_ptr gdt_descr; - -+#ifdef CONFIG_PAX_KERNEXEC -+ struct desc_struct d; -+#endif -+ - local_irq_save(efi_rt_eflags); - -- /* -- * If I don't have PAE, I should just duplicate two entries in page -- * directory. If I have PAE, I just need to duplicate one entry in -- * page directory. 
-- */ -- cr4 = read_cr4_safe(); -- -- if (cr4 & X86_CR4_PAE) { -- efi_bak_pg_dir_pointer[0].pgd = -- swapper_pg_dir[pgd_index(0)].pgd; -- swapper_pg_dir[0].pgd = -- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; -- } else { -- efi_bak_pg_dir_pointer[0].pgd = -- swapper_pg_dir[pgd_index(0)].pgd; -- efi_bak_pg_dir_pointer[1].pgd = -- swapper_pg_dir[pgd_index(0x400000)].pgd; -- swapper_pg_dir[pgd_index(0)].pgd = -- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; -- temp = PAGE_OFFSET + 0x400000; -- swapper_pg_dir[pgd_index(0x400000)].pgd = -- swapper_pg_dir[pgd_index(temp)].pgd; -- } -+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS); -+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, -+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); - - /* - * After the lock is released, the original page table is restored. - */ - __flush_tlb_all(); - -+#ifdef CONFIG_PAX_KERNEXEC -+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC); -+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); -+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC); -+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); -+#endif -+ - gdt_descr.address = __pa(get_cpu_gdt_table(0)); - gdt_descr.size = GDT_SIZE - 1; - load_gdt(&gdt_descr); - } - --void efi_call_phys_epilog(void) -+void __init efi_call_phys_epilog(void) - { -- unsigned long cr4; - struct desc_ptr gdt_descr; - -+#ifdef CONFIG_PAX_KERNEXEC -+ struct desc_struct d; -+ -+ memset(&d, 0, sizeof d); -+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); -+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); -+#endif -+ - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); - gdt_descr.size = GDT_SIZE - 1; - load_gdt(&gdt_descr); - -- cr4 = read_cr4_safe(); -- -- if (cr4 & X86_CR4_PAE) { -- swapper_pg_dir[pgd_index(0)].pgd = -- efi_bak_pg_dir_pointer[0].pgd; -- } else { -- swapper_pg_dir[pgd_index(0)].pgd = -- efi_bak_pg_dir_pointer[0].pgd; -- swapper_pg_dir[pgd_index(0x400000)].pgd = -- efi_bak_pg_dir_pointer[1].pgd; -- } -+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS); - - /* - * After the lock is released, the original page table is restored. -diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S -index fbe66e6..c5c0dd2 100644 ---- a/arch/x86/platform/efi/efi_stub_32.S -+++ b/arch/x86/platform/efi/efi_stub_32.S -@@ -6,7 +6,9 @@ - */ - - #include <linux/linkage.h> -+#include <linux/init.h> - #include <asm/page_types.h> -+#include <asm/segment.h> - - /* - * efi_call_phys(void *, ...) is a function with variable parameters. -@@ -20,7 +22,7 @@ - * service functions will comply with gcc calling convention, too. - */ - --.text -+__INIT - ENTRY(efi_call_phys) - /* - * 0. The function can only be called in Linux kernel. So CS has been -@@ -36,9 +38,11 @@ ENTRY(efi_call_phys) - * The mapping of lower virtual memory has been created in prelog and - * epilog. - */ -- movl $1f, %edx -- subl $__PAGE_OFFSET, %edx -- jmp *%edx -+ movl $(__KERNEXEC_EFI_DS), %edx -+ mov %edx, %ds -+ mov %edx, %es -+ mov %edx, %ss -+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET - 1: - - /* -@@ -47,14 +51,8 @@ ENTRY(efi_call_phys) - * parameter 2, ..., param n. To make things easy, we save the return - * address of efi_call_phys in a global variable. 
- */ -- popl %edx -- movl %edx, saved_return_addr -- /* get the function pointer into ECX*/ -- popl %ecx -- movl %ecx, efi_rt_function_ptr -- movl $2f, %edx -- subl $__PAGE_OFFSET, %edx -- pushl %edx -+ popl (saved_return_addr) -+ popl (efi_rt_function_ptr) - - /* - * 3. Clear PG bit in %CR0. -@@ -73,9 +71,8 @@ ENTRY(efi_call_phys) - /* - * 5. Call the physical function. - */ -- jmp *%ecx -+ call *(efi_rt_function_ptr-__PAGE_OFFSET) - --2: - /* - * 6. After EFI runtime service returns, control will return to - * following instruction. We'd better readjust stack pointer first. -@@ -88,35 +85,32 @@ ENTRY(efi_call_phys) - movl %cr0, %edx - orl $0x80000000, %edx - movl %edx, %cr0 -- jmp 1f --1: -+ - /* - * 8. Now restore the virtual mode from flat mode by - * adding EIP with PAGE_OFFSET. - */ -- movl $1f, %edx -- jmp *%edx -+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET - 1: -+ movl $(__KERNEL_DS), %edx -+ mov %edx, %ds -+ mov %edx, %es -+ mov %edx, %ss - - /* - * 9. Balance the stack. And because EAX contain the return value, - * we'd better not clobber it. - */ -- leal efi_rt_function_ptr, %edx -- movl (%edx), %ecx -- pushl %ecx -+ pushl (efi_rt_function_ptr) - - /* -- * 10. Push the saved return address onto the stack and return. -+ * 10. Return to the saved return address. - */ -- leal saved_return_addr, %edx -- movl (%edx), %ecx -- pushl %ecx -- ret -+ jmpl *(saved_return_addr) - ENDPROC(efi_call_phys) - .previous - --.data -+__INITDATA - saved_return_addr: - .long 0 - efi_rt_function_ptr: -diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S -index 4c07cca..2c8427d 100644 ---- a/arch/x86/platform/efi/efi_stub_64.S -+++ b/arch/x86/platform/efi/efi_stub_64.S -@@ -7,6 +7,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - #define SAVE_XMM \ - mov %rsp, %rax; \ -@@ -40,6 +41,7 @@ ENTRY(efi_call0) - call *%rdi - addq $32, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call0) - -@@ -50,6 +52,7 @@ ENTRY(efi_call1) - call *%rdi - addq $32, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call1) - -@@ -60,6 +63,7 @@ ENTRY(efi_call2) - call *%rdi - addq $32, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call2) - -@@ -71,6 +75,7 @@ ENTRY(efi_call3) - call *%rdi - addq $32, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call3) - -@@ -83,6 +88,7 @@ ENTRY(efi_call4) - call *%rdi - addq $32, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call4) - -@@ -96,6 +102,7 @@ ENTRY(efi_call5) - call *%rdi - addq $48, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call5) - -@@ -112,5 +119,6 @@ ENTRY(efi_call6) - call *%rdi - addq $48, %rsp - RESTORE_XMM -+ pax_force_retaddr 0, 1 - ret - ENDPROC(efi_call6) -diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c -index fe73276..70fe25a 100644 ---- a/arch/x86/platform/mrst/mrst.c -+++ b/arch/x86/platform/mrst/mrst.c -@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void) - } - - /* Reboot and power off are handled by the SCU on a MID device */ --static void mrst_power_off(void) -+static __noreturn void mrst_power_off(void) - { - intel_scu_ipc_simple_command(0xf1, 1); -+ BUG(); - } - --static void mrst_reboot(void) -+static __noreturn void mrst_reboot(void) - { - intel_scu_ipc_simple_command(0xf1, 0); -+ BUG(); - } - - /* -diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c -index 5b55219..b326540 100644 ---- a/arch/x86/platform/uv/tlb_uv.c -+++ 
b/arch/x86/platform/uv/tlb_uv.c -@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) - struct bau_control *smaster = bcp->socket_master; - struct reset_args reset_args; - -+ pax_track_stack(); -+ - reset_args.sender = sender; - cpus_clear(*mask); - /* find a single cpu for each uvhub in this distribution mask */ -diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c -index 87bb35e..eff2da8 100644 ---- a/arch/x86/power/cpu.c -+++ b/arch/x86/power/cpu.c -@@ -130,7 +130,7 @@ static void do_fpu_end(void) - static void fix_processor_context(void) - { - int cpu = smp_processor_id(); -- struct tss_struct *t = &per_cpu(init_tss, cpu); -+ struct tss_struct *t = init_tss + cpu; - - set_tss_desc(cpu, t); /* - * This just modifies memory; should not be -@@ -140,7 +140,9 @@ static void fix_processor_context(void) - */ - - #ifdef CONFIG_X86_64 -+ pax_open_kernel(); - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; -+ pax_close_kernel(); - - syscall_init(); /* This sets MSR_*STAR and related */ - #endif -diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile -index 5d17950..2253fc9 100644 ---- a/arch/x86/vdso/Makefile -+++ b/arch/x86/vdso/Makefile -@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@ - -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ - sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' - --VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) -+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) - GCOV_PROFILE := n - - # -diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c -index 468d591..8e80a0a 100644 ---- a/arch/x86/vdso/vdso32-setup.c -+++ b/arch/x86/vdso/vdso32-setup.c -@@ -25,6 +25,7 @@ - #include <asm/tlbflush.h> - #include <asm/vdso.h> - #include <asm/proto.h> -+#include <asm/mman.h> - - enum { - VDSO_DISABLED = 0, -@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map) - void enable_sep_cpu(void) - { - int cpu = get_cpu(); -- struct tss_struct *tss = &per_cpu(init_tss, cpu); -+ struct tss_struct *tss = init_tss + cpu; - - if (!boot_cpu_has(X86_FEATURE_SEP)) { - put_cpu(); -@@ -249,7 +250,7 @@ static int __init gate_vma_init(void) - gate_vma.vm_start = FIXADDR_USER_START; - gate_vma.vm_end = FIXADDR_USER_END; - gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; -- gate_vma.vm_page_prot = __P101; -+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); - /* - * Make sure the vDSO gets into every core dump. 
- * Dumping its contents makes post-mortem fully interpretable later -@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - if (compat) - addr = VDSO_HIGH_BASE; - else { -- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); -+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); - if (IS_ERR_VALUE(addr)) { - ret = addr; - goto up_fail; - } - } - -- current->mm->context.vdso = (void *)addr; -+ current->mm->context.vdso = addr; - - if (compat_uses_vma || !compat) { - /* -@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - } - - current_thread_info()->sysenter_return = -- VDSO32_SYMBOL(addr, SYSENTER_RETURN); -+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); - - up_fail: - if (ret) -- current->mm->context.vdso = NULL; -+ current->mm->context.vdso = 0; - - up_write(&mm->mmap_sem); - -@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init); - - const char *arch_vma_name(struct vm_area_struct *vma) - { -- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) -+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) - return "[vdso]"; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) -+ return "[vdso]"; -+#endif -+ - return NULL; - } - -@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm) - * Check to see if the corresponding task was created in compat vdso - * mode. - */ -- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) -+ if (mm && mm->context.vdso == VDSO_HIGH_BASE) - return &gate_vma; - return NULL; - } -diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c -index 316fbca..4638633 100644 ---- a/arch/x86/vdso/vma.c -+++ b/arch/x86/vdso/vma.c -@@ -16,8 +16,6 @@ - #include <asm/vdso.h> - #include <asm/page.h> - --unsigned int __read_mostly vdso_enabled = 1; -- - extern char vdso_start[], vdso_end[]; - extern unsigned short vdso_sync_cpuid; - -@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) - int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - { - struct mm_struct *mm = current->mm; -- unsigned long addr; -+ unsigned long addr = 0; - int ret; - -- if (!vdso_enabled) -- return 0; -- - down_write(&mm->mmap_sem); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - addr = vdso_addr(mm->start_stack, vdso_size); - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); - if (IS_ERR_VALUE(addr)) { -@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - goto up_fail; - } - -- current->mm->context.vdso = (void *)addr; -+ mm->context.vdso = addr; - - ret = install_special_mapping(mm, addr, vdso_size, - VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| - VM_ALWAYSDUMP, - vdso_pages); -- if (ret) { -- current->mm->context.vdso = NULL; -- goto up_fail; -- } -+ -+ if (ret) -+ mm->context.vdso = 0; - - up_fail: - up_write(&mm->mmap_sem); - return ret; - } -- --static __init int vdso_setup(char *s) --{ -- vdso_enabled = simple_strtoul(s, NULL, 0); -- return 0; --} --__setup("vdso=", vdso_setup); -diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c -index 46c8069..6330d3c 100644 ---- a/arch/x86/xen/enlighten.c -+++ b/arch/x86/xen/enlighten.c -@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); - - struct shared_info xen_dummy_shared_info; - --void *xen_initial_gdt; -- - RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); 
- __read_mostly int xen_have_vector_callback; - EXPORT_SYMBOL_GPL(xen_have_vector_callback); -@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = { - #endif - }; - --static void xen_reboot(int reason) -+static __noreturn void xen_reboot(int reason) - { - struct sched_shutdown r = { .reason = reason }; - -@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason) - BUG(); - } - --static void xen_restart(char *msg) -+static __noreturn void xen_restart(char *msg) - { - xen_reboot(SHUTDOWN_reboot); - } - --static void xen_emergency_restart(void) -+static __noreturn void xen_emergency_restart(void) - { - xen_reboot(SHUTDOWN_reboot); - } - --static void xen_machine_halt(void) -+static __noreturn void xen_machine_halt(void) - { - xen_reboot(SHUTDOWN_poweroff); - } -@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void) - __userpte_alloc_gfp &= ~__GFP_HIGHMEM; - - /* Work out if we support NX */ -- x86_configure_nx(); -+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) -+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && -+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { -+ unsigned l, h; -+ -+ __supported_pte_mask |= _PAGE_NX; -+ rdmsr(MSR_EFER, l, h); -+ l |= EFER_NX; -+ wrmsr(MSR_EFER, l, h); -+ } -+#endif - - xen_setup_features(); - -@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void) - - machine_ops = xen_machine_ops; - -- /* -- * The only reliable way to retain the initial address of the -- * percpu gdt_page is to remember it here, so we can go and -- * mark it RW later, when the initial percpu area is freed. -- */ -- xen_initial_gdt = &per_cpu(gdt_page, 0); -- - xen_smp_init(); - - #ifdef CONFIG_ACPI_NUMA -diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c -index 3dd53f9..5aa5df3 100644 ---- a/arch/x86/xen/mmu.c -+++ b/arch/x86/xen/mmu.c -@@ -1768,6 +1768,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, - convert_pfn_mfn(init_level4_pgt); - convert_pfn_mfn(level3_ident_pgt); - convert_pfn_mfn(level3_kernel_pgt); -+ convert_pfn_mfn(level3_vmalloc_start_pgt); -+ convert_pfn_mfn(level3_vmalloc_end_pgt); -+ convert_pfn_mfn(level3_vmemmap_pgt); - - l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); - l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); -@@ -1786,7 +1789,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, - set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); -+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO); -+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO); -+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); -+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); - set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - -@@ -2000,6 +2007,7 @@ static void __init xen_post_allocator_init(void) - pv_mmu_ops.set_pud = xen_set_pud; - #if PAGETABLE_LEVELS == 4 - pv_mmu_ops.set_pgd = xen_set_pgd; -+ pv_mmu_ops.set_pgd_batched = xen_set_pgd; - #endif - - /* This will work as long as patching hasn't happened yet -@@ -2081,6 +2089,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { - .pud_val = PV_CALLEE_SAVE(xen_pud_val), - .make_pud = PV_CALLEE_SAVE(xen_make_pud), - .set_pgd = xen_set_pgd_hyper, -+ .set_pgd_batched = xen_set_pgd_hyper, - - .alloc_pud = xen_alloc_pmd_init, - .release_pud = xen_release_pmd_init, -diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c 
-index 041d4fe..7666b7e 100644 ---- a/arch/x86/xen/smp.c -+++ b/arch/x86/xen/smp.c -@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void) - { - BUG_ON(smp_processor_id() != 0); - native_smp_prepare_boot_cpu(); -- -- /* We've switched to the "real" per-cpu gdt, so make sure the -- old memory can be recycled */ -- make_lowmem_page_readwrite(xen_initial_gdt); -- - xen_filter_cpu_maps(); - xen_setup_vcpu_info_placement(); - } -@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) - gdt = get_cpu_gdt_table(cpu); - - ctxt->flags = VGCF_IN_KERNEL; -- ctxt->user_regs.ds = __USER_DS; -- ctxt->user_regs.es = __USER_DS; -+ ctxt->user_regs.ds = __KERNEL_DS; -+ ctxt->user_regs.es = __KERNEL_DS; - ctxt->user_regs.ss = __KERNEL_DS; - #ifdef CONFIG_X86_32 - ctxt->user_regs.fs = __KERNEL_PERCPU; -- ctxt->user_regs.gs = __KERNEL_STACK_CANARY; -+ savesegment(gs, ctxt->user_regs.gs); - #else - ctxt->gs_base_kernel = per_cpu_offset(cpu); - #endif -@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) - int rc; - - per_cpu(current_task, cpu) = idle; -+ per_cpu(current_tinfo, cpu) = &idle->tinfo; - #ifdef CONFIG_X86_32 - irq_ctx_init(cpu); - #else - clear_tsk_thread_flag(idle, TIF_FORK); -- per_cpu(kernel_stack, cpu) = -- (unsigned long)task_stack_page(idle) - -- KERNEL_STACK_OFFSET + THREAD_SIZE; -+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; - #endif - xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); -diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S -index b040b0e..8cc4fe0 100644 ---- a/arch/x86/xen/xen-asm_32.S -+++ b/arch/x86/xen/xen-asm_32.S -@@ -83,14 +83,14 @@ ENTRY(xen_iret) - ESP_OFFSET=4 # bytes pushed onto stack - - /* -- * Store vcpu_info pointer for easy access. Do it this way to -- * avoid having to reload %fs -+ * Store vcpu_info pointer for easy access. 
- */ - #ifdef CONFIG_SMP -- GET_THREAD_INFO(%eax) -- movl TI_cpu(%eax), %eax -- movl __per_cpu_offset(,%eax,4), %eax -- mov xen_vcpu(%eax), %eax -+ push %fs -+ mov $(__KERNEL_PERCPU), %eax -+ mov %eax, %fs -+ mov PER_CPU_VAR(xen_vcpu), %eax -+ pop %fs - #else - movl xen_vcpu, %eax - #endif -diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S -index aaa7291..3f77960 100644 ---- a/arch/x86/xen/xen-head.S -+++ b/arch/x86/xen/xen-head.S -@@ -19,6 +19,17 @@ ENTRY(startup_xen) - #ifdef CONFIG_X86_32 - mov %esi,xen_start_info - mov $init_thread_union+THREAD_SIZE,%esp -+#ifdef CONFIG_SMP -+ movl $cpu_gdt_table,%edi -+ movl $__per_cpu_load,%eax -+ movw %ax,__KERNEL_PERCPU + 2(%edi) -+ rorl $16,%eax -+ movb %al,__KERNEL_PERCPU + 4(%edi) -+ movb %ah,__KERNEL_PERCPU + 7(%edi) -+ movl $__per_cpu_end - 1,%eax -+ subl $__per_cpu_start,%eax -+ movw %ax,__KERNEL_PERCPU + 0(%edi) -+#endif - #else - mov %rsi,xen_start_info - mov $init_thread_union+THREAD_SIZE,%rsp -diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h -index b095739..8c17bcd 100644 ---- a/arch/x86/xen/xen-ops.h -+++ b/arch/x86/xen/xen-ops.h -@@ -10,8 +10,6 @@ - extern const char xen_hypervisor_callback[]; - extern const char xen_failsafe_callback[]; - --extern void *xen_initial_gdt; -- - struct trap_info; - void xen_copy_trap_info(struct trap_info *traps); - -diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c -index 58916af..9cb880b 100644 ---- a/block/blk-iopoll.c -+++ b/block/blk-iopoll.c -@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll) - } - EXPORT_SYMBOL(blk_iopoll_complete); - --static void blk_iopoll_softirq(struct softirq_action *h) -+static void blk_iopoll_softirq(void) - { - struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); - int rearm = 0, budget = blk_iopoll_budget; -diff --git a/block/blk-map.c b/block/blk-map.c -index 164cd00..6d96fc1 100644 ---- a/block/blk-map.c -+++ b/block/blk-map.c -@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, - if (!len || !kbuf) - return -EINVAL; - -- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); -+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf); - if (do_copy) - bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); - else -diff --git a/block/blk-softirq.c b/block/blk-softirq.c -index 1366a89..e17f54b 100644 ---- a/block/blk-softirq.c -+++ b/block/blk-softirq.c -@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); - * Softirq action handler - move entries to local list and loop over them - * while passing them to the queue registered handler. 
- */ --static void blk_done_softirq(struct softirq_action *h) -+static void blk_done_softirq(void) - { - struct list_head *cpu_list, local_list; - -diff --git a/block/bsg.c b/block/bsg.c -index 702f131..37808bf 100644 ---- a/block/bsg.c -+++ b/block/bsg.c -@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, - struct sg_io_v4 *hdr, struct bsg_device *bd, - fmode_t has_write_perm) - { -+ unsigned char tmpcmd[sizeof(rq->__cmd)]; -+ unsigned char *cmdptr; -+ - if (hdr->request_len > BLK_MAX_CDB) { - rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); - if (!rq->cmd) - return -ENOMEM; -- } -+ cmdptr = rq->cmd; -+ } else -+ cmdptr = tmpcmd; - -- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, -+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request, - hdr->request_len)) - return -EFAULT; - -+ if (cmdptr != rq->cmd) -+ memcpy(rq->cmd, cmdptr, hdr->request_len); -+ - if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { - if (blk_verify_command(rq->cmd, has_write_perm)) - return -EPERM; -diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c -index 7b72502..646105c 100644 ---- a/block/compat_ioctl.c -+++ b/block/compat_ioctl.c -@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, - err |= __get_user(f->spec1, &uf->spec1); - err |= __get_user(f->fmt_gap, &uf->fmt_gap); - err |= __get_user(name, &uf->name); -- f->name = compat_ptr(name); -+ f->name = (void __force_kernel *)compat_ptr(name); - if (err) { - err = -EFAULT; - goto out; -diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c -index 4f4230b..0feae9a 100644 ---- a/block/scsi_ioctl.c -+++ b/block/scsi_ioctl.c -@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command); - static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, - struct sg_io_hdr *hdr, fmode_t mode) - { -- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) -+ unsigned char tmpcmd[sizeof(rq->__cmd)]; -+ unsigned char *cmdptr; -+ -+ if (rq->cmd != rq->__cmd) -+ cmdptr = rq->cmd; -+ else -+ cmdptr = tmpcmd; -+ -+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len)) - return -EFAULT; -+ -+ if (cmdptr != rq->cmd) -+ memcpy(rq->cmd, cmdptr, hdr->cmd_len); -+ - if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) - return -EPERM; - -@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, - int err; - unsigned int in_len, out_len, bytes, opcode, cmdlen; - char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; -+ unsigned char tmpcmd[sizeof(rq->__cmd)]; -+ unsigned char *cmdptr; - - if (!sic) - return -EINVAL; -@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, - */ - err = -EFAULT; - rq->cmd_len = cmdlen; -- if (copy_from_user(rq->cmd, sic->data, cmdlen)) -+ -+ if (rq->cmd != rq->__cmd) -+ cmdptr = rq->cmd; -+ else -+ cmdptr = tmpcmd; -+ -+ if (copy_from_user(cmdptr, sic->data, cmdlen)) - goto error; - -+ if (rq->cmd != cmdptr) -+ memcpy(rq->cmd, cmdptr, cmdlen); -+ - if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) - goto error; - -diff --git a/crypto/cryptd.c b/crypto/cryptd.c -index 671d4d6..5f24030 100644 ---- a/crypto/cryptd.c -+++ b/crypto/cryptd.c -@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx { - - struct cryptd_blkcipher_request_ctx { - crypto_completion_t complete; --}; -+} __no_const; - - struct cryptd_hash_ctx { - struct crypto_shash *child; -@@ -80,7 +80,7 @@ struct cryptd_aead_ctx { - - struct cryptd_aead_request_ctx { - 
crypto_completion_t complete; --}; -+} __no_const; - - static void cryptd_queue_worker(struct work_struct *work); - -diff --git a/crypto/serpent.c b/crypto/serpent.c -index b651a55..a9ddd79b 100644 ---- a/crypto/serpent.c -+++ b/crypto/serpent.c -@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, - u32 r0,r1,r2,r3,r4; - int i; - -+ pax_track_stack(); -+ - /* Copy key, add padding */ - - for (i = 0; i < keylen; ++i) -diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c -index 5d41894..22021e4 100644 ---- a/drivers/acpi/apei/cper.c -+++ b/drivers/acpi/apei/cper.c -@@ -38,12 +38,12 @@ - */ - u64 cper_next_record_id(void) - { -- static atomic64_t seq; -+ static atomic64_unchecked_t seq; - -- if (!atomic64_read(&seq)) -- atomic64_set(&seq, ((u64)get_seconds()) << 32); -+ if (!atomic64_read_unchecked(&seq)) -+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32); - -- return atomic64_inc_return(&seq); -+ return atomic64_inc_return_unchecked(&seq); - } - EXPORT_SYMBOL_GPL(cper_next_record_id); - -diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c -index 22f918b..9fafb84 100644 ---- a/drivers/acpi/ec_sys.c -+++ b/drivers/acpi/ec_sys.c -@@ -11,6 +11,7 @@ - #include <linux/kernel.h> - #include <linux/acpi.h> - #include <linux/debugfs.h> -+#include <asm/uaccess.h> - #include "internal.h" - - MODULE_AUTHOR("Thomas Renninger trenn@suse.de"); -@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, - * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private; - */ - unsigned int size = EC_SPACE_SIZE; -- u8 *data = (u8 *) buf; -+ u8 data; - loff_t init_off = *off; - int err = 0; - -@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, - size = count; - - while (size) { -- err = ec_read(*off, &data[*off - init_off]); -+ err = ec_read(*off, &data); - if (err) - return err; -+ if (put_user(data, &buf[*off - init_off])) -+ return -EFAULT; - *off += 1; - size--; - } -@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, - - unsigned int size = count; - loff_t init_off = *off; -- u8 *data = (u8 *) buf; - int err = 0; - - if (*off >= EC_SPACE_SIZE) -@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, - } - - while (size) { -- u8 byte_write = data[*off - init_off]; -+ u8 byte_write; -+ if (get_user(byte_write, &buf[*off - init_off])) -+ return -EFAULT; - err = ec_write(*off, byte_write); - if (err) - return err; -diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c -index f5f9869..da87aeb 100644 ---- a/drivers/acpi/proc.c -+++ b/drivers/acpi/proc.c -@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file, - size_t count, loff_t * ppos) - { - struct list_head *node, *next; -- char strbuf[5]; -- char str[5] = ""; -- unsigned int len = count; -+ char strbuf[5] = {0}; - -- if (len > 4) -- len = 4; -- if (len < 0) -+ if (count > 4) -+ count = 4; -+ if (copy_from_user(strbuf, buffer, count)) - return -EFAULT; -- -- if (copy_from_user(strbuf, buffer, len)) -- return -EFAULT; -- strbuf[len] = '\0'; -- sscanf(strbuf, "%s", str); -+ strbuf[count] = '\0'; - - mutex_lock(&acpi_device_lock); - list_for_each_safe(node, next, &acpi_wakeup_device_list) { -@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file, - if (!dev->wakeup.flags.valid) - continue; - -- if (!strncmp(dev->pnp.bus_id, str, 4)) { -+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) { - if (device_can_wakeup(&dev->dev)) { - bool enable = 
!device_may_wakeup(&dev->dev); - device_set_wakeup_enable(&dev->dev, enable); -diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c -index a4e0f1b..9793b28 100644 ---- a/drivers/acpi/processor_driver.c -+++ b/drivers/acpi/processor_driver.c -@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) - return 0; - #endif - -- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); -+ BUG_ON(pr->id >= nr_cpu_ids); - - /* - * Buggy BIOS check -diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index 4a3a5ae..cbee192 100644 ---- a/drivers/ata/libata-core.c -+++ b/drivers/ata/libata-core.c -@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) - struct ata_port *ap; - unsigned int tag; - -- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ -+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ - ap = qc->ap; - - qc->flags = 0; -@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) - struct ata_port *ap; - struct ata_link *link; - -- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ -+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ - WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); - ap = qc->ap; - link = qc->dev->link; -@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) - return; - - spin_lock(&lock); -+ pax_open_kernel(); - - for (cur = ops->inherits; cur; cur = cur->inherits) { - void **inherit = (void **)cur; -@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) - if (IS_ERR(*pp)) - *pp = NULL; - -- ops->inherits = NULL; -+ *(struct ata_port_operations **)&ops->inherits = NULL; - -+ pax_close_kernel(); - spin_unlock(&lock); - } - -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c -index ed16fbe..fc92cb8 100644 ---- a/drivers/ata/libata-eh.c -+++ b/drivers/ata/libata-eh.c -@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap) - { - struct ata_link *link; - -+ pax_track_stack(); -+ - ata_for_each_link(link, ap, HOST_FIRST) - ata_eh_link_report(link); - } -diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c -index 719bb73..79ce858 100644 ---- a/drivers/ata/pata_arasan_cf.c -+++ b/drivers/ata/pata_arasan_cf.c -@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev) - /* Handle platform specific quirks */ - if (pdata->quirk) { - if (pdata->quirk & CF_BROKEN_PIO) { -- ap->ops->set_piomode = NULL; -+ pax_open_kernel(); -+ *(void **)&ap->ops->set_piomode = NULL; -+ pax_close_kernel(); - ap->pio_mask = 0; - } - if (pdata->quirk & CF_BROKEN_MWDMA) -diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c -index f9b983a..887b9d8 100644 ---- a/drivers/atm/adummy.c -+++ b/drivers/atm/adummy.c -@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) - vcc->pop(vcc, skb); - else - dev_kfree_skb_any(skb); -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - - return 0; - } -diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c -index f8f41e0..1f987dd 100644 ---- a/drivers/atm/ambassador.c -+++ b/drivers/atm/ambassador.c -@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { - PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); - - // VC layer stats -- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); -+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); - - // free the descriptor - kfree (tx_descr); -@@ -495,7 +495,7 @@ static void 
rx_complete (amb_dev * dev, rx_out * rx) { - dump_skb ("<<<", vc, skb); - - // VC layer stats -- atomic_inc(&atm_vcc->stats->rx); -+ atomic_inc_unchecked(&atm_vcc->stats->rx); - __net_timestamp(skb); - // end of our responsibility - atm_vcc->push (atm_vcc, skb); -@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { - } else { - PRINTK (KERN_INFO, "dropped over-size frame"); - // should we count this? -- atomic_inc(&atm_vcc->stats->rx_drop); -+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); - } - - } else { -@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { - } - - if (check_area (skb->data, skb->len)) { -- atomic_inc(&atm_vcc->stats->tx_err); -+ atomic_inc_unchecked(&atm_vcc->stats->tx_err); - return -ENOMEM; // ? - } - -diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c -index b22d71c..d6e1049 100644 ---- a/drivers/atm/atmtcp.c -+++ b/drivers/atm/atmtcp.c -@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); - if (dev_data) return 0; -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - return -ENOLINK; - } - size = skb->len+sizeof(struct atmtcp_hdr); -@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) - if (!new_skb) { - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - return -ENOBUFS; - } - hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); -@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); - out_vcc->push(out_vcc,new_skb); -- atomic_inc(&vcc->stats->tx); -- atomic_inc(&out_vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->tx); -+ atomic_inc_unchecked(&out_vcc->stats->rx); - return 0; - } - -@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) - out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); - read_unlock(&vcc_sklist_lock); - if (!out_vcc) { -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - goto done; - } - skb_pull(skb,sizeof(struct atmtcp_hdr)); -@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) - __net_timestamp(new_skb); - skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); - out_vcc->push(out_vcc,new_skb); -- atomic_inc(&vcc->stats->tx); -- atomic_inc(&out_vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->tx); -+ atomic_inc_unchecked(&out_vcc->stats->rx); - done: - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); -diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c -index 9307141..d8521bf 100644 ---- a/drivers/atm/eni.c -+++ b/drivers/atm/eni.c -@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc) - DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", - vcc->dev->number); - length = 0; -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - } - else { - length = ATM_CELL_SIZE-1; /* no HEC */ -@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc) - size); - } - eff = length = 0; -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - } - else { - size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); -@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc) - "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", - 
vcc->dev->number,vcc->vci,length,size << 2,descr); - length = eff = 0; -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - } - } - skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; -@@ -771,7 +771,7 @@ rx_dequeued++; - vcc->push(vcc,skb); - pushed++; - } -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - wake_up(&eni_dev->rx_wait); - } -@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev) - PCI_DMA_TODEVICE); - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb_irq(skb); -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - wake_up(&eni_dev->tx_wait); - dma_complete++; - } -@@ -1568,7 +1568,7 @@ tx_complete++; - /*--------------------------------- entries ---------------------------------*/ - - --static const char *media_name[] __devinitdata = { -+static const char *media_name[] __devinitconst = { - "MMF", "SMF", "MMF", "03?", /* 0- 3 */ - "UTP", "05?", "06?", "07?", /* 4- 7 */ - "TAXI","09?", "10?", "11?", /* 8-11 */ -diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c -index 5072f8a..fa52520 100644 ---- a/drivers/atm/firestream.c -+++ b/drivers/atm/firestream.c -@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) - } - } - -- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); -+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); - - fs_dprintk (FS_DEBUG_TXMEM, "i"); - fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); -@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) - #endif - skb_put (skb, qe->p1 & 0xffff); - ATM_SKB(skb)->vcc = atm_vcc; -- atomic_inc(&atm_vcc->stats->rx); -+ atomic_inc_unchecked(&atm_vcc->stats->rx); - __net_timestamp(skb); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); - atm_vcc->push (atm_vcc, skb); -@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) - kfree (pe); - } - if (atm_vcc) -- atomic_inc(&atm_vcc->stats->rx_drop); -+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); - break; - case 0x1f: /* Reassembly abort: no buffers. */ - /* Silently increment error counter. */ - if (atm_vcc) -- atomic_inc(&atm_vcc->stats->rx_drop); -+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); - break; - default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ - printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", -diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c -index 361f5ae..7fc552d 100644 ---- a/drivers/atm/fore200e.c -+++ b/drivers/atm/fore200e.c -@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e) - #endif - /* check error condition */ - if (*entry->status & STATUS_ERROR) -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - else -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - } - } - -@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp - if (skb == NULL) { - DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); - -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - return -ENOMEM; - } - -@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp - - dev_kfree_skb_any(skb); - -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - return -ENOMEM; - } - - ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); - - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); - -@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e) - DPRINTK(2, "damaged PDU on %d.%d.%d\n", - fore200e->atm_dev->number, - entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - } - } - -@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) - goto retry_here; - } - -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - - fore200e->tx_sat++; - DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", -diff --git a/drivers/atm/he.c b/drivers/atm/he.c -index 9a51df4..f3bb5f8 100644 ---- a/drivers/atm/he.c -+++ b/drivers/atm/he.c -@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) - - if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { - hprintk("HBUF_ERR! (cid 0x%x)\n", cid); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - goto return_host_buffers; - } - -@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) - RBRQ_LEN_ERR(he_dev->rbrq_head) - ? 
"LEN_ERR" : "", - vcc->vpi, vcc->vci); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto return_host_buffers; - } - -@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) - vcc->push(vcc, skb); - spin_lock(&he_dev->global_lock); - -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - return_host_buffers: - ++pdus_assembled; -@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) - tpd->vcc->pop(tpd->vcc, tpd->skb); - else - dev_kfree_skb_any(tpd->skb); -- atomic_inc(&tpd->vcc->stats->tx_err); -+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err); - } - pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); - return; -@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) - vcc->pop(vcc, skb); - else - dev_kfree_skb_any(skb); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - return -EINVAL; - } - -@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) - vcc->pop(vcc, skb); - else - dev_kfree_skb_any(skb); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - return -EINVAL; - } - #endif -@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) - vcc->pop(vcc, skb); - else - dev_kfree_skb_any(skb); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - spin_unlock_irqrestore(&he_dev->global_lock, flags); - return -ENOMEM; - } -@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) - vcc->pop(vcc, skb); - else - dev_kfree_skb_any(skb); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - spin_unlock_irqrestore(&he_dev->global_lock, flags); - return -ENOMEM; - } -@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) - __enqueue_tpd(he_dev, tpd, cid); - spin_unlock_irqrestore(&he_dev->global_lock, flags); - -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - - return 0; - } -diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c -index b812103..e391a49 100644 ---- a/drivers/atm/horizon.c -+++ b/drivers/atm/horizon.c -@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) { - { - struct atm_vcc * vcc = ATM_SKB(skb)->vcc; - // VC layer stats -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - __net_timestamp(skb); - // end of our responsibility - vcc->push (vcc, skb); -@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) { - dev->tx_iovec = NULL; - - // VC layer stats -- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); -+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); - - // free the skb - hrz_kfree_skb (skb); -diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c -index db06f34..dcebb61 100644 ---- a/drivers/atm/idt77252.c -+++ b/drivers/atm/idt77252.c -@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc) - else - dev_kfree_skb(skb); - -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - } - - atomic_dec(&scq->used); -@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - if ((sb = dev_alloc_skb(64)) == NULL) { - printk("%s: Can't allocate buffers for aal0.\n", - card->name); -- atomic_add(i, &vcc->stats->rx_drop); -+ atomic_add_unchecked(i, &vcc->stats->rx_drop); - break; - } - if (!atm_charge(vcc, sb->truesize)) { - RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", - card->name); -- 
atomic_add(i - 1, &vcc->stats->rx_drop); -+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); - dev_kfree_skb(sb); - break; - } -@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - cell += ATM_CELL_PAYLOAD; - } -@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - "(CDC: %08x)\n", - card->name, len, rpp->len, readl(SAR_REG_CDC)); - recycle_rx_pool_skb(card, rpp); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - return; - } - if (stat & SAR_RSQE_CRC) { - RXPRINTK("%s: AAL5 CRC error.\n", card->name); - recycle_rx_pool_skb(card, rpp); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - return; - } - if (skb_queue_len(&rpp->queue) > 1) { -@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - RXPRINTK("%s: Can't alloc RX skb.\n", - card->name); - recycle_rx_pool_skb(card, rpp); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - return; - } - if (!atm_charge(vcc, skb->truesize)) { -@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - __net_timestamp(skb); - - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - return; - } -@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) - __net_timestamp(skb); - - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - if (skb->truesize > SAR_FB_SIZE_3) - add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); -@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card) - if (vcc->qos.aal != ATM_AAL0) { - RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", - card->name, vpi, vci); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - goto drop; - } - - if ((sb = dev_alloc_skb(64)) == NULL) { - printk("%s: Can't allocate buffers for AAL0.\n", - card->name); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto drop; - } - -@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card) - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - - drop: - skb_pull(queue, 64); -@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) - - if (vc == NULL) { - printk("%s: NULL connection in send().\n", card->name); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb(skb); - return -EINVAL; - } - if (!test_bit(VCF_TX, &vc->flags)) { - printk("%s: Trying to transmit on a non-tx VC.\n", card->name); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb(skb); - return -EINVAL; - } -@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) - break; - default: - printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb(skb); - return -EINVAL; - } - - if (skb_shinfo(skb)->nr_frags != 0) { - printk("%s: No scatter-gather yet.\n", card->name); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb(skb); - return -EINVAL; - } -@@ -1988,7 
+1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) - - err = queue_skb(card, vc, skb, oam); - if (err) { -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb(skb); - return err; - } -@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags) - skb = dev_alloc_skb(64); - if (!skb) { - printk("%s: Out of memory in send_oam().\n", card->name); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - return -ENOMEM; - } - atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); -diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c -index cb90f7a..bd33566 100644 ---- a/drivers/atm/iphase.c -+++ b/drivers/atm/iphase.c -@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev) - status = (u_short) (buf_desc_ptr->desc_mode); - if (status & (RX_CER | RX_PTE | RX_OFL)) - { -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - IF_ERR(printk("IA: bad packet, dropping it");) - if (status & RX_CER) { - IF_ERR(printk(" cause: packet CRC error\n");) -@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev) - len = dma_addr - buf_addr; - if (len > iadev->rx_buf_sz) { - printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto out_free_desc; - } - -@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev) - ia_vcc = INPH_IA_VCC(vcc); - if (ia_vcc == NULL) - { -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - dev_kfree_skb_any(skb); - atm_return(vcc, atm_guess_pdu2truesize(len)); - goto INCR_DLE; -@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev) - if ((length > iadev->rx_buf_sz) || (length > - (skb->len - sizeof(struct cpcs_trailer)))) - { -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", - length, skb->len);) - dev_kfree_skb_any(skb); -@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev) - - IF_RX(printk("rx_dle_intr: skb push");) - vcc->push(vcc,skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - iadev->rx_pkt_cnt++; - } - INCR_DLE: -@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) - { - struct k_sonet_stats *stats; - stats = &PRIV(_ia_dev[board])->sonet_stats; -- printk("section_bip: %d\n", atomic_read(&stats->section_bip)); -- printk("line_bip : %d\n", atomic_read(&stats->line_bip)); -- printk("path_bip : %d\n", atomic_read(&stats->path_bip)); -- printk("line_febe : %d\n", atomic_read(&stats->line_febe)); -- printk("path_febe : %d\n", atomic_read(&stats->path_febe)); -- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); -- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); -- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); -- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); -+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); -+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); -+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); -+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); -+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); -+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); -+ printk("uncorr_hcs : %d\n", 
atomic_read_unchecked(&stats->uncorr_hcs)); -+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); -+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); - } - ia_cmds.status = 0; - break; -@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { - if ((desc == 0) || (desc > iadev->num_tx_desc)) - { - IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - if (vcc->pop) - vcc->pop(vcc, skb); - else -@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { - ATM_DESC(skb) = vcc->vci; - skb_queue_tail(&iadev->tx_dma_q, skb); - -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - iadev->tx_pkt_cnt++; - /* Increment transaction counter */ - writel(2, iadev->dma+IPHASE5575_TX_COUNTER); - - #if 0 - /* add flow control logic */ -- if (atomic_read(&vcc->stats->tx) % 20 == 0) { -+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { - if (iavcc->vc_desc_cnt > 10) { - vcc->tx_quota = vcc->tx_quota * 3 / 4; - printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); -diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c -index e828c54..ae83976 100644 ---- a/drivers/atm/lanai.c -+++ b/drivers/atm/lanai.c -@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai, - vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); - lanai_endtx(lanai, lvcc); - lanai_free_skb(lvcc->tx.atmvcc, skb); -- atomic_inc(&lvcc->tx.atmvcc->stats->tx); -+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); - } - - /* Try to fill the buffer - don't call unless there is backlog */ -@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) - ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; - __net_timestamp(skb); - lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); -- atomic_inc(&lvcc->rx.atmvcc->stats->rx); -+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); - out: - lvcc->rx.buf.ptr = end; - cardvcc_write(lvcc, endptr, vcc_rxreadptr); -@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) - DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " - "vcc %d\n", lanai->number, (unsigned int) s, vci); - lanai->stats.service_rxnotaal5++; -- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); -+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); - return 0; - } - if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { -@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) - int bytes; - read_unlock(&vcc_sklist_lock); - DPRINTK("got trashed rx pdu on vci %d\n", vci); -- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); -+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); - lvcc->stats.x.aal5.service_trash++; - bytes = (SERVICE_GET_END(s) * 16) - - (((unsigned long) lvcc->rx.buf.ptr) - -@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) - } - if (s & SERVICE_STREAM) { - read_unlock(&vcc_sklist_lock); -- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); -+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); - lvcc->stats.x.aal5.service_stream++; - printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " - "PDU on VCI %d!\n", lanai->number, vci); -@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) - return 0; - } - DPRINTK("got rx crc error on vci %d\n", vci); -- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); -+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); - 
lvcc->stats.x.aal5.service_rxcrc++; - lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; - cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); -diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c -index 1c70c45..300718d 100644 ---- a/drivers/atm/nicstar.c -+++ b/drivers/atm/nicstar.c -@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) - if ((vc = (vc_map *) vcc->dev_data) == NULL) { - printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", - card->index); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } -@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) - if (!vc->tx) { - printk("nicstar%d: Trying to transmit on a non-tx VC.\n", - card->index); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } -@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) - if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { - printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", - card->index); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (skb_shinfo(skb)->nr_frags != 0) { - printk("nicstar%d: No scatter-gather yet.\n", card->index); -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } -@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) - } - - if (push_scqe(card, vc, scq, &scqe, skb) != 0) { -- atomic_inc(&vcc->stats->tx_err); -+ atomic_inc_unchecked(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EIO; - } -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - - return 0; - } -@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - printk - ("nicstar%d: Can't allocate buffers for aal0.\n", - card->index); -- atomic_add(i, &vcc->stats->rx_drop); -+ atomic_add_unchecked(i, &vcc->stats->rx_drop); - break; - } - if (!atm_charge(vcc, sb->truesize)) { - RXPRINTK - ("nicstar%d: atm_charge() dropped aal0 packets.\n", - card->index); -- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ -+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ - dev_kfree_skb_any(sb); - break; - } -@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - cell += ATM_CELL_PAYLOAD; - } - -@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - if (iovb == NULL) { - printk("nicstar%d: Out of iovec buffers.\n", - card->index); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - recycle_rx_buf(card, skb); - return; - } -@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - small or large buffer itself. 
*/ - } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { - printk("nicstar%d: received too big AAL5 SDU.\n", card->index); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, - NS_MAX_IOVECS); - NS_PRV_IOVCNT(iovb) = 0; -@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ("nicstar%d: Expected a small buffer, and this is not one.\n", - card->index); - which_list(card, skb); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - recycle_rx_buf(card, skb); - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); -@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ("nicstar%d: Expected a large buffer, and this is not one.\n", - card->index); - which_list(card, skb); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, - NS_PRV_IOVCNT(iovb)); - vc->rx_iov = NULL; -@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - printk(" - PDU size mismatch.\n"); - else - printk(".\n"); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, - NS_PRV_IOVCNT(iovb)); - vc->rx_iov = NULL; -@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - /* skb points to a small buffer */ - if (!atm_charge(vcc, skb->truesize)) { - push_rxbufs(card, skb); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - } else { - skb_put(skb, len); - dequeue_sm_buf(card, skb); -@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ATM_SKB(skb)->vcc = vcc; - __net_timestamp(skb); - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ - struct sk_buff *sb; -@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - if (len <= NS_SMBUFSIZE) { - if (!atm_charge(vcc, sb->truesize)) { - push_rxbufs(card, sb); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - } else { - skb_put(sb, len); - dequeue_sm_buf(card, sb); -@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - - push_rxbufs(card, skb); -@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - - if (!atm_charge(vcc, skb->truesize)) { - push_rxbufs(card, skb); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - } else { - dequeue_lg_buf(card, skb); - #ifdef NS_USE_DESTRUCTORS -@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - ATM_SKB(skb)->vcc = vcc; - __net_timestamp(skb); - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - - push_rxbufs(card, sb); -@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - printk - ("nicstar%d: Out of huge buffers.\n", - card->index); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - recycle_iovec_rx_bufs(card, - (struct iovec *) - iovb->data, -@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - card->hbpool.count++; - } else - dev_kfree_skb_any(hb); -- 
atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - } else { - /* Copy the small buffer to the huge buffer */ - sb = (struct sk_buff *)iov->iov_base; -@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) - #endif /* NS_USE_DESTRUCTORS */ - __net_timestamp(hb); - vcc->push(vcc, hb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - } - -diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c -index 5d1d076..4f31f42 100644 ---- a/drivers/atm/solos-pci.c -+++ b/drivers/atm/solos-pci.c -@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg) - } - atm_charge(vcc, skb->truesize); - vcc->push(vcc, skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - break; - - case PKT_STATUS: -@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf) - char msg[500]; - char item[10]; - -+ pax_track_stack(); -+ - len = buf->len; - for (i = 0; i < len; i++){ - if(i % 8 == 0) -@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card) - vcc = SKB_CB(oldskb)->vcc; - - if (vcc) { -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - solos_pop(vcc, oldskb); - } else - dev_kfree_skb_irq(oldskb); -diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c -index 90f1ccc..04c4a1e 100644 ---- a/drivers/atm/suni.c -+++ b/drivers/atm/suni.c -@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock); - - - #define ADD_LIMITED(s,v) \ -- atomic_add((v),&stats->s); \ -- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); -+ atomic_add_unchecked((v),&stats->s); \ -+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); - - - static void suni_hz(unsigned long from_timer) -diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c -index 5120a96..e2572bd 100644 ---- a/drivers/atm/uPD98402.c -+++ b/drivers/atm/uPD98402.c -@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze - struct sonet_stats tmp; - int error = 0; - -- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); -+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); - sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); - if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); - if (zero && !error) { -@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) - - - #define ADD_LIMITED(s,v) \ -- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ -- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ -- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } -+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ -+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ -+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } - - - static void stat_event(struct atm_dev *dev) -@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev) - if (reason & uPD98402_INT_PFM) stat_event(dev); - if (reason & uPD98402_INT_PCO) { - (void) GET(PCOCR); /* clear interrupt cause */ -- atomic_add(GET(HECCT), -+ atomic_add_unchecked(GET(HECCT), - &PRIV(dev)->sonet_stats.uncorr_hcs); - } - if ((reason & uPD98402_INT_RFO) && -@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev) - PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | - uPD98402_INT_LOS),PIMR); /* enable them */ - (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ -- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); -- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); -- 
atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); -+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); -+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); -+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); - return 0; - } - -diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c -index d889f56..17eb71e 100644 ---- a/drivers/atm/zatm.c -+++ b/drivers/atm/zatm.c -@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - } - if (!size) { - dev_kfree_skb_irq(skb); -- if (vcc) atomic_inc(&vcc->stats->rx_err); -+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); - continue; - } - if (!atm_charge(vcc,skb->truesize)) { -@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - skb->len = size; - ATM_SKB(skb)->vcc = vcc; - vcc->push(vcc,skb); -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - } - zout(pos & 0xffff,MTA(mbx)); - #if 0 /* probably a stupid idea */ -@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | - skb_queue_head(&zatm_vcc->backlog,skb); - break; - } -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - wake_up(&zatm_vcc->tx_wait); - } - -diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c -index a4760e0..51283cf 100644 ---- a/drivers/base/devtmpfs.c -+++ b/drivers/base/devtmpfs.c -@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir) - if (!thread) - return 0; - -- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); -+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL); - if (err) - printk(KERN_INFO "devtmpfs: error mounting %i\n", err); - else -diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c -index 84f7c7d..37cfd87 100644 ---- a/drivers/base/power/wakeup.c -+++ b/drivers/base/power/wakeup.c -@@ -29,14 +29,14 @@ bool events_check_enabled; - * They need to be modified together atomically, so it's better to use one - * atomic variable to hold them both. - */ --static atomic_t combined_event_count = ATOMIC_INIT(0); -+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0); - - #define IN_PROGRESS_BITS (sizeof(int) * 4) - #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) - - static void split_counters(unsigned int *cnt, unsigned int *inpr) - { -- unsigned int comb = atomic_read(&combined_event_count); -+ unsigned int comb = atomic_read_unchecked(&combined_event_count); - - *cnt = (comb >> IN_PROGRESS_BITS); - *inpr = comb & MAX_IN_PROGRESS; -@@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws) - ws->last_time = ktime_get(); - - /* Increment the counter of events in progress. */ -- atomic_inc(&combined_event_count); -+ atomic_inc_unchecked(&combined_event_count); - } - - /** -@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) - * Increment the counter of registered wakeup events and decrement the - * couter of wakeup events in progress simultaneously. 
- */ -- atomic_add(MAX_IN_PROGRESS, &combined_event_count); -+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count); - } - - /** -diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c -index e086fbb..398e1fe 100644 ---- a/drivers/block/DAC960.c -+++ b/drivers/block/DAC960.c -@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T - unsigned long flags; - int Channel, TargetID; - -+ pax_track_stack(); -+ - if (!init_dma_loaf(Controller->PCIDevice, &local_dma, - DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) + - sizeof(DAC960_SCSI_Inquiry_T) + -diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c -index c2f9b3e..5911988 100644 ---- a/drivers/block/cciss.c -+++ b/drivers/block/cciss.c -@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, - int err; - u32 cp; - -+ memset(&arg64, 0, sizeof(arg64)); -+ - err = 0; - err |= - copy_from_user(&arg64.LUN_info, &arg32->LUN_info, -@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h) - while (!list_empty(&h->reqQ)) { - c = list_entry(h->reqQ.next, CommandList_struct, list); - /* can't do anything if fifo is full */ -- if ((h->access.fifo_full(h))) { -+ if ((h->access->fifo_full(h))) { - dev_warn(&h->pdev->dev, "fifo full\n"); - break; - } -@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h) - h->Qdepth--; - - /* Tell the controller execute command */ -- h->access.submit_command(h, c); -+ h->access->submit_command(h, c); - - /* Put job onto the completed Q */ - addQ(&h->cmpQ, c); -@@ -3422,17 +3424,17 @@ startio: - - static inline unsigned long get_next_completion(ctlr_info_t *h) - { -- return h->access.command_completed(h); -+ return h->access->command_completed(h); - } - - static inline int interrupt_pending(ctlr_info_t *h) - { -- return h->access.intr_pending(h); -+ return h->access->intr_pending(h); - } - - static inline long interrupt_not_for_us(ctlr_info_t *h) - { -- return ((h->access.intr_pending(h) == 0) || -+ return ((h->access->intr_pending(h) == 0) || - (h->interrupts_enabled == 0)); - } - -@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h) - u32 a; - - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) -- return h->access.command_completed(h); -+ return h->access->command_completed(h); - - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ -@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) - trans_support & CFGTBL_Trans_use_short_tags); - - /* Change the access methods to the performant access methods */ -- h->access = SA5_performant_access; -+ h->access = &SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; - - return; -@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h) - if (prod_index < 0) - return -ENODEV; - h->product_name = products[prod_index].product_name; -- h->access = *(products[prod_index].access); -+ h->access = products[prod_index].access; - - if (cciss_board_disabled(h)) { - dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); -@@ -5009,7 +5011,7 @@ reinit_after_soft_reset: - } - - /* make sure the board interrupts are off */ -- h->access.set_intr_mask(h, CCISS_INTR_OFF); -+ h->access->set_intr_mask(h, CCISS_INTR_OFF); - rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); - if (rc) - goto clean2; -@@ -5061,7 +5063,7 @@ reinit_after_soft_reset: - * fake ones to scoop up any residual completions. 
- */ - spin_lock_irqsave(&h->lock, flags); -- h->access.set_intr_mask(h, CCISS_INTR_OFF); -+ h->access->set_intr_mask(h, CCISS_INTR_OFF); - spin_unlock_irqrestore(&h->lock, flags); - free_irq(h->intr[PERF_MODE_INT], h); - rc = cciss_request_irq(h, cciss_msix_discard_completions, -@@ -5081,9 +5083,9 @@ reinit_after_soft_reset: - dev_info(&h->pdev->dev, "Board READY.\n"); - dev_info(&h->pdev->dev, - "Waiting for stale completions to drain.\n"); -- h->access.set_intr_mask(h, CCISS_INTR_ON); -+ h->access->set_intr_mask(h, CCISS_INTR_ON); - msleep(10000); -- h->access.set_intr_mask(h, CCISS_INTR_OFF); -+ h->access->set_intr_mask(h, CCISS_INTR_OFF); - - rc = controller_reset_failed(h->cfgtable); - if (rc) -@@ -5106,7 +5108,7 @@ reinit_after_soft_reset: - cciss_scsi_setup(h); - - /* Turn the interrupts on so we can service requests */ -- h->access.set_intr_mask(h, CCISS_INTR_ON); -+ h->access->set_intr_mask(h, CCISS_INTR_ON); - - /* Get the firmware version */ - inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); -@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev) - kfree(flush_buf); - if (return_code != IO_OK) - dev_warn(&h->pdev->dev, "Error flushing cache\n"); -- h->access.set_intr_mask(h, CCISS_INTR_OFF); -+ h->access->set_intr_mask(h, CCISS_INTR_OFF); - free_irq(h->intr[PERF_MODE_INT], h); - } - -diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h -index c049548..a09cb6e 100644 ---- a/drivers/block/cciss.h -+++ b/drivers/block/cciss.h -@@ -100,7 +100,7 @@ struct ctlr_info - /* information about each logical volume */ - drive_info_struct *drv[CISS_MAX_LUN]; - -- struct access_method access; -+ struct access_method *access; - - /* queue and queue Info */ - struct list_head reqQ; -diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c -index b2fceb5..87fec83 100644 ---- a/drivers/block/cpqarray.c -+++ b/drivers/block/cpqarray.c -@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) - if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) { - goto Enomem4; - } -- hba[i]->access.set_intr_mask(hba[i], 0); -+ hba[i]->access->set_intr_mask(hba[i], 0); - if (request_irq(hba[i]->intr, do_ida_intr, - IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i])) - { -@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) - add_timer(&hba[i]->timer); - - /* Enable IRQ now that spinlock and rate limit timer are set up */ -- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY); -+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY); - - for(j=0; j<NWD; j++) { - struct gendisk *disk = ida_gendisk[i][j]; -@@ -694,7 +694,7 @@ DBGINFO( - for(i=0; i<NR_PRODUCTS; i++) { - if (board_id == products[i].board_id) { - c->product_name = products[i].product_name; -- c->access = *(products[i].access); -+ c->access = products[i].access; - break; - } - } -@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void) - hba[ctlr]->intr = intr; - sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr); - hba[ctlr]->product_name = products[j].product_name; -- hba[ctlr]->access = *(products[j].access); -+ hba[ctlr]->access = products[j].access; - hba[ctlr]->ctlr = ctlr; - hba[ctlr]->board_id = board_id; - hba[ctlr]->pci_dev = NULL; /* not PCI */ -@@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q) - struct scatterlist tmp_sg[SG_MAX]; - int i, dir, seg; - -+ pax_track_stack(); -+ - queue_next: - creq = blk_peek_request(q); - if (!creq) -@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t 
*h) - - while((c = h->reqQ) != NULL) { - /* Can't do anything if we're busy */ -- if (h->access.fifo_full(h) == 0) -+ if (h->access->fifo_full(h) == 0) - return; - - /* Get the first entry from the request Q */ -@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h) - h->Qdepth--; - - /* Tell the controller to do our bidding */ -- h->access.submit_command(h, c); -+ h->access->submit_command(h, c); - - /* Get onto the completion Q */ - addQ(&h->cmpQ, c); -@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) - unsigned long flags; - __u32 a,a1; - -- istat = h->access.intr_pending(h); -+ istat = h->access->intr_pending(h); - /* Is this interrupt for us? */ - if (istat == 0) - return IRQ_NONE; -@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) - */ - spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); - if (istat & FIFO_NOT_EMPTY) { -- while((a = h->access.command_completed(h))) { -+ while((a = h->access->command_completed(h))) { - a1 = a; a &= ~3; - if ((c = h->cmpQ) == NULL) - { -@@ -1449,11 +1451,11 @@ static int sendcmd( - /* - * Disable interrupt - */ -- info_p->access.set_intr_mask(info_p, 0); -+ info_p->access->set_intr_mask(info_p, 0); - /* Make sure there is room in the command FIFO */ - /* Actually it should be completely empty at this time. */ - for (i = 200000; i > 0; i--) { -- temp = info_p->access.fifo_full(info_p); -+ temp = info_p->access->fifo_full(info_p); - if (temp != 0) { - break; - } -@@ -1466,7 +1468,7 @@ DBG( - /* - * Send the cmd - */ -- info_p->access.submit_command(info_p, c); -+ info_p->access->submit_command(info_p, c); - complete = pollcomplete(ctlr); - - pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, -@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host) - * we check the new geometry. Then turn interrupts back on when - * we're done. - */ -- host->access.set_intr_mask(host, 0); -+ host->access->set_intr_mask(host, 0); - getgeometry(ctlr); -- host->access.set_intr_mask(host, FIFO_NOT_EMPTY); -+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY); - - for(i=0; i<NWD; i++) { - struct gendisk *disk = ida_gendisk[ctlr][i]; -@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr) - /* Wait (up to 2 seconds) for a command to complete */ - - for (i = 200000; i > 0; i--) { -- done = hba[ctlr]->access.command_completed(hba[ctlr]); -+ done = hba[ctlr]->access->command_completed(hba[ctlr]); - if (done == 0) { - udelay(10); /* a short fixed delay */ - } else -diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h -index be73e9d..7fbf140 100644 ---- a/drivers/block/cpqarray.h -+++ b/drivers/block/cpqarray.h -@@ -99,7 +99,7 @@ struct ctlr_info { - drv_info_t drv[NWD]; - struct proc_dir_entry *proc; - -- struct access_method access; -+ struct access_method *access; - - cmdlist_t *reqQ; - cmdlist_t *cmpQ; -diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h -index ef2ceed..c9cb18e 100644 ---- a/drivers/block/drbd/drbd_int.h -+++ b/drivers/block/drbd/drbd_int.h -@@ -737,7 +737,7 @@ struct drbd_request; - struct drbd_epoch { - struct list_head list; - unsigned int barrier_nr; -- atomic_t epoch_size; /* increased on every request added. */ -+ atomic_unchecked_t epoch_size; /* increased on every request added. */ - atomic_t active; /* increased on every req. added, and dec on every finished. 
*/ - unsigned long flags; - }; -@@ -1109,7 +1109,7 @@ struct drbd_conf { - void *int_dig_in; - void *int_dig_vv; - wait_queue_head_t seq_wait; -- atomic_t packet_seq; -+ atomic_unchecked_t packet_seq; - unsigned int peer_seq; - spinlock_t peer_seq_lock; - unsigned int minor; -@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname, - - static inline void drbd_tcp_cork(struct socket *sock) - { -- int __user val = 1; -+ int val = 1; - (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, -- (char __user *)&val, sizeof(val)); -+ (char __force_user *)&val, sizeof(val)); - } - - static inline void drbd_tcp_uncork(struct socket *sock) - { -- int __user val = 0; -+ int val = 0; - (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, -- (char __user *)&val, sizeof(val)); -+ (char __force_user *)&val, sizeof(val)); - } - - static inline void drbd_tcp_nodelay(struct socket *sock) - { -- int __user val = 1; -+ int val = 1; - (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, -- (char __user *)&val, sizeof(val)); -+ (char __force_user *)&val, sizeof(val)); - } - - static inline void drbd_tcp_quickack(struct socket *sock) - { -- int __user val = 2; -+ int val = 2; - (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, -- (char __user *)&val, sizeof(val)); -+ (char __force_user *)&val, sizeof(val)); - } - - void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo); -diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c -index 0358e55..bc33689 100644 ---- a/drivers/block/drbd/drbd_main.c -+++ b/drivers/block/drbd/drbd_main.c -@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, - p.sector = sector; - p.block_id = block_id; - p.blksize = blksize; -- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); -+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq)); - - if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) - return false; -@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) - p.sector = cpu_to_be64(req->sector); - p.block_id = (unsigned long)req; - p.seq_num = cpu_to_be32(req->seq_num = -- atomic_add_return(1, &mdev->packet_seq)); -+ atomic_add_return_unchecked(1, &mdev->packet_seq)); - - dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); - -@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) - atomic_set(&mdev->unacked_cnt, 0); - atomic_set(&mdev->local_cnt, 0); - atomic_set(&mdev->net_cnt, 0); -- atomic_set(&mdev->packet_seq, 0); -+ atomic_set_unchecked(&mdev->packet_seq, 0); - atomic_set(&mdev->pp_in_use, 0); - atomic_set(&mdev->pp_in_use_by_net, 0); - atomic_set(&mdev->rs_sect_in, 0); -@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) - mdev->receiver.t_state); - - /* no need to lock it, I'm the only thread alive */ -- if (atomic_read(&mdev->current_epoch->epoch_size) != 0) -- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0) -+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size)); - mdev->al_writ_cnt = - mdev->bm_writ_cnt = - mdev->read_cnt = -diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c -index 0feab26..5d9b3dd 100644 ---- a/drivers/block/drbd/drbd_nl.c -+++ b/drivers/block/drbd/drbd_nl.c -@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct 
netlink_skb_parms - module_put(THIS_MODULE); - } - --static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ -+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ - - static unsigned short * - __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, -@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) - cn_reply->id.idx = CN_IDX_DRBD; - cn_reply->id.val = CN_VAL_DRBD; - -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); - cn_reply->ack = 0; /* not used here. */ - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + - (int)((char *)tl - (char *)reply->tag_list); -@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) - cn_reply->id.idx = CN_IDX_DRBD; - cn_reply->id.val = CN_VAL_DRBD; - -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); - cn_reply->ack = 0; /* not used here. */ - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + - (int)((char *)tl - (char *)reply->tag_list); -@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, - cn_reply->id.idx = CN_IDX_DRBD; - cn_reply->id.val = CN_VAL_DRBD; - -- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); -+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq); - cn_reply->ack = 0; // not used here. - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + - (int)((char*)tl - (char*)reply->tag_list); -@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev) - cn_reply->id.idx = CN_IDX_DRBD; - cn_reply->id.val = CN_VAL_DRBD; - -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); - cn_reply->ack = 0; /* not used here. 
*/ - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + - (int)((char *)tl - (char *)reply->tag_list); -diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c -index 43beaca..4a5b1dd 100644 ---- a/drivers/block/drbd/drbd_receiver.c -+++ b/drivers/block/drbd/drbd_receiver.c -@@ -894,7 +894,7 @@ retry: - sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10; - sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; - -- atomic_set(&mdev->packet_seq, 0); -+ atomic_set_unchecked(&mdev->packet_seq, 0); - mdev->peer_seq = 0; - - drbd_thread_start(&mdev->asender); -@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, - do { - next_epoch = NULL; - -- epoch_size = atomic_read(&epoch->epoch_size); -+ epoch_size = atomic_read_unchecked(&epoch->epoch_size); - - switch (ev & ~EV_CLEANUP) { - case EV_PUT: -@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, - rv = FE_DESTROYED; - } else { - epoch->flags = 0; -- atomic_set(&epoch->epoch_size, 0); -+ atomic_set_unchecked(&epoch->epoch_size, 0); - /* atomic_set(&epoch->active, 0); is already zero */ - if (rv == FE_STILL_LIVE) - rv = FE_RECYCLED; -@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign - drbd_wait_ee_list_empty(mdev, &mdev->active_ee); - drbd_flush(mdev); - -- if (atomic_read(&mdev->current_epoch->epoch_size)) { -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { - epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); - if (epoch) - break; - } - - epoch = mdev->current_epoch; -- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); -+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0); - - D_ASSERT(atomic_read(&epoch->active) == 0); - D_ASSERT(epoch->flags == 0); -@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign - } - - epoch->flags = 0; -- atomic_set(&epoch->epoch_size, 0); -+ atomic_set_unchecked(&epoch->epoch_size, 0); - atomic_set(&epoch->active, 0); - - spin_lock(&mdev->epoch_lock); -- if (atomic_read(&mdev->current_epoch->epoch_size)) { -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { - list_add(&epoch->list, &mdev->current_epoch->list); - mdev->current_epoch = epoch; - mdev->epochs++; -@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned - spin_unlock(&mdev->peer_seq_lock); - - drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); -- atomic_inc(&mdev->current_epoch->epoch_size); -+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size); - return drbd_drain_block(mdev, data_size); - } - -@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned - - spin_lock(&mdev->epoch_lock); - e->epoch = mdev->current_epoch; -- atomic_inc(&e->epoch->epoch_size); -+ atomic_inc_unchecked(&e->epoch->epoch_size); - atomic_inc(&e->epoch->active); - spin_unlock(&mdev->epoch_lock); - -@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) - D_ASSERT(list_empty(&mdev->done_ee)); - - /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ -- atomic_set(&mdev->current_epoch->epoch_size, 0); -+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0); - D_ASSERT(list_empty(&mdev->current_epoch->list)); - } - -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 4720c7a..2c49af1 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -283,7 +283,7 @@ 
static int __do_lo_send_write(struct file *file, - mm_segment_t old_fs = get_fs(); - - set_fs(get_ds()); -- bw = file->f_op->write(file, buf, len, &pos); -+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos); - set_fs(old_fs); - if (likely(bw == len)) - return 0; -diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c -index f533f33..6177bcb 100644 ---- a/drivers/block/nbd.c -+++ b/drivers/block/nbd.c -@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, - struct kvec iov; - sigset_t blocked, oldset; - -+ pax_track_stack(); -+ - if (unlikely(!sock)) { - printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n", - lo->disk->disk_name, (send ? "send" : "recv")); -@@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q) - static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, - unsigned int cmd, unsigned long arg) - { -+ pax_track_stack(); -+ - switch (cmd) { - case NBD_DISCONNECT: { - struct request sreq; -diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 423fd56..06d3be0 100644 ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig" - - config DEVKMEM - bool "/dev/kmem virtual device support" -- default y -+ default n -+ depends on !GRKERNSEC_KMEM - help - Say Y here if you want to support the /dev/kmem device. The - /dev/kmem device is rarely used, but can be used for certain -@@ -596,6 +597,7 @@ config DEVPORT - bool - depends on !M68K - depends on ISA || PCI -+ depends on !GRKERNSEC_KMEM - default y - - source "drivers/s390/char/Kconfig" -diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c -index 2e04433..22afc64 100644 ---- a/drivers/char/agp/frontend.c -+++ b/drivers/char/agp/frontend.c -@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) - if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) - return -EFAULT; - -- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) -+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) - return -EFAULT; - - client = agp_find_client_by_pid(reserve.pid); -diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c -index 095ab90..afad0a4 100644 ---- a/drivers/char/briq_panel.c -+++ b/drivers/char/briq_panel.c -@@ -9,6 +9,7 @@ - #include <linux/types.h> - #include <linux/errno.h> - #include <linux/tty.h> -+#include <linux/mutex.h> - #include <linux/timer.h> - #include <linux/kernel.h> - #include <linux/wait.h> -@@ -34,6 +35,7 @@ static int vfd_is_open; - static unsigned char vfd[40]; - static int vfd_cursor; - static unsigned char ledpb, led; -+static DEFINE_MUTEX(vfd_mutex); - - static void update_vfd(void) - { -@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_ - if (!vfd_is_open) - return -EBUSY; - -+ mutex_lock(&vfd_mutex); - for (;;) { - char c; - if (!indx) - break; -- if (get_user(c, buf)) -+ if (get_user(c, buf)) { -+ mutex_unlock(&vfd_mutex); - return -EFAULT; -+ } - if (esc) { - set_led(c); - esc = 0; -@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_ - buf++; - } - update_vfd(); -+ mutex_unlock(&vfd_mutex); - - return len; - } -diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c -index f773a9d..65cd683 100644 ---- a/drivers/char/genrtc.c -+++ b/drivers/char/genrtc.c -@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file, - switch (cmd) { - - case 
RTC_PLL_GET: -+ memset(&pll, 0, sizeof(pll)); - if (get_rtc_pll(&pll)) - return -EINVAL; - else -diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c -index 0833896..cccce52 100644 ---- a/drivers/char/hpet.c -+++ b/drivers/char/hpet.c -@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, - } - - static int --hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, -+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, - struct hpet_info *info) - { - struct hpet_timer __iomem *timer; -diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c -index 58c0e63..25aed94 100644 ---- a/drivers/char/ipmi/ipmi_msghandler.c -+++ b/drivers/char/ipmi/ipmi_msghandler.c -@@ -415,7 +415,7 @@ struct ipmi_smi { - struct proc_dir_entry *proc_dir; - char proc_dir_name[10]; - -- atomic_t stats[IPMI_NUM_STATS]; -+ atomic_unchecked_t stats[IPMI_NUM_STATS]; - - /* - * run_to_completion duplicate of smb_info, smi_info -@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); - - - #define ipmi_inc_stat(intf, stat) \ -- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) -+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) - #define ipmi_get_stat(intf, stat) \ -- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) -+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) - - static int is_lan_addr(struct ipmi_addr *addr) - { -@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, - INIT_LIST_HEAD(&intf->cmd_rcvrs); - init_waitqueue_head(&intf->waitq); - for (i = 0; i < IPMI_NUM_STATS; i++) -- atomic_set(&intf->stats[i], 0); -+ atomic_set_unchecked(&intf->stats[i], 0); - - intf->proc_dir = NULL; - -@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str) - struct ipmi_smi_msg smi_msg; - struct ipmi_recv_msg recv_msg; - -+ pax_track_stack(); -+ - si = (struct ipmi_system_interface_addr *) &addr; - si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - si->channel = IPMI_BMC_CHANNEL; -diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c -index 9397ab4..d01bee1 100644 ---- a/drivers/char/ipmi/ipmi_si_intf.c -+++ b/drivers/char/ipmi/ipmi_si_intf.c -@@ -277,7 +277,7 @@ struct smi_info { - unsigned char slave_addr; - - /* Counters and things for the proc filesystem. 
*/ -- atomic_t stats[SI_NUM_STATS]; -+ atomic_unchecked_t stats[SI_NUM_STATS]; - - struct task_struct *thread; - -@@ -286,9 +286,9 @@ struct smi_info { - }; - - #define smi_inc_stat(smi, stat) \ -- atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) -+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) - #define smi_get_stat(smi, stat) \ -- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) -+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) - - #define SI_MAX_PARMS 4 - -@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi) - atomic_set(&new_smi->req_events, 0); - new_smi->run_to_completion = 0; - for (i = 0; i < SI_NUM_STATS; i++) -- atomic_set(&new_smi->stats[i], 0); -+ atomic_set_unchecked(&new_smi->stats[i], 0); - - new_smi->interrupt_disabled = 1; - atomic_set(&new_smi->stop_operation, 0); -diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c -index 1aeaaba..e018570 100644 ---- a/drivers/char/mbcs.c -+++ b/drivers/char/mbcs.c -@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev) - return 0; - } - --static const struct cx_device_id __devinitdata mbcs_id_table[] = { -+static const struct cx_device_id __devinitconst mbcs_id_table[] = { - { - .part_num = MBCS_PART_NUM, - .mfg_num = MBCS_MFG_NUM, -diff --git a/drivers/char/mem.c b/drivers/char/mem.c -index 8fc04b4..cebdeec 100644 ---- a/drivers/char/mem.c -+++ b/drivers/char/mem.c -@@ -18,6 +18,7 @@ - #include <linux/raw.h> - #include <linux/tty.h> - #include <linux/capability.h> -+#include <linux/security.h> - #include <linux/ptrace.h> - #include <linux/device.h> - #include <linux/highmem.h> -@@ -34,6 +35,10 @@ - # include <linux/efi.h> - #endif - -+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) -+extern const struct file_operations grsec_fops; -+#endif -+ - static inline unsigned long size_inside_page(unsigned long start, - unsigned long size) - { -@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) { -+#ifdef CONFIG_GRKERNSEC_KMEM -+ gr_handle_mem_readwrite(from, to); -+#else - printk(KERN_INFO - "Program %s tried to access /dev/mem between %Lx->%Lx.\n", - current->comm, from, to); -+#endif - return 0; - } - cursor += PAGE_SIZE; -@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) - } - return 1; - } -+#elif defined(CONFIG_GRKERNSEC_KMEM) -+static inline int range_is_allowed(unsigned long pfn, unsigned long size) -+{ -+ return 0; -+} - #else - static inline int range_is_allowed(unsigned long pfn, unsigned long size) - { -@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, - - while (count > 0) { - unsigned long remaining; -+ char *temp; - - sz = size_inside_page(p, count); - -@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf, - if (!ptr) - return -EFAULT; - -- remaining = copy_to_user(buf, ptr, sz); -+#ifdef CONFIG_PAX_USERCOPY -+ temp = kmalloc(sz, GFP_KERNEL); -+ if (!temp) { -+ unxlate_dev_mem_ptr(p, ptr); -+ return -ENOMEM; -+ } -+ memcpy(temp, ptr, sz); -+#else -+ temp = ptr; -+#endif -+ -+ remaining = copy_to_user(buf, temp, sz); -+ -+#ifdef CONFIG_PAX_USERCOPY -+ kfree(temp); -+#endif -+ - unxlate_dev_mem_ptr(p, ptr); - if (remaining) - return -EFAULT; -@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - size_t count, loff_t *ppos) - { - unsigned long p = *ppos; -- ssize_t low_count, read, sz; -+ ssize_t low_count, read, sz, 
err = 0; - char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ -- int err = 0; - - read = 0; - if (p < (unsigned long) high_memory) { -@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - } - #endif - while (low_count > 0) { -+ char *temp; -+ - sz = size_inside_page(p, low_count); - - /* -@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf, - */ - kbuf = xlate_dev_kmem_ptr((char *)p); - -- if (copy_to_user(buf, kbuf, sz)) -+#ifdef CONFIG_PAX_USERCOPY -+ temp = kmalloc(sz, GFP_KERNEL); -+ if (!temp) -+ return -ENOMEM; -+ memcpy(temp, kbuf, sz); -+#else -+ temp = kbuf; -+#endif -+ -+ err = copy_to_user(buf, temp, sz); -+ -+#ifdef CONFIG_PAX_USERCOPY -+ kfree(temp); -+#endif -+ -+ if (err) - return -EFAULT; - buf += sz; - p += sz; -@@ -866,6 +913,9 @@ static const struct memdev { - #ifdef CONFIG_CRASH_DUMP - [12] = { "oldmem", 0, &oldmem_fops, NULL }, - #endif -+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) -+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, -+#endif - }; - - static int memory_open(struct inode *inode, struct file *filp) -diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c -index da3cfee..a5a6606 100644 ---- a/drivers/char/nvram.c -+++ b/drivers/char/nvram.c -@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf, - - spin_unlock_irq(&rtc_lock); - -- if (copy_to_user(buf, contents, tmp - contents)) -+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) - return -EFAULT; - - *ppos = i; -diff --git a/drivers/char/random.c b/drivers/char/random.c -index c35a785..6d82202 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -261,8 +261,13 @@ - /* - * Configuration information - */ -+#ifdef CONFIG_GRKERNSEC_RANDNET -+#define INPUT_POOL_WORDS 512 -+#define OUTPUT_POOL_WORDS 128 -+#else - #define INPUT_POOL_WORDS 128 - #define OUTPUT_POOL_WORDS 32 -+#endif - #define SEC_XFER_SIZE 512 - #define EXTRACT_SIZE 10 - -@@ -300,10 +305,17 @@ static struct poolinfo { - int poolwords; - int tap1, tap2, tap3, tap4, tap5; - } poolinfo_table[] = { -+#ifdef CONFIG_GRKERNSEC_RANDNET -+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ -+ { 512, 411, 308, 208, 104, 1 }, -+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ -+ { 128, 103, 76, 51, 25, 1 }, -+#else - /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ - { 128, 103, 76, 51, 25, 1 }, - /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ - { 32, 26, 20, 14, 7, 1 }, -+#endif - #if 0 - /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ - { 2048, 1638, 1231, 819, 411, 1 }, -@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, - - extract_buf(r, tmp); - i = min_t(int, nbytes, EXTRACT_SIZE); -- if (copy_to_user(buf, tmp, i)) { -+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { - ret = -EFAULT; - break; - } -@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid); - #include <linux/sysctl.h> - - static int min_read_thresh = 8, min_write_thresh; --static int max_read_thresh = INPUT_POOL_WORDS * 32; -+static int max_read_thresh = OUTPUT_POOL_WORDS * 32; - static int max_write_thresh = INPUT_POOL_WORDS * 32; - static char sysctl_bootid[16]; - -diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c -index 1ee8ce7..b778bef 100644 ---- a/drivers/char/sonypi.c -+++ b/drivers/char/sonypi.c -@@ -55,6 +55,7 @@ - #include <asm/uaccess.h> - #include <asm/io.h> - #include <asm/system.h> 
-+#include <asm/local.h> - - #include <linux/sonypi.h> - -@@ -491,7 +492,7 @@ static struct sonypi_device { - spinlock_t fifo_lock; - wait_queue_head_t fifo_proc_list; - struct fasync_struct *fifo_async; -- int open_count; -+ local_t open_count; - int model; - struct input_dev *input_jog_dev; - struct input_dev *input_key_dev; -@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on) - static int sonypi_misc_release(struct inode *inode, struct file *file) - { - mutex_lock(&sonypi_device.lock); -- sonypi_device.open_count--; -+ local_dec(&sonypi_device.open_count); - mutex_unlock(&sonypi_device.lock); - return 0; - } -@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) - { - mutex_lock(&sonypi_device.lock); - /* Flush input queue on first open */ -- if (!sonypi_device.open_count) -+ if (!local_read(&sonypi_device.open_count)) - kfifo_reset(&sonypi_device.fifo); -- sonypi_device.open_count++; -+ local_inc(&sonypi_device.open_count); - mutex_unlock(&sonypi_device.lock); - - return 0; -diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c -index 9ca5c02..7ce352c 100644 ---- a/drivers/char/tpm/tpm.c -+++ b/drivers/char/tpm/tpm.c -@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, - chip->vendor.req_complete_val) - goto out_recv; - -- if ((status == chip->vendor.req_canceled)) { -+ if (status == chip->vendor.req_canceled) { - dev_err(chip->dev, "Operation Canceled\n"); - rc = -ECANCELED; - goto out; -@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, - - struct tpm_chip *chip = dev_get_drvdata(dev); - -+ pax_track_stack(); -+ - tpm_cmd.header.in = tpm_readpubek_header; - err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, - "attempting to read the PUBEK"); -diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c -index 0636520..169c1d0 100644 ---- a/drivers/char/tpm/tpm_bios.c -+++ b/drivers/char/tpm/tpm_bios.c -@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) - event = addr; - - if ((event->event_type == 0 && event->event_size == 0) || -- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) -+ (event->event_size >= limit - addr - sizeof(struct tcpa_event))) - return NULL; - - return addr; -@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v, - return NULL; - - if ((event->event_type == 0 && event->event_size == 0) || -- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) -+ (event->event_size >= limit - v - sizeof(struct tcpa_event))) - return NULL; - - (*pos)++; -@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) - int i; - - for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) -- seq_putc(m, data[i]); -+ if (!seq_putc(m, data[i])) -+ return -EFAULT; - - return 0; - } -@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log) - log->bios_event_log_end = log->bios_event_log + len; - - virt = acpi_os_map_memory(start, len); -+ if (!virt) { -+ kfree(log->bios_event_log); -+ log->bios_event_log = NULL; -+ return -EFAULT; -+ } - -- memcpy(log->bios_event_log, virt, len); -+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len); - - acpi_os_unmap_memory(virt, len); - return 0; -diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c -index fb68b12..0f6c6ca 100644 ---- a/drivers/char/virtio_console.c -+++ 
b/drivers/char/virtio_console.c -@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, - if (to_user) { - ssize_t ret; - -- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); -+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count); - if (ret) - return -EFAULT; - } else { -@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, - if (!port_has_data(port) && !port->host_connected) - return 0; - -- return fill_readbuf(port, ubuf, count, true); -+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true); - } - - static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, -diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c -index a84250a..68c725e 100644 ---- a/drivers/crypto/hifn_795x.c -+++ b/drivers/crypto/hifn_795x.c -@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) - 0xCA, 0x34, 0x2B, 0x2E}; - struct scatterlist sg; - -+ pax_track_stack(); -+ - memset(src, 0, sizeof(src)); - memset(ctx.key, 0, sizeof(ctx.key)); - -diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c -index db33d30..7823369 100644 ---- a/drivers/crypto/padlock-aes.c -+++ b/drivers/crypto/padlock-aes.c -@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - struct crypto_aes_ctx gen_aes; - int cpu; - -+ pax_track_stack(); -+ - if (key_len % 8) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; -diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c -index 9a8bebc..b1e4989 100644 ---- a/drivers/edac/amd64_edac.c -+++ b/drivers/edac/amd64_edac.c -@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) - * PCI core identifies what devices are on a system during boot, and then - * inquiry this table to see if this driver is for a given device found. 
- */ --static const struct pci_device_id amd64_pci_table[] __devinitdata = { -+static const struct pci_device_id amd64_pci_table[] __devinitconst = { - { - .vendor = PCI_VENDOR_ID_AMD, - .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, -diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c -index e47e73b..348e0bd 100644 ---- a/drivers/edac/amd76x_edac.c -+++ b/drivers/edac/amd76x_edac.c -@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { -+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - AMD762}, -diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c -index 1af531a..3a8ff27 100644 ---- a/drivers/edac/e752x_edac.c -+++ b/drivers/edac/e752x_edac.c -@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { -+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7520}, -diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c -index 6ffb6d2..383d8d7 100644 ---- a/drivers/edac/e7xxx_edac.c -+++ b/drivers/edac/e7xxx_edac.c -@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { -+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7205}, -diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c -index 495198a..ac08c85 100644 ---- a/drivers/edac/edac_pci_sysfs.c -+++ b/drivers/edac/edac_pci_sysfs.c -@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */ - static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ - static int edac_pci_poll_msec = 1000; /* one second workq period */ - --static atomic_t pci_parity_count = ATOMIC_INIT(0); --static atomic_t pci_nonparity_count = ATOMIC_INIT(0); -+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); -+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); - - static struct kobject *edac_pci_top_main_kobj; - static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); -@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - edac_printk(KERN_CRIT, EDAC_PCI, - "Signaled System Error on %s\n", - pci_name(dev)); -- atomic_inc(&pci_nonparity_count); -+ atomic_inc_unchecked(&pci_nonparity_count); - } - - if (status & (PCI_STATUS_PARITY)) { -@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - "Master Data Parity Error on %s\n", - pci_name(dev)); - -- atomic_inc(&pci_parity_count); -+ atomic_inc_unchecked(&pci_parity_count); - } - - if (status & (PCI_STATUS_DETECTED_PARITY)) { -@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - "Detected Parity Error on %s\n", - pci_name(dev)); - -- atomic_inc(&pci_parity_count); -+ atomic_inc_unchecked(&pci_parity_count); - } - } - -@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " - "Signaled System Error on %s\n", - pci_name(dev)); -- atomic_inc(&pci_nonparity_count); -+ atomic_inc_unchecked(&pci_nonparity_count); - } - - if (status & (PCI_STATUS_PARITY)) 
{ -@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - "Master Data Parity Error on " - "%s\n", pci_name(dev)); - -- atomic_inc(&pci_parity_count); -+ atomic_inc_unchecked(&pci_parity_count); - } - - if (status & (PCI_STATUS_DETECTED_PARITY)) { -@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) - "Detected Parity Error on %s\n", - pci_name(dev)); - -- atomic_inc(&pci_parity_count); -+ atomic_inc_unchecked(&pci_parity_count); - } - } - } -@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void) - if (!check_pci_errors) - return; - -- before_count = atomic_read(&pci_parity_count); -+ before_count = atomic_read_unchecked(&pci_parity_count); - - /* scan all PCI devices looking for a Parity Error on devices and - * bridges. -@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void) - /* Only if operator has selected panic on PCI Error */ - if (edac_pci_get_panic_on_pe()) { - /* If the count is different 'after' from 'before' */ -- if (before_count != atomic_read(&pci_parity_count)) -+ if (before_count != atomic_read_unchecked(&pci_parity_count)) - panic("EDAC: PCI Parity Error"); - } - } -diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c -index c0510b3..6e2a954 100644 ---- a/drivers/edac/i3000_edac.c -+++ b/drivers/edac/i3000_edac.c -@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i3000_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I3000}, -diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c -index aa08497..7e6822a 100644 ---- a/drivers/edac/i3200_edac.c -+++ b/drivers/edac/i3200_edac.c -@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i3200_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I3200}, -diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c -index 4dc3ac2..67d05a6 100644 ---- a/drivers/edac/i5000_edac.c -+++ b/drivers/edac/i5000_edac.c -@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev) - * - * The "E500P" device is the first device supported. - */ --static const struct pci_device_id i5000_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), - .driver_data = I5000P}, - -diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c -index bcbdeec..9886d16 100644 ---- a/drivers/edac/i5100_edac.c -+++ b/drivers/edac/i5100_edac.c -@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i5100_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = { - /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... 
*/ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, - { 0, } -diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c -index 74d6ec34..baff517 100644 ---- a/drivers/edac/i5400_edac.c -+++ b/drivers/edac/i5400_edac.c -@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev) - * - * The "E500P" device is the first device supported. - */ --static const struct pci_device_id i5400_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, - {0,} /* 0 terminated list. */ - }; -diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c -index a76fe83..15479e6 100644 ---- a/drivers/edac/i7300_edac.c -+++ b/drivers/edac/i7300_edac.c -@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev) - * - * Has only 8086:360c PCI ID - */ --static const struct pci_device_id i7300_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, - {0,} /* 0 terminated list. */ - }; -diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c -index f6cf448..3f612e9 100644 ---- a/drivers/edac/i7core_edac.c -+++ b/drivers/edac/i7core_edac.c -@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = { - /* - * pci_device_id table for which devices we are looking for - */ --static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, - {0,} /* 0 terminated list. 
*/ -diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c -index 4329d39..f3022ef 100644 ---- a/drivers/edac/i82443bxgx_edac.c -+++ b/drivers/edac/i82443bxgx_edac.c -@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) - - EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); - --static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, -diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c -index 931a057..fd28340 100644 ---- a/drivers/edac/i82860_edac.c -+++ b/drivers/edac/i82860_edac.c -@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82860}, -diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c -index 33864c6..01edc61 100644 ---- a/drivers/edac/i82875p_edac.c -+++ b/drivers/edac/i82875p_edac.c -@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82875P}, -diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c -index a5da732..983363b 100644 ---- a/drivers/edac/i82975x_edac.c -+++ b/drivers/edac/i82975x_edac.c -@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = { -+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82975X -diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h -index 795a320..3bbc3d3 100644 ---- a/drivers/edac/mce_amd.h -+++ b/drivers/edac/mce_amd.h -@@ -83,7 +83,7 @@ struct amd_decoder_ops { - bool (*dc_mce)(u16, u8); - bool (*ic_mce)(u16, u8); - bool (*nb_mce)(u16, u8); --}; -+} __no_const; - - void amd_report_gart_errors(bool); - void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)); -diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c -index b153674..ad2ba9b 100644 ---- a/drivers/edac/r82600_edac.c -+++ b/drivers/edac/r82600_edac.c -@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { -+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = { - { - PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) - }, -diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c -index b6f47de..c5acf3a 100644 ---- a/drivers/edac/x38_edac.c -+++ b/drivers/edac/x38_edac.c -@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev) - edac_mc_free(mci); - } - --static const struct pci_device_id x38_pci_tbl[] __devinitdata = { -+static const struct pci_device_id x38_pci_tbl[] __devinitconst = { - { - PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 
0, 0, - X38}, -diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c -index 85661b0..c784559a 100644 ---- a/drivers/firewire/core-card.c -+++ b/drivers/firewire/core-card.c -@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref) - - void fw_core_remove_card(struct fw_card *card) - { -- struct fw_card_driver dummy_driver = dummy_driver_template; -+ fw_card_driver_no_const dummy_driver = dummy_driver_template; - - card->driver->update_phy_reg(card, 4, - PHY_LINK_ACTIVE | PHY_CONTENDER, 0); -diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c -index 4799393..37bd3ab 100644 ---- a/drivers/firewire/core-cdev.c -+++ b/drivers/firewire/core-cdev.c -@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client, - int ret; - - if ((request->channels == 0 && request->bandwidth == 0) || -- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || -- request->bandwidth < 0) -+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) - return -EINVAL; - - r = kmalloc(sizeof(*r), GFP_KERNEL); -diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c -index 334b82a..ea5261d 100644 ---- a/drivers/firewire/core-transaction.c -+++ b/drivers/firewire/core-transaction.c -@@ -37,6 +37,7 @@ - #include <linux/timer.h> - #include <linux/types.h> - #include <linux/workqueue.h> -+#include <linux/sched.h> - - #include <asm/byteorder.h> - -@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, - struct transaction_callback_data d; - struct fw_transaction t; - -+ pax_track_stack(); -+ - init_timer_on_stack(&t.split_timeout_timer); - init_completion(&d.done); - d.payload = payload; -diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h -index b45be57..5fad18b 100644 ---- a/drivers/firewire/core.h -+++ b/drivers/firewire/core.h -@@ -101,6 +101,7 @@ struct fw_card_driver { - - int (*stop_iso)(struct fw_iso_context *ctx); - }; -+typedef struct fw_card_driver __no_const fw_card_driver_no_const; - - void fw_card_initialize(struct fw_card *card, - const struct fw_card_driver *driver, struct device *device); -diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c -index bcb1126..2cc2121 100644 ---- a/drivers/firmware/dmi_scan.c -+++ b/drivers/firmware/dmi_scan.c -@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void) - } - } - else { -- /* -- * no iounmap() for that ioremap(); it would be a no-op, but -- * it's so early in setup that sucker gets confused into doing -- * what it shouldn't if we actually call it. 
-- */ - p = dmi_ioremap(0xF0000, 0x10000); - if (p == NULL) - goto error; -@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), - if (buf == NULL) - return -1; - -- dmi_table(buf, dmi_len, dmi_num, decode, private_data); -+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data); - - iounmap(buf); - return 0; -diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c -index 98723cb..10ca85b 100644 ---- a/drivers/gpio/gpio-vr41xx.c -+++ b/drivers/gpio/gpio-vr41xx.c -@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq) - printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", - maskl, pendl, maskh, pendh); - -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - - return -EINVAL; - } -diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c -index 2410c40..2d03563 100644 ---- a/drivers/gpu/drm/drm_crtc.c -+++ b/drivers/gpu/drm/drm_crtc.c -@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, - */ - if ((out_resp->count_modes >= mode_count) && mode_count) { - copied = 0; -- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr; -+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; - list_for_each_entry(mode, &connector->modes, head) { - drm_crtc_convert_to_umode(&u_mode, mode); - if (copy_to_user(mode_ptr + copied, -@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, - - if ((out_resp->count_props >= props_count) && props_count) { - copied = 0; -- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr); -- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr); -+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr); -+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { - if (connector->property_ids[i] != 0) { - if (put_user(connector->property_ids[i], -@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, - - if ((out_resp->count_encoders >= encoders_count) && encoders_count) { - copied = 0; -- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr); -+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] != 0) { - if (put_user(connector->encoder_ids[i], -@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, - } - - for (i = 0; i < crtc_req->count_connectors; i++) { -- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr; -+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr; - if (get_user(out_id, &set_connectors_ptr[i])) { - ret = -EFAULT; - goto out; -@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, - fb = obj_to_fb(obj); - - num_clips = r->num_clips; -- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; -+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; - - if (!num_clips != !clips_ptr) { - ret = -EINVAL; -@@ -2276,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, - out_resp->flags = property->flags; - - if ((out_resp->count_values >= value_count) && value_count) { -- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr; -+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr; - for (i = 0; i < value_count; i++) { - if 
(copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { - ret = -EFAULT; -@@ -2289,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, - if (property->flags & DRM_MODE_PROP_ENUM) { - if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { - copied = 0; -- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr; -+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; - list_for_each_entry(prop_enum, &property->enum_blob_list, head) { - - if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { -@@ -2312,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, - if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { - copied = 0; - blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr; -- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr; -+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr; - - list_for_each_entry(prop_blob, &property->enum_blob_list, head) { - if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { -@@ -2373,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, - struct drm_mode_get_blob *out_resp = data; - struct drm_property_blob *blob; - int ret = 0; -- void *blob_ptr; -+ void __user *blob_ptr; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; -@@ -2387,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, - blob = obj_to_blob(obj); - - if (out_resp->length == blob->length) { -- blob_ptr = (void *)(unsigned long)out_resp->data; -+ blob_ptr = (void __user *)(unsigned long)out_resp->data; - if (copy_to_user(blob_ptr, blob->data, blob->length)){ - ret = -EFAULT; - goto done; -diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c -index f88a9b2..8f4078f 100644 ---- a/drivers/gpu/drm/drm_crtc_helper.c -+++ b/drivers/gpu/drm/drm_crtc_helper.c -@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, - struct drm_crtc *tmp; - int crtc_mask = 1; - -- WARN(!crtc, "checking null crtc?\n"); -+ BUG_ON(!crtc); - - dev = crtc->dev; - -@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, - struct drm_encoder *encoder; - bool ret = true; - -+ pax_track_stack(); -+ - crtc->enabled = drm_helper_crtc_in_use(crtc); - if (!crtc->enabled) - return true; -diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c -index 93a112d..c8b065d 100644 ---- a/drivers/gpu/drm/drm_drv.c -+++ b/drivers/gpu/drm/drm_drv.c -@@ -307,7 +307,7 @@ module_exit(drm_core_exit); - /** - * Copy and IOCTL return string to user space - */ --static int drm_copy_field(char *buf, size_t *buf_len, const char *value) -+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) - { - int len; - -@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp, - - dev = file_priv->minor->dev; - atomic_inc(&dev->ioctl_count); -- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); - ++file_priv->ioctl_count; - - DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", -diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c -index 2ec7d48..be14bb1 100644 ---- a/drivers/gpu/drm/drm_fops.c -+++ b/drivers/gpu/drm/drm_fops.c -@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev) - } - - for (i = 0; i < ARRAY_SIZE(dev->counts); i++) -- atomic_set(&dev->counts[i], 0); -+ atomic_set_unchecked(&dev->counts[i], 0); - - 
dev->sigdata.lock = NULL; - -@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp) - - retcode = drm_open_helper(inode, filp, dev); - if (!retcode) { -- atomic_inc(&dev->counts[_DRM_STAT_OPENS]); -- if (!dev->open_count++) -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); -+ if (local_inc_return(&dev->open_count) == 1) - retcode = drm_setup(dev); - } - if (!retcode) { -@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp) - - mutex_lock(&drm_global_mutex); - -- DRM_DEBUG("open_count = %d\n", dev->open_count); -+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count)); - - if (dev->driver->preclose) - dev->driver->preclose(dev, file_priv); -@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp) - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->device), -- dev->open_count); -+ local_read(&dev->open_count)); - - /* if the master has gone away we can't do anything with the lock */ - if (file_priv->minor->master) -@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp) - * End inline drm_release - */ - -- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); -- if (!--dev->open_count) { -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); -+ if (local_dec_and_test(&dev->open_count)) { - if (atomic_read(&dev->ioctl_count)) { - DRM_ERROR("Device busy: %d\n", - atomic_read(&dev->ioctl_count)); -diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c -index c87dc96..326055d 100644 ---- a/drivers/gpu/drm/drm_global.c -+++ b/drivers/gpu/drm/drm_global.c -@@ -36,7 +36,7 @@ - struct drm_global_item { - struct mutex mutex; - void *object; -- int refcount; -+ atomic_t refcount; - }; - - static struct drm_global_item glob[DRM_GLOBAL_NUM]; -@@ -49,7 +49,7 @@ void drm_global_init(void) - struct drm_global_item *item = &glob[i]; - mutex_init(&item->mutex); - item->object = NULL; -- item->refcount = 0; -+ atomic_set(&item->refcount, 0); - } - } - -@@ -59,7 +59,7 @@ void drm_global_release(void) - for (i = 0; i < DRM_GLOBAL_NUM; ++i) { - struct drm_global_item *item = &glob[i]; - BUG_ON(item->object != NULL); -- BUG_ON(item->refcount != 0); -+ BUG_ON(atomic_read(&item->refcount) != 0); - } - } - -@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) - void *object; - - mutex_lock(&item->mutex); -- if (item->refcount == 0) { -+ if (atomic_read(&item->refcount) == 0) { - item->object = kzalloc(ref->size, GFP_KERNEL); - if (unlikely(item->object == NULL)) { - ret = -ENOMEM; -@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) - goto out_err; - - } -- ++item->refcount; -+ atomic_inc(&item->refcount); - ref->object = item->object; - object = item->object; - mutex_unlock(&item->mutex); -@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref) - struct drm_global_item *item = &glob[ref->global_type]; - - mutex_lock(&item->mutex); -- BUG_ON(item->refcount == 0); -+ BUG_ON(atomic_read(&item->refcount) == 0); - BUG_ON(ref->object != item->object); -- if (--item->refcount == 0) { -+ if (atomic_dec_and_test(&item->refcount)) { - ref->release(ref); - item->object = NULL; - } -diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c -index ab1162d..42587b2 100644 ---- a/drivers/gpu/drm/drm_info.c -+++ b/drivers/gpu/drm/drm_info.c -@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data) - struct drm_local_map *map; - struct drm_map_list *r_list; - -- /* 
Hardcoded from _DRM_FRAME_BUFFER, -- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and -- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ -- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; -+ static const char * const types[] = { -+ [_DRM_FRAME_BUFFER] = "FB", -+ [_DRM_REGISTERS] = "REG", -+ [_DRM_SHM] = "SHM", -+ [_DRM_AGP] = "AGP", -+ [_DRM_SCATTER_GATHER] = "SG", -+ [_DRM_CONSISTENT] = "PCI", -+ [_DRM_GEM] = "GEM" }; - const char *type; - int i; - -@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data) - map = r_list->map; - if (!map) - continue; -- if (map->type < 0 || map->type > 5) -+ if (map->type >= ARRAY_SIZE(types)) - type = "??"; - else - type = types[map->type]; -@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data) - vma->vm_flags & VM_MAYSHARE ? 's' : 'p', - vma->vm_flags & VM_LOCKED ? 'l' : '-', - vma->vm_flags & VM_IO ? 'i' : '-', -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ 0); -+#else - vma->vm_pgoff); -+#endif - - #if defined(__i386__) - pgprot = pgprot_val(vma->vm_page_prot); -diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c -index 4a058c7..b42cd92 100644 ---- a/drivers/gpu/drm/drm_ioc32.c -+++ b/drivers/gpu/drm/drm_ioc32.c -@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, - request = compat_alloc_user_space(nbytes); - if (!access_ok(VERIFY_WRITE, request, nbytes)) - return -EFAULT; -- list = (struct drm_buf_desc *) (request + 1); -+ list = (struct drm_buf_desc __user *) (request + 1); - - if (__put_user(count, &request->count) - || __put_user(list, &request->list)) -@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, - request = compat_alloc_user_space(nbytes); - if (!access_ok(VERIFY_WRITE, request, nbytes)) - return -EFAULT; -- list = (struct drm_buf_pub *) (request + 1); -+ list = (struct drm_buf_pub __user *) (request + 1); - - if (__put_user(count, &request->count) - || __put_user(list, &request->list)) -diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c -index 904d7e9..ab88581 100644 ---- a/drivers/gpu/drm/drm_ioctl.c -+++ b/drivers/gpu/drm/drm_ioctl.c -@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data, - stats->data[i].value = - (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); - else -- stats->data[i].value = atomic_read(&dev->counts[i]); -+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); - stats->data[i].type = dev->types[i]; - } - -diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c -index 632ae24..244cf4a 100644 ---- a/drivers/gpu/drm/drm_lock.c -+++ b/drivers/gpu/drm/drm_lock.c -@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) - if (drm_lock_take(&master->lock, lock->context)) { - master->lock.file_priv = file_priv; - master->lock.lock_time = jiffies; -- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); - break; /* Got lock */ - } - -@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) - return -EINVAL; - } - -- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); - - if (drm_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. 
*/ -diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c -index 8f371e8..9f85d52 100644 ---- a/drivers/gpu/drm/i810/i810_dma.c -+++ b/drivers/gpu/drm/i810/i810_dma.c -@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data, - dma->buflist[vertex->idx], - vertex->discard, vertex->used); - -- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); -- atomic_inc(&dev->counts[_DRM_STAT_DMA]); -+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); - sarea_priv->last_enqueue = dev_priv->counter - 1; - sarea_priv->last_dispatch = (int)hw_status[5]; - -@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data, - i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, - mc->last_render); - -- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); -- atomic_inc(&dev->counts[_DRM_STAT_DMA]); -+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); -+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); - sarea_priv->last_enqueue = dev_priv->counter - 1; - sarea_priv->last_dispatch = (int)hw_status[5]; - -diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h -index c9339f4..f5e1b9d 100644 ---- a/drivers/gpu/drm/i810/i810_drv.h -+++ b/drivers/gpu/drm/i810/i810_drv.h -@@ -108,8 +108,8 @@ typedef struct drm_i810_private { - int page_flipping; - - wait_queue_head_t irq_queue; -- atomic_t irq_received; -- atomic_t irq_emitted; -+ atomic_unchecked_t irq_received; -+ atomic_unchecked_t irq_emitted; - - int front_offset; - } drm_i810_private_t; -diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c -index 3c395a5..02889c2 100644 ---- a/drivers/gpu/drm/i915/i915_debugfs.c -+++ b/drivers/gpu/drm/i915/i915_debugfs.c -@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) - I915_READ(GTIMR)); - } - seq_printf(m, "Interrupts received: %d\n", -- atomic_read(&dev_priv->irq_received)); -+ atomic_read_unchecked(&dev_priv->irq_received)); - for (i = 0; i < I915_NUM_RINGS; i++) { - if (IS_GEN6(dev) || IS_GEN7(dev)) { - seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", -@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused) - return ret; - - if (opregion->header) -- seq_write(m, opregion->header, OPREGION_SIZE); -+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE); - - mutex_unlock(&dev->struct_mutex); - -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index 8a3942c..1b73bf1 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) - bool can_switch; - - spin_lock(&dev->count_lock); -- can_switch = (dev->open_count == 0); -+ can_switch = (local_read(&dev->open_count) == 0); - spin_unlock(&dev->count_lock); - return can_switch; - } -diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index 7916bd9..7c17a0f 100644 ---- a/drivers/gpu/drm/i915/i915_drv.h -+++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -222,7 +222,7 @@ struct drm_i915_display_funcs { - /* render clock increase/decrease */ - /* display clock increase/decrease */ - /* pll clock increase/decrease */ --}; -+} __no_const; - - struct intel_device_info { - u8 gen; -@@ -305,7 +305,7 @@ typedef struct drm_i915_private { - int current_page; - int page_flipping; - -- atomic_t irq_received; -+ 
atomic_unchecked_t irq_received; - - /* protects the irq masks */ - spinlock_t irq_lock; -@@ -882,7 +882,7 @@ struct drm_i915_gem_object { - * will be page flipped away on the next vblank. When it - * reaches 0, dev_priv->pending_flip_queue will be woken up. - */ -- atomic_t pending_flip; -+ atomic_unchecked_t pending_flip; - }; - - #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_device *dev); - extern void intel_teardown_gmbus(struct drm_device *dev); - extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); - extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); --extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) -+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) - { - return container_of(adapter, struct intel_gmbus, adapter)->force_bit; - } -diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c -index 4934cf8..1da9c84 100644 ---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c -+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c -@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, - i915_gem_clflush_object(obj); - - if (obj->base.pending_write_domain) -- cd->flips |= atomic_read(&obj->pending_flip); -+ cd->flips |= atomic_read_unchecked(&obj->pending_flip); - - /* The actual obj->write_domain will be updated with - * pending_write_domain after we emit the accumulated flush for all -@@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) - - static int - validate_exec_list(struct drm_i915_gem_exec_object2 *exec, -- int count) -+ unsigned int count) - { -- int i; -+ unsigned int i; - - for (i = 0; i < count; i++) { - char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 73248d0..f7bac29 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) - u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; - struct drm_i915_master_private *master_priv; - -- atomic_inc(&dev_priv->irq_received); -+ atomic_inc_unchecked(&dev_priv->irq_received); - - /* disable master interrupt before clearing iir */ - de_ier = I915_READ(DEIER); -@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) - struct drm_i915_master_private *master_priv; - u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; - -- atomic_inc(&dev_priv->irq_received); -+ atomic_inc_unchecked(&dev_priv->irq_received); - - if (IS_GEN6(dev)) - bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; -@@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) - int ret = IRQ_NONE, pipe; - bool blc_event = false; - -- atomic_inc(&dev_priv->irq_received); -+ atomic_inc_unchecked(&dev_priv->irq_received); - - iir = I915_READ(IIR); - -@@ -1741,7 +1741,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - -- atomic_set(&dev_priv->irq_received, 0); -+ atomic_set_unchecked(&dev_priv->irq_received, 0); - - INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); - INIT_WORK(&dev_priv->error_work, i915_error_work_func); -@@ -1905,7 +1905,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev) - drm_i915_private_t *dev_priv = (drm_i915_private_t *) 
dev->dev_private; - int pipe; - -- atomic_set(&dev_priv->irq_received, 0); -+ atomic_set_unchecked(&dev_priv->irq_received, 0); - - INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); - INIT_WORK(&dev_priv->error_work, i915_error_work_func); -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index 07e7cf3..c75f312 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - - wait_event(dev_priv->pending_flip_queue, - atomic_read(&dev_priv->mm.wedged) || -- atomic_read(&obj->pending_flip) == 0); -+ atomic_read_unchecked(&obj->pending_flip) == 0); - - /* Big Hammer, we also need to ensure that any pending - * MI_WAIT_FOR_EVENT inside a user batch buffer on the -@@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) - obj = to_intel_framebuffer(crtc->fb)->obj; - dev_priv = crtc->dev->dev_private; - wait_event(dev_priv->pending_flip_queue, -- atomic_read(&obj->pending_flip) == 0); -+ atomic_read_unchecked(&obj->pending_flip) == 0); - } - - static bool intel_crtc_driving_pch(struct drm_crtc *crtc) -@@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, - - atomic_clear_mask(1 << intel_crtc->plane, - &obj->pending_flip.counter); -- if (atomic_read(&obj->pending_flip) == 0) -+ if (atomic_read_unchecked(&obj->pending_flip) == 0) - wake_up(&dev_priv->pending_flip_queue); - - schedule_work(&work->work); -@@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, - /* Block clients from rendering to the new back buffer until - * the flip occurs and the object is no longer visible. - */ -- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); -+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); - - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); - if (ret) -@@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, - return 0; - - cleanup_pending: -- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); -+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); - cleanup_objs: - drm_gem_object_unreference(&work->old_fb_obj->base); - drm_gem_object_unreference(&obj->base); -diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h -index 54558a0..2d97005 100644 ---- a/drivers/gpu/drm/mga/mga_drv.h -+++ b/drivers/gpu/drm/mga/mga_drv.h -@@ -120,9 +120,9 @@ typedef struct drm_mga_private { - u32 clear_cmd; - u32 maccess; - -- atomic_t vbl_received; /**< Number of vblanks received. */ -+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. 
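[Editorial note] The hunks above (i810, i915, mga) all follow one pattern: counters that are pure statistics and may legitimately wrap — IRQ tallies, vblank counts — are switched from atomic_t to grsecurity's atomic_unchecked_t, so the hardened atomic_t can trap on overflow (to stop refcount wrap-around bugs) without false positives on counters where wrapping is harmless. A minimal C11 sketch of that policy split follows; it is illustrative only, the real PaX implementation uses architecture-specific overflow checks, and the type and function names are stand-ins.

/*
 * Sketch only -- not the PaX implementation.  "checked" counters refuse
 * to overflow (a hardened kernel would trap); "unchecked" counters are
 * plain wrapping statistics, which is what the hunks above convert to.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int v; } checked_t;    /* must never overflow */
typedef struct { atomic_int v; } unchecked_t;  /* wrap-around is fine */

/* Saturate instead of wrapping around to a small value. */
static int checked_inc(checked_t *c)
{
    int old = atomic_load(&c->v);
    do {
        if (old == INT_MAX)
            return -1;                         /* refuse to overflow */
    } while (!atomic_compare_exchange_weak(&c->v, &old, old + 1));
    return 0;
}

/* Plain increment: overflow is harmless for pure statistics. */
static void unchecked_inc(unchecked_t *c)
{
    atomic_fetch_add(&c->v, 1);
}

int main(void)
{
    checked_t   refs = { INT_MAX };
    unchecked_t irqs = { 0 };

    unchecked_inc(&irqs);                      /* e.g. vbl_received++ */
    if (checked_inc(&refs) < 0)
        puts("refcount overflow blocked");

    printf("irqs=%d refs=%d\n", atomic_load(&irqs.v), atomic_load(&refs.v));
    return 0;
}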
*/ - wait_queue_head_t fence_queue; -- atomic_t last_fence_retired; -+ atomic_unchecked_t last_fence_retired; - u32 next_fence_to_post; - - unsigned int fb_cpp; -diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c -index 2581202..f230a8d9 100644 ---- a/drivers/gpu/drm/mga/mga_irq.c -+++ b/drivers/gpu/drm/mga/mga_irq.c -@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) - if (crtc != 0) - return 0; - -- return atomic_read(&dev_priv->vbl_received); -+ return atomic_read_unchecked(&dev_priv->vbl_received); - } - - -@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) - /* VBLANK interrupt */ - if (status & MGA_VLINEPEN) { - MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); -- atomic_inc(&dev_priv->vbl_received); -+ atomic_inc_unchecked(&dev_priv->vbl_received); - drm_handle_vblank(dev, 0); - handled = 1; - } -@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) - if ((prim_start & ~0x03) != (prim_end & ~0x03)) - MGA_WRITE(MGA_PRIMEND, prim_end); - -- atomic_inc(&dev_priv->last_fence_retired); -+ atomic_inc_unchecked(&dev_priv->last_fence_retired); - DRM_WAKEUP(&dev_priv->fence_queue); - handled = 1; - } -@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) - * using fences. - */ - DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, -- (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) -+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) - - *sequence) <= (1 << 23))); - - *sequence = cur_fence; -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c -index b311fab..dc11d6a 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c -@@ -201,7 +201,7 @@ struct methods { - const char desc[8]; - void (*loadbios)(struct drm_device *, uint8_t *); - const bool rw; --}; -+} __do_const; - - static struct methods shadow_methods[] = { - { "PRAMIN", load_vbios_pramin, true }, -@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios, - struct bit_table { - const char id; - int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); --}; -+} __no_const; - - #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h -index d7d51de..7c6a7f1 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.h -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h -@@ -238,7 +238,7 @@ struct nouveau_channel { - struct list_head pending; - uint32_t sequence; - uint32_t sequence_ack; -- atomic_t last_sequence_irq; -+ atomic_unchecked_t last_sequence_irq; - struct nouveau_vma vma; - } fence; - -@@ -319,7 +319,7 @@ struct nouveau_exec_engine { - u32 handle, u16 class); - void (*set_tile_region)(struct drm_device *dev, int i); - void (*tlb_flush)(struct drm_device *, int engine); --}; -+} __no_const; - - struct nouveau_instmem_engine { - void *priv; -@@ -341,13 +341,13 @@ struct nouveau_instmem_engine { - struct nouveau_mc_engine { - int (*init)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); --}; -+} __no_const; - - struct nouveau_timer_engine { - int (*init)(struct drm_device *dev); - void (*takedown)(struct drm_device *dev); - uint64_t (*read)(struct drm_device *dev); --}; -+} __no_const; - - struct nouveau_fb_engine { - int num_tiles; -@@ -513,7 +513,7 @@ struct nouveau_vram_engine { - void (*put)(struct drm_device *, struct 
nouveau_mem **); - - bool (*flags_valid)(struct drm_device *, u32 tile_flags); --}; -+} __no_const; - - struct nouveau_engine { - struct nouveau_instmem_engine instmem; -@@ -660,7 +660,7 @@ struct drm_nouveau_private { - struct drm_global_reference mem_global_ref; - struct ttm_bo_global_ref bo_global_ref; - struct ttm_bo_device bdev; -- atomic_t validate_sequence; -+ atomic_unchecked_t validate_sequence; - } ttm; - - struct { -diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c -index ae22dfa..4f09960 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fence.c -+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c -@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) - if (USE_REFCNT(dev)) - sequence = nvchan_rd32(chan, 0x48); - else -- sequence = atomic_read(&chan->fence.last_sequence_irq); -+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq); - - if (chan->fence.sequence_ack == sequence) - goto out; -@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) - return ret; - } - -- atomic_set(&chan->fence.last_sequence_irq, 0); -+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0); - return 0; - } - -diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c -index 5f0bc57..eb9fac8 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_gem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c -@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, - int trycnt = 0; - int ret, i; - -- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); -+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence); - retry: - if (++trycnt > 100000) { - NV_ERROR(dev, "%s failed and gave up.\n", __func__); -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index 10656e4..59bf2a4 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) - bool can_switch; - - spin_lock(&dev->count_lock); -- can_switch = (dev->open_count == 0); -+ can_switch = (local_read(&dev->open_count) == 0); - spin_unlock(&dev->count_lock); - return can_switch; - } -diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c -index dbdea8e..cd6eeeb 100644 ---- a/drivers/gpu/drm/nouveau/nv04_graph.c -+++ b/drivers/gpu/drm/nouveau/nv04_graph.c -@@ -554,7 +554,7 @@ static int - nv04_graph_mthd_set_ref(struct nouveau_channel *chan, - u32 class, u32 mthd, u32 data) - { -- atomic_set(&chan->fence.last_sequence_irq, data); -+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data); - return 0; - } - -diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c -index 570e190..084a31a 100644 ---- a/drivers/gpu/drm/r128/r128_cce.c -+++ b/drivers/gpu/drm/r128/r128_cce.c -@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) - - /* GH: Simple idle check. 
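[Editorial note] Around this point the patch also constifies pure function-pointer tables: the nouveau engine vtables and similar structs gain __do_const or __no_const annotations, the idea being that an ops table which is never written after init can live in read-only memory, while the few that must be patched at run time stay writable (the radeon_ttm.c hunk further down takes a writable copy of the TTM vm_ops under pax_open_kernel()/pax_close_kernel() for exactly that reason). A rough sketch of the same trade-off in plain C, with made-up names — this is not the constify GCC plugin itself.

/*
 * Sketch only.  The shared table is const and can be placed in .rodata;
 * a driver that needs to override one handler keeps its own writable copy.
 */
#include <stdio.h>
#include <string.h>

struct vm_ops {
    void (*fault)(const char *who);
};

static void generic_fault(const char *who) { printf("%s: generic fault\n", who); }
static void radeon_fault(const char *who)  { printf("%s: radeon fault\n", who); }

/* Common table: read-only, cannot be redirected at run time. */
static const struct vm_ops ttm_vm_ops = { .fault = generic_fault };

/* Per-driver copy: writable, so one handler can be swapped out. */
static struct vm_ops radeon_vm_ops;

int main(void)
{
    memcpy(&radeon_vm_ops, &ttm_vm_ops, sizeof(radeon_vm_ops));
    radeon_vm_ops.fault = radeon_fault;        /* override a single hook */

    ttm_vm_ops.fault("ttm");                   /* shared read-only table */
    radeon_vm_ops.fault("radeon");             /* driver-local copy      */
    return 0;
}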
- */ -- atomic_set(&dev_priv->idle_count, 0); -+ atomic_set_unchecked(&dev_priv->idle_count, 0); - - /* We don't support anything other than bus-mastering ring mode, - * but the ring can be in either AGP or PCI space for the ring -diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h -index 930c71b..499aded 100644 ---- a/drivers/gpu/drm/r128/r128_drv.h -+++ b/drivers/gpu/drm/r128/r128_drv.h -@@ -90,14 +90,14 @@ typedef struct drm_r128_private { - int is_pci; - unsigned long cce_buffers_offset; - -- atomic_t idle_count; -+ atomic_unchecked_t idle_count; - - int page_flipping; - int current_page; - u32 crtc_offset; - u32 crtc_offset_cntl; - -- atomic_t vbl_received; -+ atomic_unchecked_t vbl_received; - - u32 color_fmt; - unsigned int front_offset; -diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c -index 429d5a0..7e899ed 100644 ---- a/drivers/gpu/drm/r128/r128_irq.c -+++ b/drivers/gpu/drm/r128/r128_irq.c -@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) - if (crtc != 0) - return 0; - -- return atomic_read(&dev_priv->vbl_received); -+ return atomic_read_unchecked(&dev_priv->vbl_received); - } - - irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) -@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) - /* VBLANK interrupt */ - if (status & R128_CRTC_VBLANK_INT) { - R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); -- atomic_inc(&dev_priv->vbl_received); -+ atomic_inc_unchecked(&dev_priv->vbl_received); - drm_handle_vblank(dev, 0); - return IRQ_HANDLED; - } -diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c -index a9e33ce..09edd4b 100644 ---- a/drivers/gpu/drm/r128/r128_state.c -+++ b/drivers/gpu/drm/r128/r128_state.c -@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv, - - static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) - { -- if (atomic_read(&dev_priv->idle_count) == 0) -+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) - r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); - else -- atomic_set(&dev_priv->idle_count, 0); -+ atomic_set_unchecked(&dev_priv->idle_count, 0); - } - - #endif -diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c -index 14cc88a..cc7b3a5 100644 ---- a/drivers/gpu/drm/radeon/atom.c -+++ b/drivers/gpu/drm/radeon/atom.c -@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) - char name[512]; - int i; - -+ pax_track_stack(); -+ - if (!ctx) - return NULL; - -diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c -index 5a82b6b..9e69c73 100644 ---- a/drivers/gpu/drm/radeon/mkregtable.c -+++ b/drivers/gpu/drm/radeon/mkregtable.c -@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename) - regex_t mask_rex; - regmatch_t match[4]; - char buf[1024]; -- size_t end; -+ long end; - int len; - int done = 0; - int r; - unsigned o; - struct offset *offset; - char last_reg_s[10]; -- int last_reg; -+ unsigned long last_reg; - - if (regcomp - (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { -diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h -index 184628c..30e1725 100644 ---- a/drivers/gpu/drm/radeon/radeon.h -+++ b/drivers/gpu/drm/radeon/radeon.h -@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev); - */ - struct radeon_fence_driver { - uint32_t scratch_reg; -- atomic_t seq; -+ atomic_unchecked_t seq; - uint32_t 
last_seq; - unsigned long last_jiffies; - unsigned long last_timeout; -@@ -962,7 +962,7 @@ struct radeon_asic { - void (*pre_page_flip)(struct radeon_device *rdev, int crtc); - u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); - void (*post_page_flip)(struct radeon_device *rdev, int crtc); --}; -+} __no_const; - - /* - * Asic structures -diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c -index 285acc4..f4d909f 100644 ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -569,6 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - struct radeon_gpio_rec gpio; - struct radeon_hpd hpd; - -+ pax_track_stack(); -+ - if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) - return false; - -diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c -index b51e157..8f14fb9 100644 ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) - bool can_switch; - - spin_lock(&dev->count_lock); -- can_switch = (dev->open_count == 0); -+ can_switch = (local_read(&dev->open_count) == 0); - spin_unlock(&dev->count_lock); - return can_switch; - } -diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index 6adb3e5..b91553e2 100644 ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, - uint32_t post_div; - u32 pll_out_min, pll_out_max; - -+ pax_track_stack(); -+ - DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); - freq = freq * 1000; - -diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h -index a1b59ca..86f2d44 100644 ---- a/drivers/gpu/drm/radeon/radeon_drv.h -+++ b/drivers/gpu/drm/radeon/radeon_drv.h -@@ -255,7 +255,7 @@ typedef struct drm_radeon_private { - - /* SW interrupt */ - wait_queue_head_t swi_queue; -- atomic_t swi_emitted; -+ atomic_unchecked_t swi_emitted; - int vblank_crtc; - uint32_t irq_enable_reg; - uint32_t r500_disp_irq_reg; -diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c -index 7fd4e3e..9748ab5 100644 ---- a/drivers/gpu/drm/radeon/radeon_fence.c -+++ b/drivers/gpu/drm/radeon/radeon_fence.c -@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) - write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); - return 0; - } -- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); -+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq); - if (!rdev->cp.ready) - /* FIXME: cp is not running assume everythings is done right - * away -@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) - return r; - } - radeon_fence_write(rdev, 0); -- atomic_set(&rdev->fence_drv.seq, 0); -+ atomic_set_unchecked(&rdev->fence_drv.seq, 0); - INIT_LIST_HEAD(&rdev->fence_drv.created); - INIT_LIST_HEAD(&rdev->fence_drv.emited); - INIT_LIST_HEAD(&rdev->fence_drv.signaled); -diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c -index 48b7cea..342236f 100644 ---- a/drivers/gpu/drm/radeon/radeon_ioc32.c -+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c -@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, 
unsigned int cmd, - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.param, &request->param) -- || __put_user((void __user *)(unsigned long)req32.value, -+ || __put_user((unsigned long)req32.value, - &request->value)) - return -EFAULT; - -diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c -index 465746b..cb2b055 100644 ---- a/drivers/gpu/drm/radeon/radeon_irq.c -+++ b/drivers/gpu/drm/radeon/radeon_irq.c -@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev) - unsigned int ret; - RING_LOCALS; - -- atomic_inc(&dev_priv->swi_emitted); -- ret = atomic_read(&dev_priv->swi_emitted); -+ atomic_inc_unchecked(&dev_priv->swi_emitted); -+ ret = atomic_read_unchecked(&dev_priv->swi_emitted); - - BEGIN_RING(4); - OUT_RING_REG(RADEON_LAST_SWI_REG, ret); -@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) - drm_radeon_private_t *dev_priv = - (drm_radeon_private_t *) dev->dev_private; - -- atomic_set(&dev_priv->swi_emitted, 0); -+ atomic_set_unchecked(&dev_priv->swi_emitted, 0); - DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); - - dev->max_vblank_count = 0x001fffff; -diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c -index 92e7ea7..147ffad 100644 ---- a/drivers/gpu/drm/radeon/radeon_state.c -+++ b/drivers/gpu/drm/radeon/radeon_state.c -@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * - if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) - sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - -- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, -+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, - sarea_priv->nbox * sizeof(depth_boxes[0]))) - return -EFAULT; - -@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil - { - drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_getparam_t *param = data; -- int value; -+ int value = 0; - - DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); - -diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c -index 0b5468b..9c4b308 100644 ---- a/drivers/gpu/drm/radeon/radeon_ttm.c -+++ b/drivers/gpu/drm/radeon/radeon_ttm.c -@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) - } - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; -- radeon_ttm_vm_ops = *ttm_vm_ops; -- radeon_ttm_vm_ops.fault = &radeon_ttm_fault; -+ pax_open_kernel(); -+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops)); -+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault; -+ pax_close_kernel(); - } - vma->vm_ops = &radeon_ttm_vm_ops; - return 0; -diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c -index a9049ed..501f284 100644 ---- a/drivers/gpu/drm/radeon/rs690.c -+++ b/drivers/gpu/drm/radeon/rs690.c -@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && - rdev->pm.sideport_bandwidth.full) - rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; -- read_delay_latency.full = dfixed_const(370 * 800 * 1000); -+ read_delay_latency.full = dfixed_const(800 * 1000); - read_delay_latency.full = dfixed_div(read_delay_latency, - rdev->pm.igp_sideport_mclk); -+ a.full = dfixed_const(370); -+ read_delay_latency.full = 
dfixed_mul(read_delay_latency, a); - } else { - if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && - rdev->pm.k8_bandwidth.full) -diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c -index 727e93d..1565650 100644 ---- a/drivers/gpu/drm/ttm/ttm_page_alloc.c -+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c -@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void) - static int ttm_pool_mm_shrink(struct shrinker *shrink, - struct shrink_control *sc) - { -- static atomic_t start_pool = ATOMIC_INIT(0); -+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0); - unsigned i; -- unsigned pool_offset = atomic_add_return(1, &start_pool); -+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool); - struct ttm_page_pool *pool; - int shrink_pages = sc->nr_to_scan; - -diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h -index 9cf87d9..2000b7d 100644 ---- a/drivers/gpu/drm/via/via_drv.h -+++ b/drivers/gpu/drm/via/via_drv.h -@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { - typedef uint32_t maskarray_t[5]; - - typedef struct drm_via_irq { -- atomic_t irq_received; -+ atomic_unchecked_t irq_received; - uint32_t pending_mask; - uint32_t enable_mask; - wait_queue_head_t irq_queue; -@@ -75,7 +75,7 @@ typedef struct drm_via_private { - struct timeval last_vblank; - int last_vblank_valid; - unsigned usec_per_vblank; -- atomic_t vbl_received; -+ atomic_unchecked_t vbl_received; - drm_via_state_t hc_state; - char pci_buf[VIA_PCI_BUF_SIZE]; - const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; -diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c -index d391f48..10c8ca3 100644 ---- a/drivers/gpu/drm/via/via_irq.c -+++ b/drivers/gpu/drm/via/via_irq.c -@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc) - if (crtc != 0) - return 0; - -- return atomic_read(&dev_priv->vbl_received); -+ return atomic_read_unchecked(&dev_priv->vbl_received); - } - - irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) -@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) - - status = VIA_READ(VIA_REG_INTERRUPT); - if (status & VIA_IRQ_VBLANK_PENDING) { -- atomic_inc(&dev_priv->vbl_received); -- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { -+ atomic_inc_unchecked(&dev_priv->vbl_received); -+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { - do_gettimeofday(&cur_vblank); - if (dev_priv->last_vblank_valid) { - dev_priv->usec_per_vblank = -@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) - dev_priv->last_vblank = cur_vblank; - dev_priv->last_vblank_valid = 1; - } -- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { -+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { - DRM_DEBUG("US per vblank is: %u\n", - dev_priv->usec_per_vblank); - } -@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) - - for (i = 0; i < dev_priv->num_irqs; ++i) { - if (status & cur_irq->pending_mask) { -- atomic_inc(&cur_irq->irq_received); -+ atomic_inc_unchecked(&cur_irq->irq_received); - DRM_WAKEUP(&cur_irq->irq_queue); - handled = 1; - if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) -@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence - DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, - ((VIA_READ(masks[irq][2]) & masks[irq][3]) == - masks[irq][4])); -- cur_irq_sequence = atomic_read(&cur_irq->irq_received); -+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); - } 
else { - DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, - (((cur_irq_sequence = -- atomic_read(&cur_irq->irq_received)) - -+ atomic_read_unchecked(&cur_irq->irq_received)) - - *sequence) <= (1 << 23))); - } - *sequence = cur_irq_sequence; -@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev) - } - - for (i = 0; i < dev_priv->num_irqs; ++i) { -- atomic_set(&cur_irq->irq_received, 0); -+ atomic_set_unchecked(&cur_irq->irq_received, 0); - cur_irq->enable_mask = dev_priv->irq_masks[i][0]; - cur_irq->pending_mask = dev_priv->irq_masks[i][1]; - DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); -@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) - switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { - case VIA_IRQ_RELATIVE: - irqwait->request.sequence += -- atomic_read(&cur_irq->irq_received); -+ atomic_read_unchecked(&cur_irq->irq_received); - irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; - case VIA_IRQ_ABSOLUTE: - break; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -index 10fc01f..b4e9822 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -@@ -240,7 +240,7 @@ struct vmw_private { - * Fencing and IRQs. - */ - -- atomic_t fence_seq; -+ atomic_unchecked_t fence_seq; - wait_queue_head_t fence_queue; - wait_queue_head_t fifo_queue; - atomic_t fence_queue_waiters; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -index 41b95ed..69ea504 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, - struct drm_vmw_fence_rep fence_rep; - struct drm_vmw_fence_rep __user *user_fence_rep; - int ret; -- void *user_cmd; -+ void __user *user_cmd; - void *cmd; - uint32_t sequence; - struct vmw_sw_context *sw_context = &dev_priv->ctx; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c -index 61eacc1..ee38ce8 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c -@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv, - while (!vmw_lag_lt(queue, us)) { - spin_lock(&queue->lock); - if (list_empty(&queue->head)) -- sequence = atomic_read(&dev_priv->fence_seq); -+ sequence = atomic_read_unchecked(&dev_priv->fence_seq); - else { - fence = list_first_entry(&queue->head, - struct vmw_fence, head); -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -index 635c0ff..2641bbb 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) - (unsigned int) min, - (unsigned int) fifo->capabilities); - -- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); -+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence); - iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); - vmw_fence_queue_init(&fifo->fence_queue); - return vmw_fifo_send_fence(dev_priv, &dummy); -@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) - if (reserveable) - iowrite32(bytes, fifo_mem + - SVGA_FIFO_RESERVED); -- return fifo_mem + (next_cmd >> 2); -+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2); - } else { - need_bounce = true; - } -@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct 
vmw_private *dev_priv, uint32_t *sequence) - - fm = vmw_fifo_reserve(dev_priv, bytes); - if (unlikely(fm == NULL)) { -- *sequence = atomic_read(&dev_priv->fence_seq); -+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq); - ret = -ENOMEM; - (void)vmw_fallback_wait(dev_priv, false, true, *sequence, - false, 3*HZ); -@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) - } - - do { -- *sequence = atomic_add_return(1, &dev_priv->fence_seq); -+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq); - } while (*sequence == 0); - - if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -index e92298a..f68f2d6 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, - * emitted. Then the fence is stale and signaled. - */ - -- ret = ((atomic_read(&dev_priv->fence_seq) - sequence) -+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence) - > VMW_FENCE_WRAP); - - return ret; -@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, - - if (fifo_idle) - down_read(&fifo_state->rwsem); -- signal_seq = atomic_read(&dev_priv->fence_seq); -+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq); - ret = 0; - - for (;;) { -diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c -index c72f1c0..18376f1 100644 ---- a/drivers/gpu/vga/vgaarb.c -+++ b/drivers/gpu/vga/vgaarb.c -@@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - uc = &priv->cards[i]; - } - -- if (!uc) -- return -EINVAL; -+ if (!uc) { -+ ret_val = -EINVAL; -+ goto done; -+ } - -- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) -- return -EINVAL; -+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { -+ ret_val = -EINVAL; -+ goto done; -+ } - -- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) -- return -EINVAL; -+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { -+ ret_val = -EINVAL; -+ goto done; -+ } - - vga_put(pdev, io_state); - -diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c -index f26ae31..721fe1b 100644 ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev) - - int hid_add_device(struct hid_device *hdev) - { -- static atomic_t id = ATOMIC_INIT(0); -+ static atomic_unchecked_t id = ATOMIC_INIT(0); - int ret; - - if (WARN_ON(hdev->status & HID_STAT_ADDED)) -@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev) - /* XXX hack, any other cleaner solution after the driver core - * is converted to allow more than 20 bytes as the device name? 
*/ - dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, -- hdev->vendor, hdev->product, atomic_inc_return(&id)); -+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); - - hid_debug_register(hdev, dev_name(&hdev->dev)); - ret = device_add(&hdev->dev); -diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c -index 7c1188b..5a64357 100644 ---- a/drivers/hid/usbhid/hiddev.c -+++ b/drivers/hid/usbhid/hiddev.c -@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) - break; - - case HIDIOCAPPLICATION: -- if (arg < 0 || arg >= hid->maxapplication) -+ if (arg >= hid->maxapplication) - break; - - for (i = 0; i < hid->maxcollection; i++) -diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c -index 66f6729..2d6de0a 100644 ---- a/drivers/hwmon/acpi_power_meter.c -+++ b/drivers/hwmon/acpi_power_meter.c -@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr, - return res; - - temp /= 1000; -- if (temp < 0) -- return -EINVAL; - - mutex_lock(&resource->lock); - resource->trip[attr->index - 7] = temp; -diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c -index fe4104c..346febb 100644 ---- a/drivers/hwmon/sht15.c -+++ b/drivers/hwmon/sht15.c -@@ -166,7 +166,7 @@ struct sht15_data { - int supply_uV; - bool supply_uV_valid; - struct work_struct update_supply_work; -- atomic_t interrupt_handled; -+ atomic_unchecked_t interrupt_handled; - }; - - /** -@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data, - return ret; - - gpio_direction_input(data->pdata->gpio_data); -- atomic_set(&data->interrupt_handled, 0); -+ atomic_set_unchecked(&data->interrupt_handled, 0); - - enable_irq(gpio_to_irq(data->pdata->gpio_data)); - if (gpio_get_value(data->pdata->gpio_data) == 0) { - disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); - /* Only relevant if the interrupt hasn't occurred. */ -- if (!atomic_read(&data->interrupt_handled)) -+ if (!atomic_read_unchecked(&data->interrupt_handled)) - schedule_work(&data->read_work); - } - ret = wait_event_timeout(data->wait_queue, -@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d) - - /* First disable the interrupt */ - disable_irq_nosync(irq); -- atomic_inc(&data->interrupt_handled); -+ atomic_inc_unchecked(&data->interrupt_handled); - /* Then schedule a reading work struct */ - if (data->state != SHT15_READING_NOTHING) - schedule_work(&data->read_work); -@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s) - * If not, then start the interrupt again - care here as could - * have gone low in meantime so verify it hasn't! 
- */ -- atomic_set(&data->interrupt_handled, 0); -+ atomic_set_unchecked(&data->interrupt_handled, 0); - enable_irq(gpio_to_irq(data->pdata->gpio_data)); - /* If still not occurred or another handler has been scheduled */ - if (gpio_get_value(data->pdata->gpio_data) -- || atomic_read(&data->interrupt_handled)) -+ || atomic_read_unchecked(&data->interrupt_handled)) - return; - } - -diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c -index 378fcb5..5e91fa8 100644 ---- a/drivers/i2c/busses/i2c-amd756-s4882.c -+++ b/drivers/i2c/busses/i2c-amd756-s4882.c -@@ -43,7 +43,7 @@ - extern struct i2c_adapter amd756_smbus; - - static struct i2c_adapter *s4882_adapter; --static struct i2c_algorithm *s4882_algo; -+static i2c_algorithm_no_const *s4882_algo; - - /* Wrapper access functions for multiplexed SMBus */ - static DEFINE_MUTEX(amd756_lock); -diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c -index 29015eb..af2d8e9 100644 ---- a/drivers/i2c/busses/i2c-nforce2-s4985.c -+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c -@@ -41,7 +41,7 @@ - extern struct i2c_adapter *nforce2_smbus; - - static struct i2c_adapter *s4985_adapter; --static struct i2c_algorithm *s4985_algo; -+static i2c_algorithm_no_const *s4985_algo; - - /* Wrapper access functions for multiplexed SMBus */ - static DEFINE_MUTEX(nforce2_lock); -diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c -index d7a4833..7fae376 100644 ---- a/drivers/i2c/i2c-mux.c -+++ b/drivers/i2c/i2c-mux.c -@@ -28,7 +28,7 @@ - /* multiplexer per channel data */ - struct i2c_mux_priv { - struct i2c_adapter adap; -- struct i2c_algorithm algo; -+ i2c_algorithm_no_const algo; - - struct i2c_adapter *parent; - void *mux_dev; /* the mux chip/device */ -diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c -index 57d00ca..0145194 100644 ---- a/drivers/ide/aec62xx.c -+++ b/drivers/ide/aec62xx.c -@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = { - .cable_detect = atp86x_cable_detect, - }; - --static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { -+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = { - { /* 0: AEC6210 */ - .name = DRV_NAME, - .init_chipset = init_chipset_aec62xx, -diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c -index 2c8016a..911a27c 100644 ---- a/drivers/ide/alim15x3.c -+++ b/drivers/ide/alim15x3.c -@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info ali15x3_chipset __devinitdata = { -+static const struct ide_port_info ali15x3_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_ali15x3, - .init_hwif = init_hwif_ali15x3, -diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c -index 3747b25..56fc995 100644 ---- a/drivers/ide/amd74xx.c -+++ b/drivers/ide/amd74xx.c -@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = { - .udma_mask = udma, \ - } - --static const struct ide_port_info amd74xx_chipsets[] __devinitdata = { -+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = { - /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2), - /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4), - /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5), -diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c -index 15f0ead..cb43480 100644 ---- a/drivers/ide/atiixp.c -+++ b/drivers/ide/atiixp.c -@@ -139,7 +139,7 @@ static const struct 
ide_port_ops atiixp_port_ops = { - .cable_detect = atiixp_cable_detect, - }; - --static const struct ide_port_info atiixp_pci_info[] __devinitdata = { -+static const struct ide_port_info atiixp_pci_info[] __devinitconst = { - { /* 0: IXP200/300/400/700 */ - .name = DRV_NAME, - .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, -diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c -index 5f80312..d1fc438 100644 ---- a/drivers/ide/cmd64x.c -+++ b/drivers/ide/cmd64x.c -@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { -+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = { - { /* 0: CMD643 */ - .name = DRV_NAME, - .init_chipset = init_chipset_cmd64x, -diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c -index 2c1e5f7..1444762 100644 ---- a/drivers/ide/cs5520.c -+++ b/drivers/ide/cs5520.c -@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = { - .set_dma_mode = cs5520_set_dma_mode, - }; - --static const struct ide_port_info cyrix_chipset __devinitdata = { -+static const struct ide_port_info cyrix_chipset __devinitconst = { - .name = DRV_NAME, - .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } }, - .port_ops = &cs5520_port_ops, -diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c -index 4dc4eb9..49b40ad 100644 ---- a/drivers/ide/cs5530.c -+++ b/drivers/ide/cs5530.c -@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = { - .udma_filter = cs5530_udma_filter, - }; - --static const struct ide_port_info cs5530_chipset __devinitdata = { -+static const struct ide_port_info cs5530_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_cs5530, - .init_hwif = init_hwif_cs5530, -diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c -index 5059faf..18d4c85 100644 ---- a/drivers/ide/cs5535.c -+++ b/drivers/ide/cs5535.c -@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = { - .cable_detect = cs5535_cable_detect, - }; - --static const struct ide_port_info cs5535_chipset __devinitdata = { -+static const struct ide_port_info cs5535_chipset __devinitconst = { - .name = DRV_NAME, - .port_ops = &cs5535_port_ops, - .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE, -diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c -index 67cbcfa..37ea151 100644 ---- a/drivers/ide/cy82c693.c -+++ b/drivers/ide/cy82c693.c -@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = { - .set_dma_mode = cy82c693_set_dma_mode, - }; - --static const struct ide_port_info cy82c693_chipset __devinitdata = { -+static const struct ide_port_info cy82c693_chipset __devinitconst = { - .name = DRV_NAME, - .init_iops = init_iops_cy82c693, - .port_ops = &cy82c693_port_ops, -diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c -index 58c51cd..4aec3b8 100644 ---- a/drivers/ide/hpt366.c -+++ b/drivers/ide/hpt366.c -@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = { - } - }; - --static const struct hpt_info hpt36x __devinitdata = { -+static const struct hpt_info hpt36x __devinitconst = { - .chip_name = "HPT36x", - .chip_type = HPT36x, - .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? 
ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, -@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = { - .timings = &hpt36x_timings - }; - --static const struct hpt_info hpt370 __devinitdata = { -+static const struct hpt_info hpt370 __devinitconst = { - .chip_name = "HPT370", - .chip_type = HPT370, - .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, -@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt370a __devinitdata = { -+static const struct hpt_info hpt370a __devinitconst = { - .chip_name = "HPT370A", - .chip_type = HPT370A, - .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, -@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt374 __devinitdata = { -+static const struct hpt_info hpt374 __devinitconst = { - .chip_name = "HPT374", - .chip_type = HPT374, - .udma_mask = ATA_UDMA5, -@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt372 __devinitdata = { -+static const struct hpt_info hpt372 __devinitconst = { - .chip_name = "HPT372", - .chip_type = HPT372, - .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt372a __devinitdata = { -+static const struct hpt_info hpt372a __devinitconst = { - .chip_name = "HPT372A", - .chip_type = HPT372A, - .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt302 __devinitdata = { -+static const struct hpt_info hpt302 __devinitconst = { - .chip_name = "HPT302", - .chip_type = HPT302, - .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt371 __devinitdata = { -+static const struct hpt_info hpt371 __devinitconst = { - .chip_name = "HPT371", - .chip_type = HPT371, - .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt372n __devinitdata = { -+static const struct hpt_info hpt372n __devinitconst = { - .chip_name = "HPT372N", - .chip_type = HPT372N, - .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt302n __devinitdata = { -+static const struct hpt_info hpt302n __devinitconst = { - .chip_name = "HPT302N", - .chip_type = HPT302N, - .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, -@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = { - .timings = &hpt37x_timings - }; - --static const struct hpt_info hpt371n __devinitdata = { -+static const struct hpt_info hpt371n __devinitconst = { - .chip_name = "HPT371N", - .chip_type = HPT371N, - .udma_mask = HPT371_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, -@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info hpt366_chipsets[] __devinitdata = { -+static const struct ide_port_info hpt366_chipsets[] __devinitconst = { - { /* 0: HPT36x */ - .name = DRV_NAME, - .init_chipset = init_chipset_hpt366, -diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c -index 04b0956..f5b47dc 100644 ---- a/drivers/ide/ide-cd.c -+++ b/drivers/ide/ide-cd.c -@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) - alignment = queue_dma_alignment(q) | q->dma_pad_mask; - if ((unsigned long)buf & alignment - || blk_rq_bytes(rq) & q->dma_pad_mask -- || object_is_on_stack(buf)) -+ || object_starts_on_stack(buf)) - drive->dma = 0; - } - } -diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c -index 61fdf54..2834ea6 100644 ---- a/drivers/ide/ide-floppy.c -+++ b/drivers/ide/ide-floppy.c -@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) - u8 pc_buf[256], header_len, desc_cnt; - int i, rc = 1, blocks, length; - -+ pax_track_stack(); -+ - ide_debug_log(IDE_DBG_FUNC, "enter"); - - drive->bios_cyl = 0; -diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c -index a743e68..1cfd674 100644 ---- a/drivers/ide/ide-pci-generic.c -+++ b/drivers/ide/ide-pci-generic.c -@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = { - .udma_mask = ATA_UDMA6, \ - } - --static const struct ide_port_info generic_chipsets[] __devinitdata = { -+static const struct ide_port_info generic_chipsets[] __devinitconst = { - /* 0: Unknown */ - DECLARE_GENERIC_PCI_DEV(0), - -diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c -index 560e66d..d5dd180 100644 ---- a/drivers/ide/it8172.c -+++ b/drivers/ide/it8172.c -@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = { - .set_dma_mode = it8172_set_dma_mode, - }; - --static const struct ide_port_info it8172_port_info __devinitdata = { -+static const struct ide_port_info it8172_port_info __devinitconst = { - .name = DRV_NAME, - .port_ops = &it8172_port_ops, - .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} }, -diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c -index 46816ba..1847aeb 100644 ---- a/drivers/ide/it8213.c -+++ b/drivers/ide/it8213.c -@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = { - .cable_detect = it8213_cable_detect, - }; - --static const struct ide_port_info it8213_chipset __devinitdata = { -+static const struct ide_port_info it8213_chipset __devinitconst = { - .name = DRV_NAME, - .enablebits = { {0x41, 0x80, 0x80} }, - .port_ops = &it8213_port_ops, -diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c -index 2e3169f..c5611db 100644 ---- a/drivers/ide/it821x.c -+++ b/drivers/ide/it821x.c -@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = { - .cable_detect = it821x_cable_detect, - }; - --static const struct ide_port_info it821x_chipset __devinitdata = { -+static const struct ide_port_info it821x_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_it821x, - .init_hwif = init_hwif_it821x, -diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c -index 74c2c4a..efddd7d 100644 ---- a/drivers/ide/jmicron.c -+++ b/drivers/ide/jmicron.c -@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = { - .cable_detect = jmicron_cable_detect, - }; - --static const struct ide_port_info 
jmicron_chipset __devinitdata = { -+static const struct ide_port_info jmicron_chipset __devinitconst = { - .name = DRV_NAME, - .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, - .port_ops = &jmicron_port_ops, -diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c -index 95327a2..73f78d8 100644 ---- a/drivers/ide/ns87415.c -+++ b/drivers/ide/ns87415.c -@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = { - .dma_sff_read_status = superio_dma_sff_read_status, - }; - --static const struct ide_port_info ns87415_chipset __devinitdata = { -+static const struct ide_port_info ns87415_chipset __devinitconst = { - .name = DRV_NAME, - .init_hwif = init_hwif_ns87415, - .tp_ops = &ns87415_tp_ops, -diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c -index 1a53a4c..39edc66 100644 ---- a/drivers/ide/opti621.c -+++ b/drivers/ide/opti621.c -@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = { - .set_pio_mode = opti621_set_pio_mode, - }; - --static const struct ide_port_info opti621_chipset __devinitdata = { -+static const struct ide_port_info opti621_chipset __devinitconst = { - .name = DRV_NAME, - .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} }, - .port_ops = &opti621_port_ops, -diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c -index 9546fe2..2e5ceb6 100644 ---- a/drivers/ide/pdc202xx_new.c -+++ b/drivers/ide/pdc202xx_new.c -@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = { - .udma_mask = udma, \ - } - --static const struct ide_port_info pdcnew_chipsets[] __devinitdata = { -+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = { - /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5), - /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6), - }; -diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c -index 3a35ec6..5634510 100644 ---- a/drivers/ide/pdc202xx_old.c -+++ b/drivers/ide/pdc202xx_old.c -@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { - .max_sectors = sectors, \ - } - --static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { -+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = { - { /* 0: PDC20246 */ - .name = DRV_NAME, - .init_chipset = init_chipset_pdc202xx, -diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c -index b59d04c..368c2a7 100644 ---- a/drivers/ide/piix.c -+++ b/drivers/ide/piix.c -@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = { - .udma_mask = udma, \ - } - --static const struct ide_port_info piix_pci_info[] __devinitdata = { -+static const struct ide_port_info piix_pci_info[] __devinitconst = { - /* 0: MPIIX */ - { /* - * MPIIX actually has only a single IDE channel mapped to -diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c -index a6414a8..c04173e 100644 ---- a/drivers/ide/rz1000.c -+++ b/drivers/ide/rz1000.c -@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev) - } - } - --static const struct ide_port_info rz1000_chipset __devinitdata = { -+static const struct ide_port_info rz1000_chipset __devinitconst = { - .name = DRV_NAME, - .host_flags = IDE_HFLAG_NO_DMA, - }; -diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c -index 356b9b5..d4758eb 100644 ---- a/drivers/ide/sc1200.c -+++ b/drivers/ide/sc1200.c -@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info sc1200_chipset __devinitdata = { 
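[Editorial note] The long run of IDE hunks is a single mechanical change: per-chipset descriptor tables move from __devinitdata to __devinitconst, i.e. probe-time data that can be discarded after init is additionally marked const so the toolchain may place it in a read-only init section while it exists. Stripped of the kernel section annotations, the pattern is just a const lookup table consumed at probe time; the sketch below is illustrative only, and the chipset names and UDMA masks are placeholders.

/*
 * Sketch only.  "const" lets the linker put the descriptor table in
 * read-only memory; the probe path only ever reads from it.
 */
#include <stdio.h>

struct port_info {
    const char *name;
    unsigned    udma_mask;
};

static const struct port_info chipsets[] = {
    { "PDC20246", 0x07 },   /* placeholder values */
    { "PDC20262", 0x1f },
};

static void probe(unsigned idx)
{
    const struct port_info *d = &chipsets[idx];
    printf("probing %s, udma mask 0x%02x\n", d->name, d->udma_mask);
}

int main(void)
{
    probe(0);
    probe(1);
    return 0;
}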
-+static const struct ide_port_info sc1200_chipset __devinitconst = { - .name = DRV_NAME, - .port_ops = &sc1200_port_ops, - .dma_ops = &sc1200_dma_ops, -diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c -index b7f5b0c..9701038 100644 ---- a/drivers/ide/scc_pata.c -+++ b/drivers/ide/scc_pata.c -@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = { - .dma_sff_read_status = scc_dma_sff_read_status, - }; - --static const struct ide_port_info scc_chipset __devinitdata = { -+static const struct ide_port_info scc_chipset __devinitconst = { - .name = "sccIDE", - .init_iops = init_iops_scc, - .init_dma = scc_init_dma, -diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c -index 35fb8da..24d72ef 100644 ---- a/drivers/ide/serverworks.c -+++ b/drivers/ide/serverworks.c -@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = { - .cable_detect = svwks_cable_detect, - }; - --static const struct ide_port_info serverworks_chipsets[] __devinitdata = { -+static const struct ide_port_info serverworks_chipsets[] __devinitconst = { - { /* 0: OSB4 */ - .name = DRV_NAME, - .init_chipset = init_chipset_svwks, -diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c -index ab3db61..afed580 100644 ---- a/drivers/ide/setup-pci.c -+++ b/drivers/ide/setup-pci.c -@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, - int ret, i, n_ports = dev2 ? 4 : 2; - struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; - -+ pax_track_stack(); -+ - for (i = 0; i < n_ports / 2; i++) { - ret = ide_setup_pci_controller(pdev[i], d, !i); - if (ret < 0) -diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c -index ddeda44..46f7e30 100644 ---- a/drivers/ide/siimage.c -+++ b/drivers/ide/siimage.c -@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = { - .udma_mask = ATA_UDMA6, \ - } - --static const struct ide_port_info siimage_chipsets[] __devinitdata = { -+static const struct ide_port_info siimage_chipsets[] __devinitconst = { - /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops), - /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops) - }; -diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c -index 4a00225..09e61b4 100644 ---- a/drivers/ide/sis5513.c -+++ b/drivers/ide/sis5513.c -@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = { - .cable_detect = sis_cable_detect, - }; - --static const struct ide_port_info sis5513_chipset __devinitdata = { -+static const struct ide_port_info sis5513_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_sis5513, - .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, -diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c -index f21dc2a..d051cd2 100644 ---- a/drivers/ide/sl82c105.c -+++ b/drivers/ide/sl82c105.c -@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info sl82c105_chipset __devinitdata = { -+static const struct ide_port_info sl82c105_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_sl82c105, - .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, -diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c -index 864ffe0..863a5e9 100644 ---- a/drivers/ide/slc90e66.c -+++ b/drivers/ide/slc90e66.c -@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = { - .cable_detect = slc90e66_cable_detect, - }; - --static const struct ide_port_info slc90e66_chipset 
__devinitdata = { -+static const struct ide_port_info slc90e66_chipset __devinitconst = { - .name = DRV_NAME, - .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} }, - .port_ops = &slc90e66_port_ops, -diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c -index e444d24..ba577de 100644 ---- a/drivers/ide/tc86c001.c -+++ b/drivers/ide/tc86c001.c -@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = { - .dma_sff_read_status = ide_dma_sff_read_status, - }; - --static const struct ide_port_info tc86c001_chipset __devinitdata = { -+static const struct ide_port_info tc86c001_chipset __devinitconst = { - .name = DRV_NAME, - .init_hwif = init_hwif_tc86c001, - .port_ops = &tc86c001_port_ops, -diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c -index e53a1b7..d11aff7 100644 ---- a/drivers/ide/triflex.c -+++ b/drivers/ide/triflex.c -@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = { - .set_dma_mode = triflex_set_mode, - }; - --static const struct ide_port_info triflex_device __devinitdata = { -+static const struct ide_port_info triflex_device __devinitconst = { - .name = DRV_NAME, - .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, - .port_ops = &triflex_port_ops, -diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c -index 4b42ca0..e494a98 100644 ---- a/drivers/ide/trm290.c -+++ b/drivers/ide/trm290.c -@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = { - .dma_check = trm290_dma_check, - }; - --static const struct ide_port_info trm290_chipset __devinitdata = { -+static const struct ide_port_info trm290_chipset __devinitconst = { - .name = DRV_NAME, - .init_hwif = init_hwif_trm290, - .tp_ops = &trm290_tp_ops, -diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c -index f46f49c..eb77678 100644 ---- a/drivers/ide/via82cxxx.c -+++ b/drivers/ide/via82cxxx.c -@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = { - .cable_detect = via82cxxx_cable_detect, - }; - --static const struct ide_port_info via82cxxx_chipset __devinitdata = { -+static const struct ide_port_info via82cxxx_chipset __devinitconst = { - .name = DRV_NAME, - .init_chipset = init_chipset_via82cxxx, - .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, -diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c -index fc0f2bd..ac2f8a5 100644 ---- a/drivers/infiniband/core/cm.c -+++ b/drivers/infiniband/core/cm.c -@@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] - - struct cm_counter_group { - struct kobject obj; -- atomic_long_t counter[CM_ATTR_COUNT]; -+ atomic_long_unchecked_t counter[CM_ATTR_COUNT]; - }; - - struct cm_counter_attribute { -@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work, - struct ib_mad_send_buf *msg = NULL; - int ret; - -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_REQ_COUNTER]); - - /* Quick state check to discard duplicate REQs. */ -@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work) - if (!cm_id_priv) - return; - -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
- counter[CM_REP_COUNTER]); - ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); - if (ret) -@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work) - if (cm_id_priv->id.state != IB_CM_REP_SENT && - cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { - spin_unlock_irq(&cm_id_priv->lock); -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_RTU_COUNTER]); - goto out; - } -@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work) - cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, - dreq_msg->local_comm_id); - if (!cm_id_priv) { -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_DREQ_COUNTER]); - cm_issue_drep(work->port, work->mad_recv_wc); - return -EINVAL; -@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work) - case IB_CM_MRA_REP_RCVD: - break; - case IB_CM_TIMEWAIT: -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_DREQ_COUNTER]); - if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) - goto unlock; -@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work) - cm_free_msg(msg); - goto deref; - case IB_CM_DREQ_RCVD: -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_DREQ_COUNTER]); - goto unlock; - default: -@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work) - ib_modify_mad(cm_id_priv->av.port->mad_agent, - cm_id_priv->msg, timeout)) { - if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) -- atomic_long_inc(&work->port-> -+ atomic_long_inc_unchecked(&work->port-> - counter_group[CM_RECV_DUPLICATES]. - counter[CM_MRA_COUNTER]); - goto out; -@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work) - break; - case IB_CM_MRA_REQ_RCVD: - case IB_CM_MRA_REP_RCVD: -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_MRA_COUNTER]); - /* fall through */ - default: -@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work) - case IB_CM_LAP_IDLE: - break; - case IB_CM_MRA_LAP_SENT: -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_LAP_COUNTER]); - if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) - goto unlock; -@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work) - cm_free_msg(msg); - goto deref; - case IB_CM_LAP_RCVD: -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_LAP_COUNTER]); - goto unlock; - default: -@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work) - cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); - if (cur_cm_id_priv) { - spin_unlock_irq(&cm.lock); -- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. -+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. - counter[CM_SIDR_REQ_COUNTER]); - goto out; /* Duplicate message. 
*/ - } -@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, - if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) - msg->retries = 1; - -- atomic_long_add(1 + msg->retries, -+ atomic_long_add_unchecked(1 + msg->retries, - &port->counter_group[CM_XMIT].counter[attr_index]); - if (msg->retries) -- atomic_long_add(msg->retries, -+ atomic_long_add_unchecked(msg->retries, - &port->counter_group[CM_XMIT_RETRIES]. - counter[attr_index]); - -@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, - } - - attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); -- atomic_long_inc(&port->counter_group[CM_RECV]. -+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. - counter[attr_id - CM_ATTR_ID_OFFSET]); - - work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, -@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, - cm_attr = container_of(attr, struct cm_counter_attribute, attr); - - return sprintf(buf, "%ld\n", -- atomic_long_read(&group->counter[cm_attr->index])); -+ atomic_long_read_unchecked(&group->counter[cm_attr->index])); - } - - static const struct sysfs_ops cm_counter_ops = { -diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c -index 4507043..14ad522 100644 ---- a/drivers/infiniband/core/fmr_pool.c -+++ b/drivers/infiniband/core/fmr_pool.c -@@ -97,8 +97,8 @@ struct ib_fmr_pool { - - struct task_struct *thread; - -- atomic_t req_ser; -- atomic_t flush_ser; -+ atomic_unchecked_t req_ser; -+ atomic_unchecked_t flush_ser; - - wait_queue_head_t force_wait; - }; -@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) - struct ib_fmr_pool *pool = pool_ptr; - - do { -- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { -+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { - ib_fmr_batch_release(pool); - -- atomic_inc(&pool->flush_ser); -+ atomic_inc_unchecked(&pool->flush_ser); - wake_up_interruptible(&pool->force_wait); - - if (pool->flush_function) -@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) - } - - set_current_state(TASK_INTERRUPTIBLE); -- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && -+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && - !kthread_should_stop()) - schedule(); - __set_current_state(TASK_RUNNING); -@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, - pool->dirty_watermark = params->dirty_watermark; - pool->dirty_len = 0; - spin_lock_init(&pool->pool_lock); -- atomic_set(&pool->req_ser, 0); -- atomic_set(&pool->flush_ser, 0); -+ atomic_set_unchecked(&pool->req_ser, 0); -+ atomic_set_unchecked(&pool->flush_ser, 0); - init_waitqueue_head(&pool->force_wait); - - pool->thread = kthread_run(ib_fmr_cleanup_thread, -@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) - } - spin_unlock_irq(&pool->pool_lock); - -- serial = atomic_inc_return(&pool->req_ser); -+ serial = atomic_inc_return_unchecked(&pool->req_ser); - wake_up_process(pool->thread); - - if (wait_event_interruptible(pool->force_wait, -- atomic_read(&pool->flush_ser) - serial >= 0)) -+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) - return -EINTR; - - return 0; -@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) - } else { - list_add_tail(&fmr->list, &pool->dirty_list); - if (++pool->dirty_len >= pool->dirty_watermark) { -- 
atomic_inc(&pool->req_ser); -+ atomic_inc_unchecked(&pool->req_ser); - wake_up_process(pool->thread); - } - } -diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c -index 40c8353..946b0e4 100644 ---- a/drivers/infiniband/hw/cxgb4/mem.c -+++ b/drivers/infiniband/hw/cxgb4/mem.c -@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, - int err; - struct fw_ri_tpte tpt; - u32 stag_idx; -- static atomic_t key; -+ static atomic_unchecked_t key; - - if (c4iw_fatal_error(rdev)) - return -EIO; -@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, - &rdev->resource.tpt_fifo_lock); - if (!stag_idx) - return -ENOMEM; -- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); -+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff); - } - PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", - __func__, stag_state, type, pdid, stag_idx); -diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c -index 31ae1b1..2f5b038 100644 ---- a/drivers/infiniband/hw/ipath/ipath_fs.c -+++ b/drivers/infiniband/hw/ipath/ipath_fs.c -@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf, - struct infinipath_counters counters; - struct ipath_devdata *dd; - -+ pax_track_stack(); -+ - dd = file->f_path.dentry->d_inode->i_private; - dd->ipath_f_read_counters(dd, &counters); - -diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c -index 79b3dbc..96e5fcc 100644 ---- a/drivers/infiniband/hw/ipath/ipath_rc.c -+++ b/drivers/infiniband/hw/ipath/ipath_rc.c -@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, - struct ib_atomic_eth *ateth; - struct ipath_ack_entry *e; - u64 vaddr; -- atomic64_t *maddr; -+ atomic64_unchecked_t *maddr; - u64 sdata; - u32 rkey; - u8 next; -@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, - IB_ACCESS_REMOTE_ATOMIC))) - goto nack_acc_unlck; - /* Perform atomic OP and save result. */ -- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; -+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; - sdata = be64_to_cpu(ateth->swap_data); - e = &qp->s_ack_queue[qp->r_head_ack_queue]; - e->atomic_data = (opcode == OP(FETCH_ADD)) ? -- (u64) atomic64_add_return(sdata, maddr) - sdata : -+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : - (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, - be64_to_cpu(ateth->compare_data), - sdata); -diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c -index 1f95bba..9530f87 100644 ---- a/drivers/infiniband/hw/ipath/ipath_ruc.c -+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c -@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) - unsigned long flags; - struct ib_wc wc; - u64 sdata; -- atomic64_t *maddr; -+ atomic64_unchecked_t *maddr; - enum ib_wc_status send_status; - - /* -@@ -382,11 +382,11 @@ again: - IB_ACCESS_REMOTE_ATOMIC))) - goto acc_err; - /* Perform atomic OP and save result. */ -- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; -+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; - sdata = wqe->wr.wr.atomic.compare_add; - *(u64 *) sqp->s_sge.sge.vaddr = - (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
-- (u64) atomic64_add_return(sdata, maddr) - sdata : -+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : - (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, - sdata, wqe->wr.wr.atomic.swap); - goto send_comp; -diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c -index 2d668c6..3312bb7 100644 ---- a/drivers/infiniband/hw/nes/nes.c -+++ b/drivers/infiniband/hw/nes/nes.c -@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); - LIST_HEAD(nes_adapter_list); - static LIST_HEAD(nes_dev_list); - --atomic_t qps_destroyed; -+atomic_unchecked_t qps_destroyed; - - static unsigned int ee_flsh_adapter; - static unsigned int sysfs_nonidx_addr; -@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r - struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; - struct nes_adapter *nesadapter = nesdev->nesadapter; - -- atomic_inc(&qps_destroyed); -+ atomic_inc_unchecked(&qps_destroyed); - - /* Free the control structures */ - -diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h -index 6fe7987..68637b5 100644 ---- a/drivers/infiniband/hw/nes/nes.h -+++ b/drivers/infiniband/hw/nes/nes.h -@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level; - extern unsigned int wqm_quanta; - extern struct list_head nes_adapter_list; - --extern atomic_t cm_connects; --extern atomic_t cm_accepts; --extern atomic_t cm_disconnects; --extern atomic_t cm_closes; --extern atomic_t cm_connecteds; --extern atomic_t cm_connect_reqs; --extern atomic_t cm_rejects; --extern atomic_t mod_qp_timouts; --extern atomic_t qps_created; --extern atomic_t qps_destroyed; --extern atomic_t sw_qps_destroyed; -+extern atomic_unchecked_t cm_connects; -+extern atomic_unchecked_t cm_accepts; -+extern atomic_unchecked_t cm_disconnects; -+extern atomic_unchecked_t cm_closes; -+extern atomic_unchecked_t cm_connecteds; -+extern atomic_unchecked_t cm_connect_reqs; -+extern atomic_unchecked_t cm_rejects; -+extern atomic_unchecked_t mod_qp_timouts; -+extern atomic_unchecked_t qps_created; -+extern atomic_unchecked_t qps_destroyed; -+extern atomic_unchecked_t sw_qps_destroyed; - extern u32 mh_detected; - extern u32 mh_pauses_sent; - extern u32 cm_packets_sent; -@@ -194,14 +194,14 @@ extern u32 cm_packets_created; - extern u32 cm_packets_received; - extern u32 cm_packets_dropped; - extern u32 cm_packets_retrans; --extern atomic_t cm_listens_created; --extern atomic_t cm_listens_destroyed; -+extern atomic_unchecked_t cm_listens_created; -+extern atomic_unchecked_t cm_listens_destroyed; - extern u32 cm_backlog_drops; --extern atomic_t cm_loopbacks; --extern atomic_t cm_nodes_created; --extern atomic_t cm_nodes_destroyed; --extern atomic_t cm_accel_dropped_pkts; --extern atomic_t cm_resets_recvd; -+extern atomic_unchecked_t cm_loopbacks; -+extern atomic_unchecked_t cm_nodes_created; -+extern atomic_unchecked_t cm_nodes_destroyed; -+extern atomic_unchecked_t cm_accel_dropped_pkts; -+extern atomic_unchecked_t cm_resets_recvd; - - extern u32 int_mod_timer_init; - extern u32 int_mod_cq_depth_256; -diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c -index a237547..28a9819 100644 ---- a/drivers/infiniband/hw/nes/nes_cm.c -+++ b/drivers/infiniband/hw/nes/nes_cm.c -@@ -68,14 +68,14 @@ u32 cm_packets_dropped; - u32 cm_packets_retrans; - u32 cm_packets_created; - u32 cm_packets_received; --atomic_t cm_listens_created; --atomic_t cm_listens_destroyed; -+atomic_unchecked_t 
cm_listens_created; -+atomic_unchecked_t cm_listens_destroyed; - u32 cm_backlog_drops; --atomic_t cm_loopbacks; --atomic_t cm_nodes_created; --atomic_t cm_nodes_destroyed; --atomic_t cm_accel_dropped_pkts; --atomic_t cm_resets_recvd; -+atomic_unchecked_t cm_loopbacks; -+atomic_unchecked_t cm_nodes_created; -+atomic_unchecked_t cm_nodes_destroyed; -+atomic_unchecked_t cm_accel_dropped_pkts; -+atomic_unchecked_t cm_resets_recvd; - - static inline int mini_cm_accelerated(struct nes_cm_core *, - struct nes_cm_node *); -@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = { - - static struct nes_cm_core *g_cm_core; - --atomic_t cm_connects; --atomic_t cm_accepts; --atomic_t cm_disconnects; --atomic_t cm_closes; --atomic_t cm_connecteds; --atomic_t cm_connect_reqs; --atomic_t cm_rejects; -+atomic_unchecked_t cm_connects; -+atomic_unchecked_t cm_accepts; -+atomic_unchecked_t cm_disconnects; -+atomic_unchecked_t cm_closes; -+atomic_unchecked_t cm_connecteds; -+atomic_unchecked_t cm_connect_reqs; -+atomic_unchecked_t cm_rejects; - - - /** -@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, - kfree(listener); - listener = NULL; - ret = 0; -- atomic_inc(&cm_listens_destroyed); -+ atomic_inc_unchecked(&cm_listens_destroyed); - } else { - spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); - } -@@ -1242,7 +1242,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, - cm_node->rem_mac); - - add_hte_node(cm_core, cm_node); -- atomic_inc(&cm_nodes_created); -+ atomic_inc_unchecked(&cm_nodes_created); - - return cm_node; - } -@@ -1300,7 +1300,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, - } - - atomic_dec(&cm_core->node_cnt); -- atomic_inc(&cm_nodes_destroyed); -+ atomic_inc_unchecked(&cm_nodes_destroyed); - nesqp = cm_node->nesqp; - if (nesqp) { - nesqp->cm_node = NULL; -@@ -1367,7 +1367,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, - - static void drop_packet(struct sk_buff *skb) - { -- atomic_inc(&cm_accel_dropped_pkts); -+ atomic_inc_unchecked(&cm_accel_dropped_pkts); - dev_kfree_skb_any(skb); - } - -@@ -1430,7 +1430,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, - { - - int reset = 0; /* whether to send reset in case of err.. */ -- atomic_inc(&cm_resets_recvd); -+ atomic_inc_unchecked(&cm_resets_recvd); - nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
- " refcnt=%d\n", cm_node, cm_node->state, - atomic_read(&cm_node->ref_count)); -@@ -2059,7 +2059,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, - rem_ref_cm_node(cm_node->cm_core, cm_node); - return NULL; - } -- atomic_inc(&cm_loopbacks); -+ atomic_inc_unchecked(&cm_loopbacks); - loopbackremotenode->loopbackpartner = cm_node; - loopbackremotenode->tcp_cntxt.rcv_wscale = - NES_CM_DEFAULT_RCV_WND_SCALE; -@@ -2334,7 +2334,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, - add_ref_cm_node(cm_node); - } else if (cm_node->state == NES_CM_STATE_TSA) { - rem_ref_cm_node(cm_core, cm_node); -- atomic_inc(&cm_accel_dropped_pkts); -+ atomic_inc_unchecked(&cm_accel_dropped_pkts); - dev_kfree_skb_any(skb); - break; - } -@@ -2640,7 +2640,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) - - if ((cm_id) && (cm_id->event_handler)) { - if (issue_disconn) { -- atomic_inc(&cm_disconnects); -+ atomic_inc_unchecked(&cm_disconnects); - cm_event.event = IW_CM_EVENT_DISCONNECT; - cm_event.status = disconn_status; - cm_event.local_addr = cm_id->local_addr; -@@ -2662,7 +2662,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) - } - - if (issue_close) { -- atomic_inc(&cm_closes); -+ atomic_inc_unchecked(&cm_closes); - nes_disconnect(nesqp, 1); - - cm_id->provider_data = nesqp; -@@ -2793,7 +2793,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) - - nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", - nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); -- atomic_inc(&cm_accepts); -+ atomic_inc_unchecked(&cm_accepts); - - nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", - netdev_refcnt_read(nesvnic->netdev)); -@@ -3003,7 +3003,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) - - struct nes_cm_core *cm_core; - -- atomic_inc(&cm_rejects); -+ atomic_inc_unchecked(&cm_rejects); - cm_node = (struct nes_cm_node *) cm_id->provider_data; - loopback = cm_node->loopbackpartner; - cm_core = cm_node->cm_core; -@@ -3069,7 +3069,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) - ntohl(cm_id->local_addr.sin_addr.s_addr), - ntohs(cm_id->local_addr.sin_port)); - -- atomic_inc(&cm_connects); -+ atomic_inc_unchecked(&cm_connects); - nesqp->active_conn = 1; - - /* cache the cm_id in the qp */ -@@ -3175,7 +3175,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) - g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); - return err; - } -- atomic_inc(&cm_listens_created); -+ atomic_inc_unchecked(&cm_listens_created); - } - - cm_id->add_ref(cm_id); -@@ -3280,7 +3280,7 @@ static void cm_event_connected(struct nes_cm_event *event) - if (nesqp->destroyed) { - return; - } -- atomic_inc(&cm_connecteds); -+ atomic_inc_unchecked(&cm_connecteds); - nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" - " local port 0x%04X. 
jiffies = %lu.\n", - nesqp->hwqp.qp_id, -@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) - - cm_id->add_ref(cm_id); - ret = cm_id->event_handler(cm_id, &cm_event); -- atomic_inc(&cm_closes); -+ atomic_inc_unchecked(&cm_closes); - cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = 0; - cm_event.provider_data = cm_id->provider_data; -@@ -3531,7 +3531,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) - return; - cm_id = cm_node->cm_id; - -- atomic_inc(&cm_connect_reqs); -+ atomic_inc_unchecked(&cm_connect_reqs); - nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", - cm_node, cm_id, jiffies); - -@@ -3569,7 +3569,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) - return; - cm_id = cm_node->cm_id; - -- atomic_inc(&cm_connect_reqs); -+ atomic_inc_unchecked(&cm_connect_reqs); - nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", - cm_node, cm_id, jiffies); - -diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c -index 9d7ffeb..a95dd7d 100644 ---- a/drivers/infiniband/hw/nes/nes_nic.c -+++ b/drivers/infiniband/hw/nes/nes_nic.c -@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, - target_stat_values[++index] = mh_detected; - target_stat_values[++index] = mh_pauses_sent; - target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; -- target_stat_values[++index] = atomic_read(&cm_connects); -- target_stat_values[++index] = atomic_read(&cm_accepts); -- target_stat_values[++index] = atomic_read(&cm_disconnects); -- target_stat_values[++index] = atomic_read(&cm_connecteds); -- target_stat_values[++index] = atomic_read(&cm_connect_reqs); -- target_stat_values[++index] = atomic_read(&cm_rejects); -- target_stat_values[++index] = atomic_read(&mod_qp_timouts); -- target_stat_values[++index] = atomic_read(&qps_created); -- target_stat_values[++index] = atomic_read(&sw_qps_destroyed); -- target_stat_values[++index] = atomic_read(&qps_destroyed); -- target_stat_values[++index] = atomic_read(&cm_closes); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); -+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); -+ target_stat_values[++index] = atomic_read_unchecked(&qps_created); -+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); -+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes); - target_stat_values[++index] = cm_packets_sent; - target_stat_values[++index] = cm_packets_bounced; - target_stat_values[++index] = cm_packets_created; - target_stat_values[++index] = cm_packets_received; - target_stat_values[++index] = cm_packets_dropped; - target_stat_values[++index] = cm_packets_retrans; -- target_stat_values[++index] = atomic_read(&cm_listens_created); -- target_stat_values[++index] = atomic_read(&cm_listens_destroyed); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed); - target_stat_values[++index] = cm_backlog_drops; -- 
target_stat_values[++index] = atomic_read(&cm_loopbacks); -- target_stat_values[++index] = atomic_read(&cm_nodes_created); -- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); -- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); -- target_stat_values[++index] = atomic_read(&cm_resets_recvd); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); -+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); - target_stat_values[++index] = nesadapter->free_4kpbl; - target_stat_values[++index] = nesadapter->free_256pbl; - target_stat_values[++index] = int_mod_timer_init; -diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c -index 9f2f7d4..6d2fee2 100644 ---- a/drivers/infiniband/hw/nes/nes_verbs.c -+++ b/drivers/infiniband/hw/nes/nes_verbs.c -@@ -46,9 +46,9 @@ - - #include <rdma/ib_umem.h> - --atomic_t mod_qp_timouts; --atomic_t qps_created; --atomic_t sw_qps_destroyed; -+atomic_unchecked_t mod_qp_timouts; -+atomic_unchecked_t qps_created; -+atomic_unchecked_t sw_qps_destroyed; - - static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); - -@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, - if (init_attr->create_flags) - return ERR_PTR(-EINVAL); - -- atomic_inc(&qps_created); -+ atomic_inc_unchecked(&qps_created); - switch (init_attr->qp_type) { - case IB_QPT_RC: - if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { -@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) - struct iw_cm_event cm_event; - int ret; - -- atomic_inc(&sw_qps_destroyed); -+ atomic_inc_unchecked(&sw_qps_destroyed); - nesqp->destroyed = 1; - - /* Blow away the connection if it exists. 
*/ -diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h -index c9624ea..e025b66 100644 ---- a/drivers/infiniband/hw/qib/qib.h -+++ b/drivers/infiniband/hw/qib/qib.h -@@ -51,6 +51,7 @@ - #include <linux/completion.h> - #include <linux/kref.h> - #include <linux/sched.h> -+#include <linux/slab.h> - - #include "qib_common.h" - #include "qib_verbs.h" -diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c -index c351aa4..e6967c2 100644 ---- a/drivers/input/gameport/gameport.c -+++ b/drivers/input/gameport/gameport.c -@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys); - */ - static void gameport_init_port(struct gameport *gameport) - { -- static atomic_t gameport_no = ATOMIC_INIT(0); -+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0); - - __module_get(THIS_MODULE); - - mutex_init(&gameport->drv_mutex); - device_initialize(&gameport->dev); - dev_set_name(&gameport->dev, "gameport%lu", -- (unsigned long)atomic_inc_return(&gameport_no) - 1); -+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1); - gameport->dev.bus = &gameport_bus; - gameport->dev.release = gameport_release_port; - if (gameport->parent) -diff --git a/drivers/input/input.c b/drivers/input/input.c -index da38d97..2aa0b79 100644 ---- a/drivers/input/input.c -+++ b/drivers/input/input.c -@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev) - */ - int input_register_device(struct input_dev *dev) - { -- static atomic_t input_no = ATOMIC_INIT(0); -+ static atomic_unchecked_t input_no = ATOMIC_INIT(0); - struct input_handler *handler; - const char *path; - int error; -@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev) - dev->setkeycode = input_default_setkeycode; - - dev_set_name(&dev->dev, "input%ld", -- (unsigned long) atomic_inc_return(&input_no) - 1); -+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1); - - error = device_add(&dev->dev); - if (error) -diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c -index b8d8611..15f8d2c 100644 ---- a/drivers/input/joystick/sidewinder.c -+++ b/drivers/input/joystick/sidewinder.c -@@ -30,6 +30,7 @@ - #include <linux/kernel.h> - #include <linux/module.h> - #include <linux/slab.h> -+#include <linux/sched.h> - #include <linux/init.h> - #include <linux/input.h> - #include <linux/gameport.h> -@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw) - unsigned char buf[SW_LENGTH]; - int i; - -+ pax_track_stack(); -+ - i = sw_read_packet(sw->gameport, buf, sw->length, 0); - - if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */ -diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c -index d728875..844c89b 100644 ---- a/drivers/input/joystick/xpad.c -+++ b/drivers/input/joystick/xpad.c -@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev, - - static int xpad_led_probe(struct usb_xpad *xpad) - { -- static atomic_t led_seq = ATOMIC_INIT(0); -+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0); - long led_no; - struct xpad_led *led; - struct led_classdev *led_cdev; -@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad) - if (!led) - return -ENOMEM; - -- led_no = (long)atomic_inc_return(&led_seq) - 1; -+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1; - - snprintf(led->name, sizeof(led->name), "xpad%ld", led_no); - led->xpad = xpad; -diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c -index 0110b5a..d3ad144 100644 ---- 
a/drivers/input/mousedev.c -+++ b/drivers/input/mousedev.c -@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, - - spin_unlock_irq(&client->packet_lock); - -- if (copy_to_user(buffer, data, count)) -+ if (count > sizeof(data) || copy_to_user(buffer, data, count)) - return -EFAULT; - - return count; -diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c -index ba70058..571d25d 100644 ---- a/drivers/input/serio/serio.c -+++ b/drivers/input/serio/serio.c -@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev) - */ - static void serio_init_port(struct serio *serio) - { -- static atomic_t serio_no = ATOMIC_INIT(0); -+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0); - - __module_get(THIS_MODULE); - -@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio) - mutex_init(&serio->drv_mutex); - device_initialize(&serio->dev); - dev_set_name(&serio->dev, "serio%ld", -- (long)atomic_inc_return(&serio_no) - 1); -+ (long)atomic_inc_return_unchecked(&serio_no) - 1); - serio->dev.bus = &serio_bus; - serio->dev.release = serio_release_port; - serio->dev.groups = serio_device_attr_groups; -diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c -index e44933d..9ba484a 100644 ---- a/drivers/isdn/capi/capi.c -+++ b/drivers/isdn/capi/capi.c -@@ -83,8 +83,8 @@ struct capiminor { - - struct capi20_appl *ap; - u32 ncci; -- atomic_t datahandle; -- atomic_t msgid; -+ atomic_unchecked_t datahandle; -+ atomic_unchecked_t msgid; - - struct tty_port port; - int ttyinstop; -@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb) - capimsg_setu16(s, 2, mp->ap->applid); - capimsg_setu8 (s, 4, CAPI_DATA_B3); - capimsg_setu8 (s, 5, CAPI_RESP); -- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid)); -+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid)); - capimsg_setu32(s, 8, mp->ncci); - capimsg_setu16(s, 12, datahandle); - } -@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp) - mp->outbytes -= len; - spin_unlock_bh(&mp->outlock); - -- datahandle = atomic_inc_return(&mp->datahandle); -+ datahandle = atomic_inc_return_unchecked(&mp->datahandle); - skb_push(skb, CAPI_DATA_B3_REQ_LEN); - memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN); - capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN); - capimsg_setu16(skb->data, 2, mp->ap->applid); - capimsg_setu8 (skb->data, 4, CAPI_DATA_B3); - capimsg_setu8 (skb->data, 5, CAPI_REQ); -- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid)); -+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid)); - capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ - capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */ - capimsg_setu16(skb->data, 16, len); /* Data length */ -diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c -index db621db..825ea1a 100644 ---- a/drivers/isdn/gigaset/common.c -+++ b/drivers/isdn/gigaset/common.c -@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, - cs->commands_pending = 0; - cs->cur_at_seq = 0; - cs->gotfwver = -1; -- cs->open_count = 0; -+ local_set(&cs->open_count, 0); - cs->dev = NULL; - cs->tty = NULL; - cs->tty_dev = NULL; -diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h -index 212efaf..f187c6b 100644 ---- a/drivers/isdn/gigaset/gigaset.h -+++ b/drivers/isdn/gigaset/gigaset.h -@@ -35,6 +35,7 @@ - #include <linux/tty_driver.h> - #include <linux/list.h> - #include <linux/atomic.h> 
-+#include <asm/local.h> - - #define GIG_VERSION {0, 5, 0, 0} - #define GIG_COMPAT {0, 4, 0, 0} -@@ -433,7 +434,7 @@ struct cardstate { - spinlock_t cmdlock; - unsigned curlen, cmdbytes; - -- unsigned open_count; -+ local_t open_count; - struct tty_struct *tty; - struct tasklet_struct if_wake_tasklet; - unsigned control_state; -diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c -index e35058b..5898a8b 100644 ---- a/drivers/isdn/gigaset/interface.c -+++ b/drivers/isdn/gigaset/interface.c -@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp) - } - tty->driver_data = cs; - -- ++cs->open_count; -- -- if (cs->open_count == 1) { -+ if (local_inc_return(&cs->open_count) == 1) { - spin_lock_irqsave(&cs->lock, flags); - cs->tty = tty; - spin_unlock_irqrestore(&cs->lock, flags); -@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp) - - if (!cs->connected) - gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ -- else if (!cs->open_count) -+ else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else { -- if (!--cs->open_count) { -+ if (!local_dec_return(&cs->open_count)) { - spin_lock_irqsave(&cs->lock, flags); - cs->tty = NULL; - spin_unlock_irqrestore(&cs->lock, flags); -@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty, - if (!cs->connected) { - gig_dbg(DEBUG_IF, "not connected"); - retval = -ENODEV; -- } else if (!cs->open_count) -+ } else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else { - retval = 0; -@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) - retval = -ENODEV; - goto done; - } -- if (!cs->open_count) { -+ if (!local_read(&cs->open_count)) { - dev_warn(cs->dev, "%s: device not opened\n", __func__); - retval = -ENODEV; - goto done; -@@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty) - if (!cs->connected) { - gig_dbg(DEBUG_IF, "not connected"); - retval = -ENODEV; -- } else if (!cs->open_count) -+ } else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else if (cs->mstate != MS_LOCKED) { - dev_warn(cs->dev, "can't write to unlocked device\n"); -@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty) - - if (!cs->connected) - gig_dbg(DEBUG_IF, "not connected"); -- else if (!cs->open_count) -+ else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else if (cs->mstate != MS_LOCKED) - dev_warn(cs->dev, "can't write to unlocked device\n"); -@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty) - - if (!cs->connected) - gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ -- else if (!cs->open_count) -+ else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else - gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); -@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty) - - if (!cs->connected) - gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ -- else if (!cs->open_count) -+ else if (!local_read(&cs->open_count)) - dev_warn(cs->dev, "%s: device not opened\n", __func__); - else - gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); -@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old) - goto out; - } - -- if (!cs->open_count) { -+ if (!local_read(&cs->open_count)) { - dev_warn(cs->dev, "%s: device 
not opened\n", __func__); - goto out; - } -diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c -index 2a57da59..e7a12ed 100644 ---- a/drivers/isdn/hardware/avm/b1.c -+++ b/drivers/isdn/hardware/avm/b1.c -@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file) - } - if (left) { - if (t4file->user) { -- if (copy_from_user(buf, dp, left)) -+ if (left > sizeof buf || copy_from_user(buf, dp, left)) - return -EFAULT; - } else { - memcpy(buf, dp, left); -@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config) - } - if (left) { - if (config->user) { -- if (copy_from_user(buf, dp, left)) -+ if (left > sizeof buf || copy_from_user(buf, dp, left)) - return -EFAULT; - } else { - memcpy(buf, dp, left); -diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c -index f130724..c373c68 100644 ---- a/drivers/isdn/hardware/eicon/capidtmf.c -+++ b/drivers/isdn/hardware/eicon/capidtmf.c -@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng - byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT]; - short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES]; - -+ pax_track_stack(); - - if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE) - { -diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c -index 4d425c6..a9be6c4 100644 ---- a/drivers/isdn/hardware/eicon/capifunc.c -+++ b/drivers/isdn/hardware/eicon/capifunc.c -@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void) - IDI_SYNC_REQ req; - DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; - -+ pax_track_stack(); -+ - DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); - - for (x = 0; x < MAX_DESCRIPTORS; x++) { -diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c -index 3029234..ef0d9e2 100644 ---- a/drivers/isdn/hardware/eicon/diddfunc.c -+++ b/drivers/isdn/hardware/eicon/diddfunc.c -@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) - IDI_SYNC_REQ req; - DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; - -+ pax_track_stack(); -+ - DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); - - for (x = 0; x < MAX_DESCRIPTORS; x++) { -diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c -index 0bbee78..a0d0a01 100644 ---- a/drivers/isdn/hardware/eicon/divasfunc.c -+++ b/drivers/isdn/hardware/eicon/divasfunc.c -@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) - IDI_SYNC_REQ req; - DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; - -+ pax_track_stack(); -+ - DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); - - for (x = 0; x < MAX_DESCRIPTORS; x++) { -diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h -index 85784a7..a19ca98 100644 ---- a/drivers/isdn/hardware/eicon/divasync.h -+++ b/drivers/isdn/hardware/eicon/divasync.h -@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter { - } diva_didd_add_adapter_t; - typedef struct _diva_didd_remove_adapter { - IDI_CALL p_request; --} diva_didd_remove_adapter_t; -+} __no_const diva_didd_remove_adapter_t; - typedef struct _diva_didd_read_adapter_array { - void * buffer; - dword length; -diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c -index db87d51..7d09acf 100644 ---- a/drivers/isdn/hardware/eicon/idifunc.c -+++ b/drivers/isdn/hardware/eicon/idifunc.c -@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) - IDI_SYNC_REQ req; - 
DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; - -+ pax_track_stack(); -+ - DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); - - for (x = 0; x < MAX_DESCRIPTORS; x++) { -diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c -index a339598..b6a8bfc 100644 ---- a/drivers/isdn/hardware/eicon/message.c -+++ b/drivers/isdn/hardware/eicon/message.c -@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci) - dword d; - word w; - -+ pax_track_stack(); -+ - a = plci->adapter; - Id = ((word)plci->Id<<8)|a->Id; - PUT_WORD(&SS_Ind[4],0x0000); -@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info, - word j, n, w; - dword d; - -+ pax_track_stack(); -+ - - for(i=0;i<8;i++) bp_parms[i].length = 0; - for(i=0;i<2;i++) global_config[i].length = 0; -@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp) - const byte llc3[] = {4,3,2,2,6,6,0}; - const byte header[] = {0,2,3,3,0,0,0}; - -+ pax_track_stack(); -+ - for(i=0;i<8;i++) bp_parms[i].length = 0; - for(i=0;i<6;i++) b2_config_parms[i].length = 0; - for(i=0;i<5;i++) b3_config_parms[i].length = 0; -@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci) - word appl_number_group_type[MAX_APPL]; - PLCI *auxplci; - -+ pax_track_stack(); -+ - set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */ - - if(!a->group_optimization_enabled) -diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c -index a564b75..f3cf8b5 100644 ---- a/drivers/isdn/hardware/eicon/mntfunc.c -+++ b/drivers/isdn/hardware/eicon/mntfunc.c -@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void) - IDI_SYNC_REQ req; - DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; - -+ pax_track_stack(); -+ - DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); - - for (x = 0; x < MAX_DESCRIPTORS; x++) { -diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h -index a3bd163..8956575 100644 ---- a/drivers/isdn/hardware/eicon/xdi_adapter.h -+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h -@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t { - typedef struct _diva_os_idi_adapter_interface { - diva_init_card_proc_t cleanup_adapter_proc; - diva_cmd_card_proc_t cmd_proc; --} diva_os_idi_adapter_interface_t; -+} __no_const diva_os_idi_adapter_interface_t; - - typedef struct _diva_os_xdi_adapter { - struct list_head link; -diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c -index 6ed82ad..b05ac05 100644 ---- a/drivers/isdn/i4l/isdn_common.c -+++ b/drivers/isdn/i4l/isdn_common.c -@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) - } iocpar; - void __user *argp = (void __user *)arg; - -+ pax_track_stack(); -+ - #define name iocpar.name - #define bname iocpar.bname - #define iocts iocpar.iocts -diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c -index 1f355bb..43f1fea 100644 ---- a/drivers/isdn/icn/icn.c -+++ b/drivers/isdn/icn/icn.c -@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card) - if (count > len) - count = len; - if (user) { -- if (copy_from_user(msg, buf, count)) -+ if (count > sizeof msg || copy_from_user(msg, buf, count)) - return -EFAULT; - } else - memcpy(msg, buf, count); -diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c -index 2535933..09a8e86 100644 ---- a/drivers/lguest/core.c -+++ b/drivers/lguest/core.c -@@ -92,9 +92,17 @@ static __init int map_switcher(void) - * it's worked so far. 
The end address needs +1 because __get_vm_area - * allocates an extra guard page, so we need space for that. - */ -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, -+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR -+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); -+#else - switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, - VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR - + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); -+#endif -+ - if (!switcher_vma) { - err = -ENOMEM; - printk("lguest: could not map switcher pages high\n"); -@@ -119,7 +127,7 @@ static __init int map_switcher(void) - * Now the Switcher is mapped at the right address, we can't fail! - * Copy in the compiled-in Switcher code (from x86/switcher_32.S). - */ -- memcpy(switcher_vma->addr, start_switcher_text, -+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text), - end_switcher_text - start_switcher_text); - - printk(KERN_INFO "lguest: mapped switcher at %p\n", -diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c -index 65af42f..530c87a 100644 ---- a/drivers/lguest/x86/core.c -+++ b/drivers/lguest/x86/core.c -@@ -59,7 +59,7 @@ static struct { - /* Offset from where switcher.S was compiled to where we've copied it */ - static unsigned long switcher_offset(void) - { -- return SWITCHER_ADDR - (unsigned long)start_switcher_text; -+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text); - } - - /* This cpu's struct lguest_pages. */ -@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) - * These copies are pretty cheap, so we do them unconditionally: */ - /* Save the current Host top-level page directory. - */ -+ -+#ifdef CONFIG_PAX_PER_CPU_PGD -+ pages->state.host_cr3 = read_cr3(); -+#else - pages->state.host_cr3 = __pa(current->mm->pgd); -+#endif -+ - /* - * Set up the Guest's page tables to see this CPU's pages (and no - * other CPU's pages). -@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void) - * compiled-in switcher code and the high-mapped copy we just made. - */ - for (i = 0; i < IDT_ENTRIES; i++) -- default_idt_entries[i] += switcher_offset(); -+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset(); - - /* - * Set up the Switcher's per-cpu areas. -@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void) - * it will be undisturbed when we switch. To change %cs and jump we - * need this structure to feed to Intel's "lcall" instruction. - */ -- lguest_entry.offset = (long)switch_to_guest + switcher_offset(); -+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset(); - lguest_entry.segment = LGUEST_CS; - - /* -diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S -index 40634b0..4f5855e 100644 ---- a/drivers/lguest/x86/switcher_32.S -+++ b/drivers/lguest/x86/switcher_32.S -@@ -87,6 +87,7 @@ - #include <asm/page.h> - #include <asm/segment.h> - #include <asm/lguest.h> -+#include <asm/processor-flags.h> - - // We mark the start of the code to copy - // It's placed in .text tho it's never run here -@@ -149,6 +150,13 @@ ENTRY(switch_to_guest) - // Changes type when we load it: damn Intel! - // For after we switch over our page tables - // That entry will be read-only: we'd crash. 
-+ -+#ifdef CONFIG_PAX_KERNEXEC -+ mov %cr0, %edx -+ xor $X86_CR0_WP, %edx -+ mov %edx, %cr0 -+#endif -+ - movl $(GDT_ENTRY_TSS*8), %edx - ltr %dx - -@@ -157,9 +165,15 @@ ENTRY(switch_to_guest) - // Let's clear it again for our return. - // The GDT descriptor of the Host - // Points to the table after two "size" bytes -- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx -+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax - // Clear "used" from type field (byte 5, bit 2) -- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) -+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax) -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ mov %cr0, %eax -+ xor $X86_CR0_WP, %eax -+ mov %eax, %cr0 -+#endif - - // Once our page table's switched, the Guest is live! - // The Host fades as we run this final step. -@@ -295,13 +309,12 @@ deliver_to_host: - // I consulted gcc, and it gave - // These instructions, which I gladly credit: - leal (%edx,%ebx,8), %eax -- movzwl (%eax),%edx -- movl 4(%eax), %eax -- xorw %ax, %ax -- orl %eax, %edx -+ movl 4(%eax), %edx -+ movw (%eax), %dx - // Now the address of the handler's in %edx - // We call it now: its "iret" drops us home. -- jmp *%edx -+ ljmp $__KERNEL_CS, $1f -+1: jmp *%edx - - // Every interrupt can come to us here - // But we must truly tell each apart. -diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c -index 4daf9e5..b8d1d0f 100644 ---- a/drivers/macintosh/macio_asic.c -+++ b/drivers/macintosh/macio_asic.c -@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev) - * MacIO is matched against any Apple ID, it's probe() function - * will then decide wether it applies or not - */ --static const struct pci_device_id __devinitdata pci_ids [] = { { -+static const struct pci_device_id __devinitconst pci_ids [] = { { - .vendor = PCI_VENDOR_ID_APPLE, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, -diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c -index 2e9a3ca..c2fb229 100644 ---- a/drivers/md/dm-ioctl.c -+++ b/drivers/md/dm-ioctl.c -@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) - cmd == DM_LIST_VERSIONS_CMD) - return 0; - -- if ((cmd == DM_DEV_CREATE_CMD)) { -+ if (cmd == DM_DEV_CREATE_CMD) { - if (!*param->name) { - DMWARN("name not supplied when creating device"); - return -EINVAL; -diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c -index 9bfd057..01180bc 100644 ---- a/drivers/md/dm-raid1.c -+++ b/drivers/md/dm-raid1.c -@@ -40,7 +40,7 @@ enum dm_raid1_error { - - struct mirror { - struct mirror_set *ms; -- atomic_t error_count; -+ atomic_unchecked_t error_count; - unsigned long error_type; - struct dm_dev *dev; - sector_t offset; -@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms) - struct mirror *m; - - for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) -- if (!atomic_read(&m->error_count)) -+ if (!atomic_read_unchecked(&m->error_count)) - return m; - - return NULL; -@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) - * simple way to tell if a device has encountered - * errors. 
- */ -- atomic_inc(&m->error_count); -+ atomic_inc_unchecked(&m->error_count); - - if (test_and_set_bit(error_type, &m->error_type)) - return; -@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) - struct mirror *m = get_default_mirror(ms); - - do { -- if (likely(!atomic_read(&m->error_count))) -+ if (likely(!atomic_read_unchecked(&m->error_count))) - return m; - - if (m-- == ms->mirror) -@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m) - { - struct mirror *default_mirror = get_default_mirror(m->ms); - -- return !atomic_read(&default_mirror->error_count); -+ return !atomic_read_unchecked(&default_mirror->error_count); - } - - static int mirror_available(struct mirror_set *ms, struct bio *bio) -@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) - */ - if (likely(region_in_sync(ms, region, 1))) - m = choose_mirror(ms, bio->bi_sector); -- else if (m && atomic_read(&m->error_count)) -+ else if (m && atomic_read_unchecked(&m->error_count)) - m = NULL; - - if (likely(m)) -@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, - } - - ms->mirror[mirror].ms = ms; -- atomic_set(&(ms->mirror[mirror].error_count), 0); -+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); - ms->mirror[mirror].error_type = 0; - ms->mirror[mirror].offset = offset; - -@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti) - */ - static char device_status_char(struct mirror *m) - { -- if (!atomic_read(&(m->error_count))) -+ if (!atomic_read_unchecked(&(m->error_count))) - return 'A'; - - return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : -diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c -index 3d80cf0..b77cc47 100644 ---- a/drivers/md/dm-stripe.c -+++ b/drivers/md/dm-stripe.c -@@ -20,7 +20,7 @@ struct stripe { - struct dm_dev *dev; - sector_t physical_start; - -- atomic_t error_count; -+ atomic_unchecked_t error_count; - }; - - struct stripe_c { -@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) - kfree(sc); - return r; - } -- atomic_set(&(sc->stripe[i].error_count), 0); -+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0); - } - - ti->private = sc; -@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti, - DMEMIT("%d ", sc->stripes); - for (i = 0; i < sc->stripes; i++) { - DMEMIT("%s ", sc->stripe[i].dev->name); -- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? -+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? 
- 'D' : 'A'; - } - buffer[i] = '\0'; -@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, - */ - for (i = 0; i < sc->stripes; i++) - if (!strcmp(sc->stripe[i].dev->name, major_minor)) { -- atomic_inc(&(sc->stripe[i].error_count)); -- if (atomic_read(&(sc->stripe[i].error_count)) < -+ atomic_inc_unchecked(&(sc->stripe[i].error_count)); -+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < - DM_IO_ERROR_THRESHOLD) - schedule_work(&sc->trigger_event); - } -diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c -index bc04518..7a83b81 100644 ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, - if (!dev_size) - return 0; - -- if ((start >= dev_size) || (start + len > dev_size)) { -+ if ((start >= dev_size) || (len > dev_size - start)) { - DMWARN("%s: %s too small for target: " - "start=%llu, len=%llu, dev_size=%llu", - dm_device_name(ti->table->md), bdevname(bdev, b), -diff --git a/drivers/md/dm.c b/drivers/md/dm.c -index 52b39f3..83a8b6b 100644 ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -165,9 +165,9 @@ struct mapped_device { - /* - * Event handling. - */ -- atomic_t event_nr; -+ atomic_unchecked_t event_nr; - wait_queue_head_t eventq; -- atomic_t uevent_seq; -+ atomic_unchecked_t uevent_seq; - struct list_head uevent_list; - spinlock_t uevent_lock; /* Protect access to uevent_list */ - -@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor) - rwlock_init(&md->map_lock); - atomic_set(&md->holders, 1); - atomic_set(&md->open_count, 0); -- atomic_set(&md->event_nr, 0); -- atomic_set(&md->uevent_seq, 0); -+ atomic_set_unchecked(&md->event_nr, 0); -+ atomic_set_unchecked(&md->uevent_seq, 0); - INIT_LIST_HEAD(&md->uevent_list); - spin_lock_init(&md->uevent_lock); - -@@ -1978,7 +1978,7 @@ static void event_callback(void *context) - - dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); - -- atomic_inc(&md->event_nr); -+ atomic_inc_unchecked(&md->event_nr); - wake_up(&md->eventq); - } - -@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, - - uint32_t dm_next_uevent_seq(struct mapped_device *md) - { -- return atomic_add_return(1, &md->uevent_seq); -+ return atomic_add_return_unchecked(1, &md->uevent_seq); - } - - uint32_t dm_get_event_nr(struct mapped_device *md) - { -- return atomic_read(&md->event_nr); -+ return atomic_read_unchecked(&md->event_nr); - } - - int dm_wait_event(struct mapped_device *md, int event_nr) - { - return wait_event_interruptible(md->eventq, -- (event_nr != atomic_read(&md->event_nr))); -+ (event_nr != atomic_read_unchecked(&md->event_nr))); - } - - void dm_uevent_add(struct mapped_device *md, struct list_head *elist) -diff --git a/drivers/md/md.c b/drivers/md/md.c -index 5c95ccb..217fa57 100644 ---- a/drivers/md/md.c -+++ b/drivers/md/md.c -@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio); - * start build, activate spare - */ - static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); --static atomic_t md_event_count; -+static atomic_unchecked_t md_event_count; - void md_new_event(mddev_t *mddev) - { -- atomic_inc(&md_event_count); -+ atomic_inc_unchecked(&md_event_count); - wake_up(&md_event_waiters); - } - EXPORT_SYMBOL_GPL(md_new_event); -@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event); - */ - static void md_new_event_inintr(mddev_t *mddev) - { -- atomic_inc(&md_event_count); -+ atomic_inc_unchecked(&md_event_count); - wake_up(&md_event_waiters); - } - 
-@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) - - rdev->preferred_minor = 0xffff; - rdev->data_offset = le64_to_cpu(sb->data_offset); -- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); -+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); - - rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; - bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; -@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) - else - sb->resync_offset = cpu_to_le64(0); - -- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); -+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); - - sb->raid_disks = cpu_to_le32(mddev->raid_disks); - sb->size = cpu_to_le64(mddev->dev_sectors); -@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); - static ssize_t - errors_show(mdk_rdev_t *rdev, char *page) - { -- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); -+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); - } - - static ssize_t -@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) - char *e; - unsigned long n = simple_strtoul(buf, &e, 10); - if (*buf && (*e == 0 || *e == '\n')) { -- atomic_set(&rdev->corrected_errors, n); -+ atomic_set_unchecked(&rdev->corrected_errors, n); - return len; - } - return -EINVAL; -@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev) - rdev->sb_loaded = 0; - rdev->bb_page = NULL; - atomic_set(&rdev->nr_pending, 0); -- atomic_set(&rdev->read_errors, 0); -- atomic_set(&rdev->corrected_errors, 0); -+ atomic_set_unchecked(&rdev->read_errors, 0); -+ atomic_set_unchecked(&rdev->corrected_errors, 0); - - INIT_LIST_HEAD(&rdev->same_set); - init_waitqueue_head(&rdev->blocked_wait); -@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v) - - spin_unlock(&pers_lock); - seq_printf(seq, "\n"); -- seq->poll_event = atomic_read(&md_event_count); -+ seq->poll_event = atomic_read_unchecked(&md_event_count); - return 0; - } - if (v == (void*)2) { -@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v) - chunk_kb ? "KB" : "B"); - if (bitmap->file) { - seq_printf(seq, ", file: "); -- seq_path(seq, &bitmap->file->f_path, " \t\n"); -+ seq_path(seq, &bitmap->file->f_path, " \t\n\"); - } - - seq_printf(seq, "\n"); -@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file) - return error; - - seq = file->private_data; -- seq->poll_event = atomic_read(&md_event_count); -+ seq->poll_event = atomic_read_unchecked(&md_event_count); - return error; - } - -@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) - /* always allow read */ - mask = POLLIN | POLLRDNORM; - -- if (seq->poll_event != atomic_read(&md_event_count)) -+ if (seq->poll_event != atomic_read_unchecked(&md_event_count)) - mask |= POLLERR | POLLPRI; - return mask; - } -@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init) - struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; - curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + - (int)part_stat_read(&disk->part0, sectors[1]) - -- atomic_read(&disk->sync_io); -+ atomic_read_unchecked(&disk->sync_io); - /* sync IO will cause sync_io to increase before the disk_stats - * as sync_io is counted when a request starts, and - * disk_stats is counted when it completes. 
-diff --git a/drivers/md/md.h b/drivers/md/md.h -index 0a309dc..7e01d7f 100644 ---- a/drivers/md/md.h -+++ b/drivers/md/md.h -@@ -124,13 +124,13 @@ struct mdk_rdev_s - * only maintained for arrays that - * support hot removal - */ -- atomic_t read_errors; /* number of consecutive read errors that -+ atomic_unchecked_t read_errors; /* number of consecutive read errors that - * we have tried to ignore. - */ - struct timespec last_read_error; /* monotonic time since our - * last read error - */ -- atomic_t corrected_errors; /* number of corrected read errors, -+ atomic_unchecked_t corrected_errors; /* number of corrected read errors, - * for reporting to userspace and storing - * in superblock. - */ -@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) - - static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) - { -- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); -+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); - } - - struct mdk_personality -diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c -index d9587df..83a0dc3 100644 ---- a/drivers/md/raid1.c -+++ b/drivers/md/raid1.c -@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio) - if (r1_sync_page_io(rdev, sect, s, - bio->bi_io_vec[idx].bv_page, - READ) != 0) -- atomic_add(s, &rdev->corrected_errors); -+ atomic_add_unchecked(s, &rdev->corrected_errors); - } - sectors -= s; - sect += s; -@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf, int read_disk, - test_bit(In_sync, &rdev->flags)) { - if (r1_sync_page_io(rdev, sect, s, - conf->tmppage, READ)) { -- atomic_add(s, &rdev->corrected_errors); -+ atomic_add_unchecked(s, &rdev->corrected_errors); - printk(KERN_INFO - "md/raid1:%s: read error corrected " - "(%d sectors at %llu on %s)\n", -diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c -index 1d44228..98db57d 100644 ---- a/drivers/md/raid10.c -+++ b/drivers/md/raid10.c -@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error) - /* The write handler will notice the lack of - * R10BIO_Uptodate and record any errors etc - */ -- atomic_add(r10_bio->sectors, -+ atomic_add_unchecked(r10_bio->sectors, - &conf->mirrors[d].rdev->corrected_errors); - - /* for reconstruct, we always reschedule after a read. -@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) - { - struct timespec cur_time_mon; - unsigned long hours_since_last; -- unsigned int read_errors = atomic_read(&rdev->read_errors); -+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors); - - ktime_get_ts(&cur_time_mon); - -@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev) - * overflowing the shift of read_errors by hours_since_last. 
- */ - if (hours_since_last >= 8 * sizeof(read_errors)) -- atomic_set(&rdev->read_errors, 0); -+ atomic_set_unchecked(&rdev->read_errors, 0); - else -- atomic_set(&rdev->read_errors, read_errors >> hours_since_last); -+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last); - } - - static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector, -@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) - return; - - check_decay_read_errors(mddev, rdev); -- atomic_inc(&rdev->read_errors); -- if (atomic_read(&rdev->read_errors) > max_read_errors) { -+ atomic_inc_unchecked(&rdev->read_errors); -+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) { - char b[BDEVNAME_SIZE]; - bdevname(rdev->bdev, b); - -@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) - "md/raid10:%s: %s: Raid device exceeded " - "read_error threshold [cur %d:max %d]\n", - mdname(mddev), b, -- atomic_read(&rdev->read_errors), max_read_errors); -+ atomic_read_unchecked(&rdev->read_errors), max_read_errors); - printk(KERN_NOTICE - "md/raid10:%s: %s: Failing raid device\n", - mdname(mddev), b); -@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) - (unsigned long long)( - sect + rdev->data_offset), - bdevname(rdev->bdev, b)); -- atomic_add(s, &rdev->corrected_errors); -+ atomic_add_unchecked(s, &rdev->corrected_errors); - } - - rdev_dec_pending(rdev, mddev); -diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index b6200c3..02e8702 100644 ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error) - (unsigned long long)(sh->sector - + rdev->data_offset), - bdevname(rdev->bdev, b)); -- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); -+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors); - clear_bit(R5_ReadError, &sh->dev[i].flags); - clear_bit(R5_ReWrite, &sh->dev[i].flags); - } -- if (atomic_read(&conf->disks[i].rdev->read_errors)) -- atomic_set(&conf->disks[i].rdev->read_errors, 0); -+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors)) -+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0); - } else { - const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); - int retry = 0; - rdev = conf->disks[i].rdev; - - clear_bit(R5_UPTODATE, &sh->dev[i].flags); -- atomic_inc(&rdev->read_errors); -+ atomic_inc_unchecked(&rdev->read_errors); - if (conf->mddev->degraded >= conf->max_degraded) - printk_ratelimited( - KERN_WARNING -@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error) - (unsigned long long)(sh->sector - + rdev->data_offset), - bdn); -- else if (atomic_read(&rdev->read_errors) -+ else if (atomic_read_unchecked(&rdev->read_errors) - > conf->max_nr_stripes) - printk(KERN_WARNING - "md/raid:%s: Too many read errors, failing device %s.\n", -@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) - sector_t r_sector; - struct stripe_head sh2; - -+ pax_track_stack(); - - chunk_offset = sector_div(new_sector, sectors_per_chunk); - stripe = new_sector; -diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c -index 1d1d8d2..6c6837a 100644 ---- a/drivers/media/common/saa7146_hlp.c -+++ b/drivers/media/common/saa7146_hlp.c -@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa - - int x[32], y[32], w[32], 
h[32]; - -+ pax_track_stack(); -+ - /* clear out memory */ - memset(&line_list[0], 0x00, sizeof(u32)*32); - memset(&pixel_list[0], 0x00, sizeof(u32)*32); -diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c -index 573d540..16f78f3 100644 ---- a/drivers/media/dvb/ddbridge/ddbridge-core.c -+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c -@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = { - .subvendor = _subvend, .subdevice = _subdev, \ - .driver_data = (unsigned long)&_driverdata } - --static const struct pci_device_id ddb_id_tbl[] __devinitdata = { -+static const struct pci_device_id ddb_id_tbl[] __devinitconst = { - DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus), - DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus), - DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le), -diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c -index 7ea517b..252fe54 100644 ---- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c -+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c -@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb - u8 buf[HOST_LINK_BUF_SIZE]; - int i; - -+ pax_track_stack(); -+ - dprintk("%s\n", __func__); - - /* check if we have space for a link buf in the rx_buffer */ -@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, - unsigned long timeout; - int written; - -+ pax_track_stack(); -+ - dprintk("%s\n", __func__); - - /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */ -diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h -index a7d876f..8c21b61 100644 ---- a/drivers/media/dvb/dvb-core/dvb_demux.h -+++ b/drivers/media/dvb/dvb-core/dvb_demux.h -@@ -73,7 +73,7 @@ struct dvb_demux_feed { - union { - dmx_ts_cb ts; - dmx_section_cb sec; -- } cb; -+ } __no_const cb; - - struct dvb_demux *demux; - void *priv; -diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c -index f732877..d38c35a 100644 ---- a/drivers/media/dvb/dvb-core/dvbdev.c -+++ b/drivers/media/dvb/dvb-core/dvbdev.c -@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, - const struct dvb_device *template, void *priv, int type) - { - struct dvb_device *dvbdev; -- struct file_operations *dvbdevfops; -+ file_operations_no_const *dvbdevfops; - struct device *clsdev; - int minor; - int id; -diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c -index acb5fb2..2413f1d 100644 ---- a/drivers/media/dvb/dvb-usb/cxusb.c -+++ b/drivers/media/dvb/dvb-usb/cxusb.c -@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = { - struct dib0700_adapter_state { - int (*set_param_save) (struct dvb_frontend *, - struct dvb_frontend_parameters *); --}; -+} __no_const; - - static int dib7070_set_param_override(struct dvb_frontend *fe, - struct dvb_frontend_parameters *fep) -diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c -index a224e94..503b76a 100644 ---- a/drivers/media/dvb/dvb-usb/dib0700_core.c -+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c -@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw - if (!buf) - return -ENOMEM; - -+ pax_track_stack(); -+ - while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) { - deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n", - hx.addr, 
hx.len, hx.chk); -diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c -index 058b231..183d2b3 100644 ---- a/drivers/media/dvb/dvb-usb/dw2102.c -+++ b/drivers/media/dvb/dvb-usb/dw2102.c -@@ -95,7 +95,7 @@ struct su3000_state { - - struct s6x0_state { - int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v); --}; -+} __no_const; - - /* debug */ - static int dvb_usb_dw2102_debug; -diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c -index 37b1469..28a6f6f 100644 ---- a/drivers/media/dvb/dvb-usb/lmedm04.c -+++ b/drivers/media/dvb/dvb-usb/lmedm04.c -@@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev, - usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), - 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000); - -+ pax_track_stack(); - - data[0] = 0x8a; - len_in = 1; -@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev) - int ret = 0, len_in; - u8 data[512] = {0}; - -+ pax_track_stack(); -+ - data[0] = 0x0a; - len_in = 1; - info("FRM Firmware Cold Reset"); -diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h -index ba91735..4261d84 100644 ---- a/drivers/media/dvb/frontends/dib3000.h -+++ b/drivers/media/dvb/frontends/dib3000.h -@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops - int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff); - int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff); - int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl); --}; -+} __no_const; - - #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE)) - extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, -diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c -index c283112..7f367a7 100644 ---- a/drivers/media/dvb/frontends/mb86a16.c -+++ b/drivers/media/dvb/frontends/mb86a16.c -@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state) - int ret = -1; - int sync; - -+ pax_track_stack(); -+ - dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate); - - fcp = 3000; -diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c -index c709ce6..b3fe620 100644 ---- a/drivers/media/dvb/frontends/or51211.c -+++ b/drivers/media/dvb/frontends/or51211.c -@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe, - u8 tudata[585]; - int i; - -+ pax_track_stack(); -+ - dprintk("Firmware is %zd bytes\n",fw->size); - - /* Get eprom data */ -diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c -index 0564192..75b16f5 100644 ---- a/drivers/media/dvb/ngene/ngene-cards.c -+++ b/drivers/media/dvb/ngene/ngene-cards.c -@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = { - - /****************************************************************************/ - --static const struct pci_device_id ngene_id_tbl[] __devinitdata = { -+static const struct pci_device_id ngene_id_tbl[] __devinitconst = { - NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2), - NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2), - NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2), -diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c -index 16a089f..ab1667d 100644 ---- a/drivers/media/radio/radio-cadet.c -+++ b/drivers/media/radio/radio-cadet.c -@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, 
size_t count, lo - unsigned char readbuf[RDS_BUFFER]; - int i = 0; - -+ if (count > RDS_BUFFER) -+ return -EFAULT; - mutex_lock(&dev->lock); - if (dev->rdsstat == 0) { - dev->rdsstat = 1; -diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h -index 9cde353..8c6a1c3 100644 ---- a/drivers/media/video/au0828/au0828.h -+++ b/drivers/media/video/au0828/au0828.h -@@ -191,7 +191,7 @@ struct au0828_dev { - - /* I2C */ - struct i2c_adapter i2c_adap; -- struct i2c_algorithm i2c_algo; -+ i2c_algorithm_no_const i2c_algo; - struct i2c_client i2c_client; - u32 i2c_rc; - -diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c -index 9e2f870..22e3a08 100644 ---- a/drivers/media/video/cx18/cx18-driver.c -+++ b/drivers/media/video/cx18/cx18-driver.c -@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) - struct i2c_client c; - u8 eedata[256]; - -+ pax_track_stack(); -+ - memset(&c, 0, sizeof(c)); - strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name)); - c.adapter = &cx->i2c_adap[0]; -diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c -index ce765e3..f9e1b04 100644 ---- a/drivers/media/video/cx23885/cx23885-input.c -+++ b/drivers/media/video/cx23885/cx23885-input.c -@@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev, - bool handle = false; - struct ir_raw_event ir_core_event[64]; - -+ pax_track_stack(); -+ - do { - num = 0; - v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event, -diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c -index 68d1240..46b32eb 100644 ---- a/drivers/media/video/cx88/cx88-alsa.c -+++ b/drivers/media/video/cx88/cx88-alsa.c -@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = { - * Only boards with eeprom and byte 1 at eeprom=1 have it - */ - --static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = { -+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = { - {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, - {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, - {0, } -diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c -index 9515f3a..c9ecb85 100644 ---- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c -+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c -@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) - u8 *eeprom; - struct tveeprom tvdata; - -+ pax_track_stack(); -+ - memset(&tvdata,0,sizeof(tvdata)); - - eeprom = pvr2_eeprom_fetch(hdw); -diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h -index 305e6aa..0143317 100644 ---- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h -+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h -@@ -196,7 +196,7 @@ struct pvr2_hdw { - - /* I2C stuff */ - struct i2c_adapter i2c_adap; -- struct i2c_algorithm i2c_algo; -+ i2c_algorithm_no_const i2c_algo; - pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT]; - int i2c_cx25840_hack_state; - int i2c_linked; -diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c -index f9f29cc..5a2e330 100644 ---- a/drivers/media/video/saa7134/saa6752hs.c -+++ b/drivers/media/video/saa7134/saa6752hs.c -@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes) - unsigned char localPAT[256]; - unsigned char localPMT[256]; - -+ 
pax_track_stack(); -+ - /* Set video format - must be done first as it resets other settings */ - set_reg8(client, 0x41, h->video_format); - -diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c -index 62fac7f..f29e0b9 100644 ---- a/drivers/media/video/saa7164/saa7164-cmd.c -+++ b/drivers/media/video/saa7164/saa7164-cmd.c -@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev) - u8 tmp[512]; - dprintk(DBGLVL_CMD, "%s()\n", __func__); - -+ pax_track_stack(); -+ - /* While any outstand message on the bus exists... */ - do { - -@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev) - u8 tmp[512]; - dprintk(DBGLVL_CMD, "%s()\n", __func__); - -+ pax_track_stack(); -+ - while (loop) { - - struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 }; -diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c -index 84cd1b6..f741e07 100644 ---- a/drivers/media/video/timblogiw.c -+++ b/drivers/media/video/timblogiw.c -@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma) - - /* Platform device functions */ - --static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = { -+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = { - .vidioc_querycap = timblogiw_querycap, - .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt, - .vidioc_g_fmt_vid_cap = timblogiw_g_fmt, -@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = { - .vidioc_enum_framesizes = timblogiw_enum_framesizes, - }; - --static __devinitconst struct v4l2_file_operations timblogiw_fops = { -+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = { - .owner = THIS_MODULE, - .open = timblogiw_open, - .release = timblogiw_close, -diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c -index f344411..6ae9974 100644 ---- a/drivers/media/video/usbvision/usbvision-core.c -+++ b/drivers/media/video/usbvision/usbvision-core.c -@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision - unsigned char rv, gv, bv; - static unsigned char *Y, *U, *V; - -+ pax_track_stack(); -+ - frame = usbvision->cur_frame; - image_size = frame->frmwidth * frame->frmheight; - if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) || -diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c -index f300dea..04834ba 100644 ---- a/drivers/media/video/videobuf-dma-sg.c -+++ b/drivers/media/video/videobuf-dma-sg.c -@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size) - { - struct videobuf_queue q; - -+ pax_track_stack(); -+ - /* Required to make generic handler to call __videobuf_alloc */ - q.int_ops = &sg_ops; - -diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c -index 7956a10..f39232f 100644 ---- a/drivers/message/fusion/mptbase.c -+++ b/drivers/message/fusion/mptbase.c -@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) - seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); - seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); - -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL); -+#else - seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", - (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); -+#endif -+ - /* - * Rounding UP to nearest 4-kB boundary here... 
- */ -diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c -index 7596aec..f7ae9aa 100644 ---- a/drivers/message/fusion/mptsas.c -+++ b/drivers/message/fusion/mptsas.c -@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) - return 0; - } - -+static inline void -+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) -+{ -+ if (phy_info->port_details) { -+ phy_info->port_details->rphy = rphy; -+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", -+ ioc->name, rphy)); -+ } -+ -+ if (rphy) { -+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG, -+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); -+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", -+ ioc->name, rphy, rphy->dev.release)); -+ } -+} -+ - /* no mutex */ - static void - mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) -@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) - return NULL; - } - --static inline void --mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) --{ -- if (phy_info->port_details) { -- phy_info->port_details->rphy = rphy; -- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", -- ioc->name, rphy)); -- } -- -- if (rphy) { -- dsaswideprintk(ioc, dev_printk(KERN_DEBUG, -- &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); -- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", -- ioc->name, rphy, rphy->dev.release)); -- } --} -- - static inline struct sas_port * - mptsas_get_port(struct mptsas_phyinfo *phy_info) - { -diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c -index ce61a57..3da8862 100644 ---- a/drivers/message/fusion/mptscsih.c -+++ b/drivers/message/fusion/mptscsih.c -@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost) - - h = shost_priv(SChost); - -- if (h) { -- if (h->info_kbuf == NULL) -- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) -- return h->info_kbuf; -- h->info_kbuf[0] = '\0'; -+ if (!h) -+ return NULL; - -- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); -- h->info_kbuf[size-1] = '\0'; -- } -+ if (h->info_kbuf == NULL) -+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) -+ return h->info_kbuf; -+ h->info_kbuf[0] = '\0'; -+ -+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); -+ h->info_kbuf[size-1] = '\0'; - - return h->info_kbuf; - } -diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c -index 098de2b..fbb922c 100644 ---- a/drivers/message/i2o/i2o_config.c -+++ b/drivers/message/i2o/i2o_config.c -@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg) - struct i2o_message *msg; - unsigned int iop; - -+ pax_track_stack(); -+ - if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) - return -EFAULT; - -diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c -index 07dbeaf..5533142 100644 ---- a/drivers/message/i2o/i2o_proc.c -+++ b/drivers/message/i2o/i2o_proc.c -@@ -255,13 +255,6 @@ static char *scsi_devices[] = { - "Array Controller Device" - }; - --static char *chtostr(u8 * chars, int n) --{ -- char tmp[256]; -- tmp[0] = 0; -- return strncat(tmp, (char *)chars, n); --} -- - static int i2o_report_query_status(struct seq_file *seq, int block_status, - char *group) - { -@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) - - seq_printf(seq, "%-#7x", 
ddm_table.i2o_vendor_id); - seq_printf(seq, "%-#8x", ddm_table.module_id); -- seq_printf(seq, "%-29s", -- chtostr(ddm_table.module_name_version, 28)); -+ seq_printf(seq, "%-.28s", ddm_table.module_name_version); - seq_printf(seq, "%9d ", ddm_table.data_size); - seq_printf(seq, "%8d", ddm_table.code_size); - -@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) - - seq_printf(seq, "%-#7x", dst->i2o_vendor_id); - seq_printf(seq, "%-#8x", dst->module_id); -- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); -- seq_printf(seq, "%-9s", chtostr(dst->date, 8)); -+ seq_printf(seq, "%-.28s", dst->module_name_version); -+ seq_printf(seq, "%-.8s", dst->date); - seq_printf(seq, "%8d ", dst->module_size); - seq_printf(seq, "%8d ", dst->mpb_size); - seq_printf(seq, "0x%04x", dst->module_flags); -@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) - seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); - seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); - seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); -- seq_printf(seq, "Vendor info : %s\n", -- chtostr((u8 *) (work32 + 2), 16)); -- seq_printf(seq, "Product info : %s\n", -- chtostr((u8 *) (work32 + 6), 16)); -- seq_printf(seq, "Description : %s\n", -- chtostr((u8 *) (work32 + 10), 16)); -- seq_printf(seq, "Product rev. : %s\n", -- chtostr((u8 *) (work32 + 14), 8)); -+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); -+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); -+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); -+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14)); - - seq_printf(seq, "Serial number : "); - print_serial_number(seq, (u8 *) (work32 + 16), -@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) - } - - seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); -- seq_printf(seq, "Module name : %s\n", -- chtostr(result.module_name, 24)); -- seq_printf(seq, "Module revision : %s\n", -- chtostr(result.module_rev, 8)); -+ seq_printf(seq, "Module name : %.24s\n", result.module_name); -+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev); - - seq_printf(seq, "Serial number : "); - print_serial_number(seq, result.serial_number, sizeof(result) - 36); -@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) - return 0; - } - -- seq_printf(seq, "Device name : %s\n", -- chtostr(result.device_name, 64)); -- seq_printf(seq, "Service name : %s\n", -- chtostr(result.service_name, 64)); -- seq_printf(seq, "Physical name : %s\n", -- chtostr(result.physical_location, 64)); -- seq_printf(seq, "Instance number : %s\n", -- chtostr(result.instance_number, 4)); -+ seq_printf(seq, "Device name : %.64s\n", result.device_name); -+ seq_printf(seq, "Service name : %.64s\n", result.service_name); -+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location); -+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number); - - return 0; - } -diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c -index a8c08f3..155fe3d 100644 ---- a/drivers/message/i2o/iop.c -+++ b/drivers/message/i2o/iop.c -@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) - - spin_lock_irqsave(&c->context_list_lock, flags); - -- if (unlikely(atomic_inc_and_test(&c->context_list_counter))) -- atomic_inc(&c->context_list_counter); -+ if 
(unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter))) -+ atomic_inc_unchecked(&c->context_list_counter); - -- entry->context = atomic_read(&c->context_list_counter); -+ entry->context = atomic_read_unchecked(&c->context_list_counter); - - list_add(&entry->list, &c->context_list); - -@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void) - - #if BITS_PER_LONG == 64 - spin_lock_init(&c->context_list_lock); -- atomic_set(&c->context_list_counter, 0); -+ atomic_set_unchecked(&c->context_list_counter, 0); - INIT_LIST_HEAD(&c->context_list); - #endif - -diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c -index a20e1c4..4f57255 100644 ---- a/drivers/mfd/ab3100-core.c -+++ b/drivers/mfd/ab3100-core.c -@@ -809,7 +809,7 @@ struct ab_family_id { - char *name; - }; - --static const struct ab_family_id ids[] __devinitdata = { -+static const struct ab_family_id ids[] __devinitconst = { - /* AB3100 */ - { - .id = 0xc0, -diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c -index f12720d..3c251fd 100644 ---- a/drivers/mfd/abx500-core.c -+++ b/drivers/mfd/abx500-core.c -@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list); - - struct abx500_device_entry { - struct list_head list; -- struct abx500_ops ops; -+ abx500_ops_no_const ops; - struct device *dev; - }; - -diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c -index 5c2a06a..8fa077c 100644 ---- a/drivers/mfd/janz-cmodio.c -+++ b/drivers/mfd/janz-cmodio.c -@@ -13,6 +13,7 @@ - - #include <linux/kernel.h> - #include <linux/module.h> -+#include <linux/slab.h> - #include <linux/init.h> - #include <linux/pci.h> - #include <linux/interrupt.h> -diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c -index 5fe5de1..af64f53 100644 ---- a/drivers/mfd/wm8350-i2c.c -+++ b/drivers/mfd/wm8350-i2c.c -@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg, - u8 msg[(WM8350_MAX_REGISTER << 1) + 1]; - int ret; - -+ pax_track_stack(); -+ - if (bytes > ((WM8350_MAX_REGISTER << 1) + 1)) - return -EINVAL; - -diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c -index 8b51cd6..f628f8d 100644 ---- a/drivers/misc/lis3lv02d/lis3lv02d.c -+++ b/drivers/misc/lis3lv02d/lis3lv02d.c -@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) - * the lid is closed. This leads to interrupts as soon as a little move - * is done. 
- */ -- atomic_inc(&lis3_dev.count); -+ atomic_inc_unchecked(&lis3_dev.count); - - wake_up_interruptible(&lis3_dev.misc_wait); - kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); -@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file) - if (lis3_dev.pm_dev) - pm_runtime_get_sync(lis3_dev.pm_dev); - -- atomic_set(&lis3_dev.count, 0); -+ atomic_set_unchecked(&lis3_dev.count, 0); - return 0; - } - -@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, - add_wait_queue(&lis3_dev.misc_wait, &wait); - while (true) { - set_current_state(TASK_INTERRUPTIBLE); -- data = atomic_xchg(&lis3_dev.count, 0); -+ data = atomic_xchg_unchecked(&lis3_dev.count, 0); - if (data) - break; - -@@ -585,7 +585,7 @@ out: - static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) - { - poll_wait(file, &lis3_dev.misc_wait, wait); -- if (atomic_read(&lis3_dev.count)) -+ if (atomic_read_unchecked(&lis3_dev.count)) - return POLLIN | POLLRDNORM; - return 0; - } -diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h -index a193958..4d7ecd2 100644 ---- a/drivers/misc/lis3lv02d/lis3lv02d.h -+++ b/drivers/misc/lis3lv02d/lis3lv02d.h -@@ -265,7 +265,7 @@ struct lis3lv02d { - struct input_polled_dev *idev; /* input device */ - struct platform_device *pdev; /* platform device */ - struct regulator_bulk_data regulators[2]; -- atomic_t count; /* interrupt count after last read */ -+ atomic_unchecked_t count; /* interrupt count after last read */ - union axis_conversion ac; /* hw -> logical axis */ - int mapped_btns[3]; - -diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c -index 2f30bad..c4c13d0 100644 ---- a/drivers/misc/sgi-gru/gruhandles.c -+++ b/drivers/misc/sgi-gru/gruhandles.c -@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks) - unsigned long nsec; - - nsec = CLKS2NSEC(clks); -- atomic_long_inc(&mcs_op_statistics[op].count); -- atomic_long_add(nsec, &mcs_op_statistics[op].total); -+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count); -+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); - if (mcs_op_statistics[op].max < nsec) - mcs_op_statistics[op].max = nsec; - } -diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c -index 7768b87..f8aac38 100644 ---- a/drivers/misc/sgi-gru/gruprocfs.c -+++ b/drivers/misc/sgi-gru/gruprocfs.c -@@ -32,9 +32,9 @@ - - #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) - --static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) -+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) - { -- unsigned long val = atomic_long_read(v); -+ unsigned long val = atomic_long_read_unchecked(v); - - seq_printf(s, "%16lu %s\n", val, id); - } -@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) - - seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); - for (op = 0; op < mcsop_last; op++) { -- count = atomic_long_read(&mcs_op_statistics[op].count); -- total = atomic_long_read(&mcs_op_statistics[op].total); -+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); -+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); - max = mcs_op_statistics[op].max; - seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, - count ? 
total / count : 0, max); -diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h -index 5c3ce24..4915ccb 100644 ---- a/drivers/misc/sgi-gru/grutables.h -+++ b/drivers/misc/sgi-gru/grutables.h -@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; - * GRU statistics. - */ - struct gru_stats_s { -- atomic_long_t vdata_alloc; -- atomic_long_t vdata_free; -- atomic_long_t gts_alloc; -- atomic_long_t gts_free; -- atomic_long_t gms_alloc; -- atomic_long_t gms_free; -- atomic_long_t gts_double_allocate; -- atomic_long_t assign_context; -- atomic_long_t assign_context_failed; -- atomic_long_t free_context; -- atomic_long_t load_user_context; -- atomic_long_t load_kernel_context; -- atomic_long_t lock_kernel_context; -- atomic_long_t unlock_kernel_context; -- atomic_long_t steal_user_context; -- atomic_long_t steal_kernel_context; -- atomic_long_t steal_context_failed; -- atomic_long_t nopfn; -- atomic_long_t asid_new; -- atomic_long_t asid_next; -- atomic_long_t asid_wrap; -- atomic_long_t asid_reuse; -- atomic_long_t intr; -- atomic_long_t intr_cbr; -- atomic_long_t intr_tfh; -- atomic_long_t intr_spurious; -- atomic_long_t intr_mm_lock_failed; -- atomic_long_t call_os; -- atomic_long_t call_os_wait_queue; -- atomic_long_t user_flush_tlb; -- atomic_long_t user_unload_context; -- atomic_long_t user_exception; -- atomic_long_t set_context_option; -- atomic_long_t check_context_retarget_intr; -- atomic_long_t check_context_unload; -- atomic_long_t tlb_dropin; -- atomic_long_t tlb_preload_page; -- atomic_long_t tlb_dropin_fail_no_asid; -- atomic_long_t tlb_dropin_fail_upm; -- atomic_long_t tlb_dropin_fail_invalid; -- atomic_long_t tlb_dropin_fail_range_active; -- atomic_long_t tlb_dropin_fail_idle; -- atomic_long_t tlb_dropin_fail_fmm; -- atomic_long_t tlb_dropin_fail_no_exception; -- atomic_long_t tfh_stale_on_fault; -- atomic_long_t mmu_invalidate_range; -- atomic_long_t mmu_invalidate_page; -- atomic_long_t flush_tlb; -- atomic_long_t flush_tlb_gru; -- atomic_long_t flush_tlb_gru_tgh; -- atomic_long_t flush_tlb_gru_zero_asid; -+ atomic_long_unchecked_t vdata_alloc; -+ atomic_long_unchecked_t vdata_free; -+ atomic_long_unchecked_t gts_alloc; -+ atomic_long_unchecked_t gts_free; -+ atomic_long_unchecked_t gms_alloc; -+ atomic_long_unchecked_t gms_free; -+ atomic_long_unchecked_t gts_double_allocate; -+ atomic_long_unchecked_t assign_context; -+ atomic_long_unchecked_t assign_context_failed; -+ atomic_long_unchecked_t free_context; -+ atomic_long_unchecked_t load_user_context; -+ atomic_long_unchecked_t load_kernel_context; -+ atomic_long_unchecked_t lock_kernel_context; -+ atomic_long_unchecked_t unlock_kernel_context; -+ atomic_long_unchecked_t steal_user_context; -+ atomic_long_unchecked_t steal_kernel_context; -+ atomic_long_unchecked_t steal_context_failed; -+ atomic_long_unchecked_t nopfn; -+ atomic_long_unchecked_t asid_new; -+ atomic_long_unchecked_t asid_next; -+ atomic_long_unchecked_t asid_wrap; -+ atomic_long_unchecked_t asid_reuse; -+ atomic_long_unchecked_t intr; -+ atomic_long_unchecked_t intr_cbr; -+ atomic_long_unchecked_t intr_tfh; -+ atomic_long_unchecked_t intr_spurious; -+ atomic_long_unchecked_t intr_mm_lock_failed; -+ atomic_long_unchecked_t call_os; -+ atomic_long_unchecked_t call_os_wait_queue; -+ atomic_long_unchecked_t user_flush_tlb; -+ atomic_long_unchecked_t user_unload_context; -+ atomic_long_unchecked_t user_exception; -+ atomic_long_unchecked_t set_context_option; -+ atomic_long_unchecked_t check_context_retarget_intr; -+ 
atomic_long_unchecked_t check_context_unload; -+ atomic_long_unchecked_t tlb_dropin; -+ atomic_long_unchecked_t tlb_preload_page; -+ atomic_long_unchecked_t tlb_dropin_fail_no_asid; -+ atomic_long_unchecked_t tlb_dropin_fail_upm; -+ atomic_long_unchecked_t tlb_dropin_fail_invalid; -+ atomic_long_unchecked_t tlb_dropin_fail_range_active; -+ atomic_long_unchecked_t tlb_dropin_fail_idle; -+ atomic_long_unchecked_t tlb_dropin_fail_fmm; -+ atomic_long_unchecked_t tlb_dropin_fail_no_exception; -+ atomic_long_unchecked_t tfh_stale_on_fault; -+ atomic_long_unchecked_t mmu_invalidate_range; -+ atomic_long_unchecked_t mmu_invalidate_page; -+ atomic_long_unchecked_t flush_tlb; -+ atomic_long_unchecked_t flush_tlb_gru; -+ atomic_long_unchecked_t flush_tlb_gru_tgh; -+ atomic_long_unchecked_t flush_tlb_gru_zero_asid; - -- atomic_long_t copy_gpa; -- atomic_long_t read_gpa; -+ atomic_long_unchecked_t copy_gpa; -+ atomic_long_unchecked_t read_gpa; - -- atomic_long_t mesq_receive; -- atomic_long_t mesq_receive_none; -- atomic_long_t mesq_send; -- atomic_long_t mesq_send_failed; -- atomic_long_t mesq_noop; -- atomic_long_t mesq_send_unexpected_error; -- atomic_long_t mesq_send_lb_overflow; -- atomic_long_t mesq_send_qlimit_reached; -- atomic_long_t mesq_send_amo_nacked; -- atomic_long_t mesq_send_put_nacked; -- atomic_long_t mesq_page_overflow; -- atomic_long_t mesq_qf_locked; -- atomic_long_t mesq_qf_noop_not_full; -- atomic_long_t mesq_qf_switch_head_failed; -- atomic_long_t mesq_qf_unexpected_error; -- atomic_long_t mesq_noop_unexpected_error; -- atomic_long_t mesq_noop_lb_overflow; -- atomic_long_t mesq_noop_qlimit_reached; -- atomic_long_t mesq_noop_amo_nacked; -- atomic_long_t mesq_noop_put_nacked; -- atomic_long_t mesq_noop_page_overflow; -+ atomic_long_unchecked_t mesq_receive; -+ atomic_long_unchecked_t mesq_receive_none; -+ atomic_long_unchecked_t mesq_send; -+ atomic_long_unchecked_t mesq_send_failed; -+ atomic_long_unchecked_t mesq_noop; -+ atomic_long_unchecked_t mesq_send_unexpected_error; -+ atomic_long_unchecked_t mesq_send_lb_overflow; -+ atomic_long_unchecked_t mesq_send_qlimit_reached; -+ atomic_long_unchecked_t mesq_send_amo_nacked; -+ atomic_long_unchecked_t mesq_send_put_nacked; -+ atomic_long_unchecked_t mesq_page_overflow; -+ atomic_long_unchecked_t mesq_qf_locked; -+ atomic_long_unchecked_t mesq_qf_noop_not_full; -+ atomic_long_unchecked_t mesq_qf_switch_head_failed; -+ atomic_long_unchecked_t mesq_qf_unexpected_error; -+ atomic_long_unchecked_t mesq_noop_unexpected_error; -+ atomic_long_unchecked_t mesq_noop_lb_overflow; -+ atomic_long_unchecked_t mesq_noop_qlimit_reached; -+ atomic_long_unchecked_t mesq_noop_amo_nacked; -+ atomic_long_unchecked_t mesq_noop_put_nacked; -+ atomic_long_unchecked_t mesq_noop_page_overflow; - - }; - -@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, - tghop_invalidate, mcsop_last}; - - struct mcs_op_statistic { -- atomic_long_t count; -- atomic_long_t total; -+ atomic_long_unchecked_t count; -+ atomic_long_unchecked_t total; - unsigned long max; - }; - -@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; - - #define STAT(id) do { \ - if (gru_options & OPT_STATS) \ -- atomic_long_inc(&gru_stats.id); \ -+ atomic_long_inc_unchecked(&gru_stats.id); \ - } while (0) - - #ifdef CONFIG_SGI_GRU_DEBUG -diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h -index 851b2f2..a4ec097 100644 ---- a/drivers/misc/sgi-xp/xp.h -+++ b/drivers/misc/sgi-xp/xp.h -@@ -289,7 +289,7 @@ 
struct xpc_interface { - xpc_notify_func, void *); - void (*received) (short, int, void *); - enum xp_retval (*partid_to_nasids) (short, void *); --}; -+} __no_const; - - extern struct xpc_interface xpc_interface; - -diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h -index b94d5f7..7f494c5 100644 ---- a/drivers/misc/sgi-xp/xpc.h -+++ b/drivers/misc/sgi-xp/xpc.h -@@ -835,6 +835,7 @@ struct xpc_arch_operations { - void (*received_payload) (struct xpc_channel *, void *); - void (*notify_senders_of_disconnect) (struct xpc_channel *); - }; -+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const; - - /* struct xpc_partition act_state values (for XPC HB) */ - -@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[]; - /* found in xpc_main.c */ - extern struct device *xpc_part; - extern struct device *xpc_chan; --extern struct xpc_arch_operations xpc_arch_ops; -+extern xpc_arch_operations_no_const xpc_arch_ops; - extern int xpc_disengage_timelimit; - extern int xpc_disengage_timedout; - extern int xpc_activate_IRQ_rcvd; -diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c -index 8d082b4..aa749ae 100644 ---- a/drivers/misc/sgi-xp/xpc_main.c -+++ b/drivers/misc/sgi-xp/xpc_main.c -@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = { - .notifier_call = xpc_system_die, - }; - --struct xpc_arch_operations xpc_arch_ops; -+xpc_arch_operations_no_const xpc_arch_ops; - - /* - * Timer function to enforce the timelimit on the partition disengage. -diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c -index 26c5286..292d261 100644 ---- a/drivers/mmc/host/sdhci-pci.c -+++ b/drivers/mmc/host/sdhci-pci.c -@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = { - .probe = via_probe, - }; - --static const struct pci_device_id pci_ids[] __devinitdata = { -+static const struct pci_device_id pci_ids[] __devinitconst = { - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, -diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c -index e1e122f..d99a6ea 100644 ---- a/drivers/mtd/chips/cfi_cmdset_0001.c -+++ b/drivers/mtd/chips/cfi_cmdset_0001.c -@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long - struct cfi_pri_intelext *cfip = cfi->cmdset_priv; - unsigned long timeo = jiffies + HZ; - -+ pax_track_stack(); -+ - /* Prevent setting state FL_SYNCING for chip in suspended state. */ - if (mode == FL_SYNCING && chip->oldstate != FL_READY) - goto sleep; -@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, - unsigned long initial_adr; - int initial_len = len; - -+ pax_track_stack(); -+ - wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; - adr += chip->start; - initial_adr = adr; -@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, - int retries = 3; - int ret; - -+ pax_track_stack(); -+ - adr += chip->start; - - retry: -diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c -index 179814a..abe9d60 100644 ---- a/drivers/mtd/chips/cfi_cmdset_0020.c -+++ b/drivers/mtd/chips/cfi_cmdset_0020.c -@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof - unsigned long cmd_addr; - struct cfi_private *cfi = map->fldrv_priv; - -+ pax_track_stack(); -+ - adr += chip->start; - - /* Ensure cmd read/writes are aligned. 
*/ -@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, - DECLARE_WAITQUEUE(wait, current); - int wbufsize, z; - -+ pax_track_stack(); -+ - /* M58LW064A requires bus alignment for buffer wriets -- saw */ - if (adr & (map_bankwidth(map)-1)) - return -EINVAL; -@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u - DECLARE_WAITQUEUE(wait, current); - int ret = 0; - -+ pax_track_stack(); -+ - adr += chip->start; - - /* Let's determine this according to the interleave only once */ -@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un - unsigned long timeo = jiffies + HZ; - DECLARE_WAITQUEUE(wait, current); - -+ pax_track_stack(); -+ - adr += chip->start; - - /* Let's determine this according to the interleave only once */ -@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, - unsigned long timeo = jiffies + HZ; - DECLARE_WAITQUEUE(wait, current); - -+ pax_track_stack(); -+ - adr += chip->start; - - /* Let's determine this according to the interleave only once */ -diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c -index f7fbf60..9866457 100644 ---- a/drivers/mtd/devices/doc2000.c -+++ b/drivers/mtd/devices/doc2000.c -@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - - /* The ECC will not be calculated correctly if less than 512 is written */ - /* DBB- -- if (len != 0x200 && eccbuf) -+ if (len != 0x200) - printk(KERN_WARNING - "ECC needs a full sector write (adr: %lx size %lx)\n", - (long) to, (long) len); -diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c -index 241192f..d0c35a3 100644 ---- a/drivers/mtd/devices/doc2001.c -+++ b/drivers/mtd/devices/doc2001.c -@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - - /* Don't allow read past end of device */ -- if (from >= this->totlen) -+ if (from >= this->totlen || !len) - return -EINVAL; - - /* Don't allow a single read to cross a 512-byte block boundary */ -diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c -index 037b399..225a71d 100644 ---- a/drivers/mtd/ftl.c -+++ b/drivers/mtd/ftl.c -@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit, - loff_t offset; - uint16_t srcunitswap = cpu_to_le16(srcunit); - -+ pax_track_stack(); -+ - eun = &part->EUNInfo[srcunit]; - xfer = &part->XferInfo[xferunit]; - DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n", -diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c -index d7592e6..31c505c 100644 ---- a/drivers/mtd/inftlcore.c -+++ b/drivers/mtd/inftlcore.c -@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned - struct inftl_oob oob; - size_t retlen; - -+ pax_track_stack(); -+ - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," - "pending=%d)\n", inftl, thisVUC, pendingblock); - -diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c -index 104052e..6232be5 100644 ---- a/drivers/mtd/inftlmount.c -+++ b/drivers/mtd/inftlmount.c -@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl) - struct INFTLPartition *ip; - size_t retlen; - -+ pax_track_stack(); -+ - DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); - - /* -diff --git a/drivers/mtd/lpddr/qinfo_probe.c 
b/drivers/mtd/lpddr/qinfo_probe.c -index dbfe17b..c7b0918 100644 ---- a/drivers/mtd/lpddr/qinfo_probe.c -+++ b/drivers/mtd/lpddr/qinfo_probe.c -@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr) - { - map_word pfow_val[4]; - -+ pax_track_stack(); -+ - /* Check identification string */ - pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P); - pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F); -diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c -index 49e20a4..60fbfa5 100644 ---- a/drivers/mtd/mtdchar.c -+++ b/drivers/mtd/mtdchar.c -@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) - u_long size; - struct mtd_info_user info; - -+ pax_track_stack(); -+ - DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); - - size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; -diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c -index d527621..2491fab 100644 ---- a/drivers/mtd/nand/denali.c -+++ b/drivers/mtd/nand/denali.c -@@ -26,6 +26,7 @@ - #include <linux/pci.h> - #include <linux/mtd/mtd.h> - #include <linux/module.h> -+#include <linux/slab.h> - - #include "denali.h" - -diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c -index b155666..611b801 100644 ---- a/drivers/mtd/nftlcore.c -+++ b/drivers/mtd/nftlcore.c -@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p - int inplace = 1; - size_t retlen; - -+ pax_track_stack(); -+ - memset(BlockMap, 0xff, sizeof(BlockMap)); - memset(BlockFreeFound, 0, sizeof(BlockFreeFound)); - -diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c -index e3cd1ff..0ea79a3 100644 ---- a/drivers/mtd/nftlmount.c -+++ b/drivers/mtd/nftlmount.c -@@ -24,6 +24,7 @@ - #include <asm/errno.h> - #include <linux/delay.h> - #include <linux/slab.h> -+#include <linux/sched.h> - #include <linux/mtd/mtd.h> - #include <linux/mtd/nand.h> - #include <linux/mtd/nftl.h> -@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl) - struct mtd_info *mtd = nftl->mbd.mtd; - unsigned int i; - -+ pax_track_stack(); -+ - /* Assume logical EraseSize == physical erasesize for starting the scan. - We'll sort it out later if we find a MediaHeader which says otherwise */ - /* Actually, we won't. 
The new DiskOnChip driver has already scanned -diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c -index 6c3fb5a..c542a81 100644 ---- a/drivers/mtd/ubi/build.c -+++ b/drivers/mtd/ubi/build.c -@@ -1311,7 +1311,7 @@ module_exit(ubi_exit); - static int __init bytes_str_to_int(const char *str) - { - char *endp; -- unsigned long result; -+ unsigned long result, scale = 1; - - result = simple_strtoul(str, &endp, 0); - if (str == endp || result >= INT_MAX) { -@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str) - - switch (*endp) { - case 'G': -- result *= 1024; -+ scale *= 1024; - case 'M': -- result *= 1024; -+ scale *= 1024; - case 'K': -- result *= 1024; -+ scale *= 1024; - if (endp[1] == 'i' && endp[2] == 'B') - endp += 2; - case '\0': -@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str) - return -EINVAL; - } - -- return result; -+ if ((intoverflow_t)result*scale >= INT_MAX) { -+ printk(KERN_ERR "UBI error: incorrect bytes count: "%s"\n", -+ str); -+ return -EINVAL; -+ } -+ -+ return result*scale; - } - - /** -diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c -index d4f7dda..d627d46 100644 ---- a/drivers/net/atlx/atl2.c -+++ b/drivers/net/atlx/atl2.c -@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw) - */ - - #define ATL2_PARAM(X, desc) \ -- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ -+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ - MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ - MODULE_PARM_DESC(X, desc); - #else -diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c -index 87aecdf..ec23470 100644 ---- a/drivers/net/bna/bfa_ioc_ct.c -+++ b/drivers/net/bna/bfa_ioc_ct.c -@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); - static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); - static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); - --static struct bfa_ioc_hwif nw_hwif_ct; -+static struct bfa_ioc_hwif nw_hwif_ct = { -+ .ioc_pll_init = bfa_ioc_ct_pll_init, -+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, -+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, -+ .ioc_reg_init = bfa_ioc_ct_reg_init, -+ .ioc_map_port = bfa_ioc_ct_map_port, -+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, -+ .ioc_notify_fail = bfa_ioc_ct_notify_fail, -+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, -+ .ioc_sync_start = bfa_ioc_ct_sync_start, -+ .ioc_sync_join = bfa_ioc_ct_sync_join, -+ .ioc_sync_leave = bfa_ioc_ct_sync_leave, -+ .ioc_sync_ack = bfa_ioc_ct_sync_ack, -+ .ioc_sync_complete = bfa_ioc_ct_sync_complete -+}; - - /** - * Called from bfa_ioc_attach() to map asic specific calls. 
-@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct; - void - bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) - { -- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; -- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock; -- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; -- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; -- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; -- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; -- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; -- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; -- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start; -- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; -- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; -- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; -- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; -- - ioc->ioc_hwif = &nw_hwif_ct; - } - -diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c -index 8e35b25..c39f205 100644 ---- a/drivers/net/bna/bnad.c -+++ b/drivers/net/bna/bnad.c -@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id) - struct bna_intr_info *intr_info = - &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; - struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; -- struct bna_tx_event_cbfn tx_cbfn; -+ static struct bna_tx_event_cbfn tx_cbfn = { -+ /* Initialize the tx event handlers */ -+ .tcb_setup_cbfn = bnad_cb_tcb_setup, -+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy, -+ .tx_stall_cbfn = bnad_cb_tx_stall, -+ .tx_resume_cbfn = bnad_cb_tx_resume, -+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup -+ }; - struct bna_tx *tx; - unsigned long flags; - -@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id) - tx_config->txq_depth = bnad->txq_depth; - tx_config->tx_type = BNA_TX_T_REGULAR; - -- /* Initialize the tx event handlers */ -- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup; -- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy; -- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall; -- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume; -- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup; -- - /* Get BNA's resource requirement for one tx object */ - spin_lock_irqsave(&bnad->bna_lock, flags); - bna_tx_res_req(bnad->num_txq_per_tx, -@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id) - struct bna_intr_info *intr_info = - &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; - struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; -- struct bna_rx_event_cbfn rx_cbfn; -+ static struct bna_rx_event_cbfn rx_cbfn = { -+ /* Initialize the Rx event handlers */ -+ .rcb_setup_cbfn = bnad_cb_rcb_setup, -+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy, -+ .ccb_setup_cbfn = bnad_cb_ccb_setup, -+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy, -+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup, -+ .rx_post_cbfn = bnad_cb_rx_post -+ }; - struct bna_rx *rx; - unsigned long flags; - - /* Initialize the Rx object configuration */ - bnad_init_rx_config(bnad, rx_config); - -- /* Initialize the Rx event handlers */ -- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; -- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; -- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; -- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; -- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup; -- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post; -- - /* Get BNA's resource requirement for one Rx object */ - spin_lock_irqsave(&bnad->bna_lock, flags); - bna_rx_res_req(rx_config, res_info); -diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c -index 4b2b570..31033f4 100644 ---- a/drivers/net/bnx2.c -+++ b/drivers/net/bnx2.c -@@ 
-5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp) - int rc = 0; - u32 magic, csum; - -+ pax_track_stack(); -+ - if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) - goto test_nvram_done; - -diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c -index cf3e479..5dc0ecc 100644 ---- a/drivers/net/bnx2x/bnx2x_ethtool.c -+++ b/drivers/net/bnx2x/bnx2x_ethtool.c -@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp) - int i, rc; - u32 magic, crc; - -+ pax_track_stack(); -+ - if (BP_NOMCP(bp)) - return 0; - -diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h -index 9a517c2..a50cfcb 100644 ---- a/drivers/net/bnx2x/bnx2x_sp.h -+++ b/drivers/net/bnx2x/bnx2x_sp.h -@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj { - - int (*wait_comp)(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p); --}; -+} __no_const; - - /********************** Set multicast group ***********************************/ - -diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h -index c5f5479..2e8c260 100644 ---- a/drivers/net/cxgb3/l2t.h -+++ b/drivers/net/cxgb3/l2t.h -@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev, - */ - struct l2t_skb_cb { - arp_failure_handler_func arp_failure_handler; --}; -+} __no_const; - - #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) - -diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c -index b4efa29..c5f2703 100644 ---- a/drivers/net/cxgb4/cxgb4_main.c -+++ b/drivers/net/cxgb4/cxgb4_main.c -@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap) - unsigned int nchan = adap->params.nports; - struct msix_entry entries[MAX_INGQ + 1]; - -+ pax_track_stack(); -+ - for (i = 0; i < ARRAY_SIZE(entries); ++i) - entries[i].entry = i; - -diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c -index d1ec111..12735bc 100644 ---- a/drivers/net/cxgb4/t4_hw.c -+++ b/drivers/net/cxgb4/t4_hw.c -@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) - u8 vpd[VPD_LEN], csum; - unsigned int vpdr_len, kw_offset, id_len; - -+ pax_track_stack(); -+ - ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); - if (ret < 0) - return ret; -diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c -index 536b3a5..e6f8dcc 100644 ---- a/drivers/net/e1000e/82571.c -+++ b/drivers/net/e1000e/82571.c -@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; -- struct e1000_mac_operations *func = &mac->ops; -+ e1000_mac_operations_no_const *func = &mac->ops; - u32 swsm = 0; - u32 swsm2 = 0; - bool force_clear_smbi = false; -diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c -index e4f4225..24da2ea 100644 ---- a/drivers/net/e1000e/es2lan.c -+++ b/drivers/net/e1000e/es2lan.c -@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; -- struct e1000_mac_operations *func = &mac->ops; -+ e1000_mac_operations_no_const *func = &mac->ops; - - /* Set media type */ - switch (adapter->pdev->device) { -diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h -index 2967039..ca8c40c 100644 ---- a/drivers/net/e1000e/hw.h -+++ b/drivers/net/e1000e/hw.h -@@ -778,6 +778,7 @@ struct e1000_mac_operations { - void (*write_vfta)(struct e1000_hw *, u32, u32); - s32 
(*read_mac_addr)(struct e1000_hw *); - }; -+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; - - /* - * When to use various PHY register access functions: -@@ -818,6 +819,7 @@ struct e1000_phy_operations { - void (*power_up)(struct e1000_hw *); - void (*power_down)(struct e1000_hw *); - }; -+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; - - /* Function pointers for the NVM. */ - struct e1000_nvm_operations { -@@ -829,9 +831,10 @@ struct e1000_nvm_operations { - s32 (*validate)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); - }; -+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; - - struct e1000_mac_info { -- struct e1000_mac_operations ops; -+ e1000_mac_operations_no_const ops; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - -@@ -872,7 +875,7 @@ struct e1000_mac_info { - }; - - struct e1000_phy_info { -- struct e1000_phy_operations ops; -+ e1000_phy_operations_no_const ops; - - enum e1000_phy_type type; - -@@ -906,7 +909,7 @@ struct e1000_phy_info { - }; - - struct e1000_nvm_info { -- struct e1000_nvm_operations ops; -+ e1000_nvm_operations_no_const ops; - - enum e1000_nvm_type type; - enum e1000_nvm_override override; -diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c -index fa8677c..196356f 100644 ---- a/drivers/net/fealnx.c -+++ b/drivers/net/fealnx.c -@@ -150,7 +150,7 @@ struct chip_info { - int flags; - }; - --static const struct chip_info skel_netdrv_tbl[] __devinitdata = { -+static const struct chip_info skel_netdrv_tbl[] __devinitconst = { - { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, - { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, - { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, -diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c -index 2a5a34d..be871cc 100644 ---- a/drivers/net/hamradio/6pack.c -+++ b/drivers/net/hamradio/6pack.c -@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty, - unsigned char buf[512]; - int count1; - -+ pax_track_stack(); -+ - if (!count) - return; - -diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h -index 4519a13..f97fcd0 100644 ---- a/drivers/net/igb/e1000_hw.h -+++ b/drivers/net/igb/e1000_hw.h -@@ -314,6 +314,7 @@ struct e1000_mac_operations { - s32 (*read_mac_addr)(struct e1000_hw *); - s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); - }; -+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; - - struct e1000_phy_operations { - s32 (*acquire)(struct e1000_hw *); -@@ -330,6 +331,7 @@ struct e1000_phy_operations { - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); - s32 (*write_reg)(struct e1000_hw *, u32, u16); - }; -+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; - - struct e1000_nvm_operations { - s32 (*acquire)(struct e1000_hw *); -@@ -339,6 +341,7 @@ struct e1000_nvm_operations { - s32 (*update)(struct e1000_hw *); - s32 (*validate)(struct e1000_hw *); - }; -+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; - - struct e1000_info { - s32 (*get_invariants)(struct e1000_hw *); -@@ -350,7 +353,7 @@ struct e1000_info { - extern const struct e1000_info e1000_82575_info; - - struct e1000_mac_info { -- struct e1000_mac_operations ops; -+ e1000_mac_operations_no_const ops; - - u8 addr[6]; - u8 perm_addr[6]; -@@ -388,7 +391,7 @@ struct e1000_mac_info { - }; - - struct e1000_phy_info { -- struct e1000_phy_operations ops; -+ e1000_phy_operations_no_const ops; 
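
The e1000e and igb hw.h hunks above introduce typedefs such as e1000_mac_operations_no_const and switch the embedded ops members over to them. Under the PaX constification approach this patch appears to follow, structures consisting of function pointers are treated as read-only by default; a __no_const variant is kept for the few per-device copies that genuinely have to be written at probe time. A minimal standalone sketch of that split, using hypothetical names (nic_ops, nic_info) rather than the driver's real types:

/*
 * Sketch only -- not the e1000e code. Shared callback tables stay const
 * (read-only memory); a separate non-const typedef is used for embedded
 * copies that are filled in during device setup.
 */
#include <stdio.h>

struct nic_ops {
        int (*reset)(void);
        int (*read_mac)(unsigned char *addr);
};

/* writable variant for per-device copies patched during probe */
typedef struct nic_ops nic_ops_no_const;

struct nic_info {
        nic_ops_no_const ops;       /* filled in at probe time */
        unsigned char addr[6];
};

static int toy_reset(void) { return 0; }
static int toy_read_mac(unsigned char *addr) { addr[0] = 0x02; return 0; }

/* a fully shared table can remain const */
static const struct nic_ops default_ops = {
        .reset    = toy_reset,
        .read_mac = toy_read_mac,
};

int main(void)
{
        struct nic_info info;

        info.ops = default_ops;            /* copy the template ... */
        info.ops.reset = toy_reset;        /* ... then override one hook */
        printf("reset() -> %d\n", info.ops.reset());
        return 0;
}
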
- - enum e1000_phy_type type; - -@@ -423,7 +426,7 @@ struct e1000_phy_info { - }; - - struct e1000_nvm_info { -- struct e1000_nvm_operations ops; -+ e1000_nvm_operations_no_const ops; - enum e1000_nvm_type type; - enum e1000_nvm_override override; - -@@ -468,6 +471,7 @@ struct e1000_mbx_operations { - s32 (*check_for_ack)(struct e1000_hw *, u16); - s32 (*check_for_rst)(struct e1000_hw *, u16); - }; -+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; - - struct e1000_mbx_stats { - u32 msgs_tx; -@@ -479,7 +483,7 @@ struct e1000_mbx_stats { - }; - - struct e1000_mbx_info { -- struct e1000_mbx_operations ops; -+ e1000_mbx_operations_no_const ops; - struct e1000_mbx_stats stats; - u32 timeout; - u32 usec_delay; -diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h -index d7ed58f..64cde36 100644 ---- a/drivers/net/igbvf/vf.h -+++ b/drivers/net/igbvf/vf.h -@@ -189,9 +189,10 @@ struct e1000_mac_operations { - s32 (*read_mac_addr)(struct e1000_hw *); - s32 (*set_vfta)(struct e1000_hw *, u16, bool); - }; -+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; - - struct e1000_mac_info { -- struct e1000_mac_operations ops; -+ e1000_mac_operations_no_const ops; - u8 addr[6]; - u8 perm_addr[6]; - -@@ -213,6 +214,7 @@ struct e1000_mbx_operations { - s32 (*check_for_ack)(struct e1000_hw *); - s32 (*check_for_rst)(struct e1000_hw *); - }; -+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; - - struct e1000_mbx_stats { - u32 msgs_tx; -@@ -224,7 +226,7 @@ struct e1000_mbx_stats { - }; - - struct e1000_mbx_info { -- struct e1000_mbx_operations ops; -+ e1000_mbx_operations_no_const ops; - struct e1000_mbx_stats stats; - u32 timeout; - u32 usec_delay; -diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c -index 6a130eb..1aeb9e4 100644 ---- a/drivers/net/ixgb/ixgb_main.c -+++ b/drivers/net/ixgb/ixgb_main.c -@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev) - u32 rctl; - int i; - -+ pax_track_stack(); -+ - /* Check for Promiscuous and All Multicast modes */ - - rctl = IXGB_READ_REG(hw, RCTL); -diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c -index dd7fbeb..44b9bbf 100644 ---- a/drivers/net/ixgb/ixgb_param.c -+++ b/drivers/net/ixgb/ixgb_param.c -@@ -261,6 +261,9 @@ void __devinit - ixgb_check_options(struct ixgb_adapter *adapter) - { - int bd = adapter->bd_number; -+ -+ pax_track_stack(); -+ - if (bd >= IXGB_MAX_NIC) { - pr_notice("Warning: no configuration for board #%i\n", bd); - pr_notice("Using defaults for all values\n"); -diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h -index e0d970e..1cfdea5 100644 ---- a/drivers/net/ixgbe/ixgbe_type.h -+++ b/drivers/net/ixgbe/ixgbe_type.h -@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations { - s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); - }; -+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const; - - struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); -@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations { - /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); - }; -+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const; - - struct ixgbe_phy_operations { - s32 (*identify)(struct ixgbe_hw *); -@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations { - s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); - s32 (*check_overtemp)(struct ixgbe_hw *); - }; 
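
Several hunks above (bnx2, cxgb4, ixgb, and more below) add a pax_track_stack() call at the top of functions that declare large on-stack buffers. Judging from the pattern, the call records how deep the kernel stack has grown in such functions so that PaX's stack-sanitizing code knows how much to clear later; the exact semantics belong to the grsecurity patch itself. The toy userspace sketch below only mimics that bookkeeping with a hypothetical track_stack() macro:

#include <stdio.h>
#include <stdint.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

/* record the deepest (lowest) stack address seen so far;
 * assumes a downward-growing stack, as on x86 */
#define track_stack() do {                          \
        char probe;                                 \
        uintptr_t sp = (uintptr_t)&probe;           \
        if (sp < lowest_sp)                         \
                lowest_sp = sp;                     \
} while (0)

static void parse_config(void)
{
        char buf[4096];          /* the kind of large local the patch targets */

        track_stack();           /* note how deep we are before using buf */
        buf[0] = '\0';
        (void)buf;
}

int main(void)
{
        track_stack();
        parse_config();
        printf("deepest stack address observed: %#lx\n",
               (unsigned long)lowest_sp);
        return 0;
}
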
-+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const; - - struct ixgbe_eeprom_info { -- struct ixgbe_eeprom_operations ops; -+ ixgbe_eeprom_operations_no_const ops; - enum ixgbe_eeprom_type type; - u32 semaphore_delay; - u16 word_size; -@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info { - - #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 - struct ixgbe_mac_info { -- struct ixgbe_mac_operations ops; -+ ixgbe_mac_operations_no_const ops; - enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; -@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info { - }; - - struct ixgbe_phy_info { -- struct ixgbe_phy_operations ops; -+ ixgbe_phy_operations_no_const ops; - struct mdio_if_info mdio; - enum ixgbe_phy_type type; - u32 id; -@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations { - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); - }; -+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const; - - struct ixgbe_mbx_stats { - u32 msgs_tx; -@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats { - }; - - struct ixgbe_mbx_info { -- struct ixgbe_mbx_operations ops; -+ ixgbe_mbx_operations_no_const ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 usec_delay; -diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h -index 10306b4..28df758 100644 ---- a/drivers/net/ixgbevf/vf.h -+++ b/drivers/net/ixgbevf/vf.h -@@ -70,6 +70,7 @@ struct ixgbe_mac_operations { - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); - }; -+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const; - - enum ixgbe_mac_type { - ixgbe_mac_unknown = 0, -@@ -79,7 +80,7 @@ enum ixgbe_mac_type { - }; - - struct ixgbe_mac_info { -- struct ixgbe_mac_operations ops; -+ ixgbe_mac_operations_no_const ops; - u8 addr[6]; - u8 perm_addr[6]; - -@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations { - s32 (*check_for_ack)(struct ixgbe_hw *); - s32 (*check_for_rst)(struct ixgbe_hw *); - }; -+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const; - - struct ixgbe_mbx_stats { - u32 msgs_tx; -@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats { - }; - - struct ixgbe_mbx_info { -- struct ixgbe_mbx_operations ops; -+ ixgbe_mbx_operations_no_const ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 udelay; -diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c -index 27418d3..adf15bb 100644 ---- a/drivers/net/ksz884x.c -+++ b/drivers/net/ksz884x.c -@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev, - int rc; - u64 counter[TOTAL_PORT_COUNTER_NUM]; - -+ pax_track_stack(); -+ - mutex_lock(&hw_priv->lock); - n = SWITCH_PORT_NUM; - for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { -diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c -index f0ee35d..3831c8a 100644 ---- a/drivers/net/mlx4/main.c -+++ b/drivers/net/mlx4/main.c -@@ -40,6 +40,7 @@ - #include <linux/dma-mapping.h> - #include <linux/slab.h> - #include <linux/io-mapping.h> -+#include <linux/sched.h> - - #include <linux/mlx4/device.h> - #include <linux/mlx4/doorbell.h> -@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) - u64 icm_size; - int err; - -+ pax_track_stack(); -+ - err = mlx4_QUERY_FW(dev); - if (err) { - if (err == -EACCES) -diff --git a/drivers/net/niu.c b/drivers/net/niu.c -index ed47585..5e5be8f 100644 ---- a/drivers/net/niu.c -+++ b/drivers/net/niu.c -@@ -9061,6 +9061,8 @@ static void 
__devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) - int i, num_irqs, err; - u8 first_ldg; - -+ pax_track_stack(); -+ - first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; - for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) - ldg_num_map[i] = first_ldg + i; -diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c -index 80b6f36..5cd8938 100644 ---- a/drivers/net/pcnet32.c -+++ b/drivers/net/pcnet32.c -@@ -270,7 +270,7 @@ struct pcnet32_private { - struct sk_buff **rx_skbuff; - dma_addr_t *tx_dma_addr; - dma_addr_t *rx_dma_addr; -- struct pcnet32_access a; -+ struct pcnet32_access *a; - spinlock_t lock; /* Guard lock */ - unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int rx_ring_size; /* current rx ring size */ -@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev) - u16 val; - - netif_wake_queue(dev); -- val = lp->a.read_csr(ioaddr, CSR3); -+ val = lp->a->read_csr(ioaddr, CSR3); - val &= 0x00ff; -- lp->a.write_csr(ioaddr, CSR3, val); -+ lp->a->write_csr(ioaddr, CSR3, val); - napi_enable(&lp->napi); - } - -@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev) - r = mii_link_ok(&lp->mii_if); - } else if (lp->chip_version >= PCNET32_79C970A) { - ulong ioaddr = dev->base_addr; /* card base I/O address */ -- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); -+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); - } else { /* can not detect link on really old chips */ - r = 1; - } -@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, - pcnet32_netif_stop(dev); - - spin_lock_irqsave(&lp->lock, flags); -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ - - size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); - -@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev, - static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) - { - struct pcnet32_private *lp = netdev_priv(dev); -- struct pcnet32_access *a = &lp->a; /* access to registers */ -+ struct pcnet32_access *a = lp->a; /* access to registers */ - ulong ioaddr = dev->base_addr; /* card base I/O address */ - struct sk_buff *skb; /* sk buff */ - int x, i; /* counters */ -@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) - pcnet32_netif_stop(dev); - - spin_lock_irqsave(&lp->lock, flags); -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ - - numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); - - /* Reset the PCNET32 */ -- lp->a.reset(ioaddr); -- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ -+ lp->a->reset(ioaddr); -+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - - /* switch pcnet32 to 32bit mode */ -- lp->a.write_bcr(ioaddr, 20, 2); -+ lp->a->write_bcr(ioaddr, 20, 2); - - /* purge & init rings but don't actually restart */ - pcnet32_restart(dev, 0x0000); - -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ - - /* Initialize Transmit buffers. 
*/ - size = data_len + 15; -@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) - - /* set int loopback in CSR15 */ - x = a->read_csr(ioaddr, CSR15) & 0xfffc; -- lp->a.write_csr(ioaddr, CSR15, x | 0x0044); -+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044); - - teststatus = cpu_to_le16(0x8000); -- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ - - /* Check status of descriptors */ - for (x = 0; x < numbuffs; x++) { -@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) - } - } - -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ - wmb(); - if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { - netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n"); -@@ -1015,7 +1015,7 @@ clean_up: - pcnet32_restart(dev, CSR0_NORMAL); - } else { - pcnet32_purge_rx_ring(dev); -- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ -+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ - } - spin_unlock_irqrestore(&lp->lock, flags); - -@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) - { - struct pcnet32_private *lp = netdev_priv(dev); -- struct pcnet32_access *a = &lp->a; -+ struct pcnet32_access *a = lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - int i; -@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, - { - int csr5; - struct pcnet32_private *lp = netdev_priv(dev); -- struct pcnet32_access *a = &lp->a; -+ struct pcnet32_access *a = lp->a; - ulong ioaddr = dev->base_addr; - int ticks; - -@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) - spin_lock_irqsave(&lp->lock, flags); - if (pcnet32_tx(dev)) { - /* reset the chip to clear the error condition, then restart */ -- lp->a.reset(ioaddr); -- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ -+ lp->a->reset(ioaddr); -+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - pcnet32_restart(dev, CSR0_START); - netif_wake_queue(dev); - } -@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) - __napi_complete(napi); - - /* clear interrupt masks */ -- val = lp->a.read_csr(ioaddr, CSR3); -+ val = lp->a->read_csr(ioaddr, CSR3); - val &= 0x00ff; -- lp->a.write_csr(ioaddr, CSR3, val); -+ lp->a->write_csr(ioaddr, CSR3, val); - - /* Set interrupt enable. 
*/ -- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); - - spin_unlock_irqrestore(&lp->lock, flags); - } -@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - int i, csr0; - u16 *buff = ptr; - struct pcnet32_private *lp = netdev_priv(dev); -- struct pcnet32_access *a = &lp->a; -+ struct pcnet32_access *a = lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - -@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - for (j = 0; j < PCNET32_MAX_PHYS; j++) { - if (lp->phymask & (1 << j)) { - for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { -- lp->a.write_bcr(ioaddr, 33, -+ lp->a->write_bcr(ioaddr, 33, - (j << 5) | i); -- *buff++ = lp->a.read_bcr(ioaddr, 34); -+ *buff++ = lp->a->read_bcr(ioaddr, 34); - } - } - } -@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) - ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) - lp->options |= PCNET32_PORT_FD; - -- lp->a = *a; -+ lp->a = a; - - /* prior to register_netdev, dev->name is not yet correct */ - if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { -@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) - if (lp->mii) { - /* lp->phycount and lp->phymask are set to 0 by memset above */ - -- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; -+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f; - /* scan for PHYs */ - for (i = 0; i < PCNET32_MAX_PHYS; i++) { - unsigned short id1, id2; -@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) - pr_info("Found PHY %04x:%04x at address %d\n", - id1, id2, i); - } -- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); -+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); - if (lp->phycount > 1) - lp->options |= PCNET32_PORT_MII; - } -@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev) - } - - /* Reset the PCNET32 */ -- lp->a.reset(ioaddr); -+ lp->a->reset(ioaddr); - - /* switch pcnet32 to 32bit mode */ -- lp->a.write_bcr(ioaddr, 20, 2); -+ lp->a->write_bcr(ioaddr, 20, 2); - - netif_printk(lp, ifup, KERN_DEBUG, dev, - "%s() irq %d tx/rx rings %#x/%#x init %#x\n", -@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev) - (u32) (lp->init_dma_addr)); - - /* set/reset autoselect bit */ -- val = lp->a.read_bcr(ioaddr, 2) & ~2; -+ val = lp->a->read_bcr(ioaddr, 2) & ~2; - if (lp->options & PCNET32_PORT_ASEL) - val |= 2; -- lp->a.write_bcr(ioaddr, 2, val); -+ lp->a->write_bcr(ioaddr, 2, val); - - /* handle full duplex setting */ - if (lp->mii_if.full_duplex) { -- val = lp->a.read_bcr(ioaddr, 9) & ~3; -+ val = lp->a->read_bcr(ioaddr, 9) & ~3; - if (lp->options & PCNET32_PORT_FD) { - val |= 1; - if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) -@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev) - if (lp->chip_version == 0x2627) - val |= 3; - } -- lp->a.write_bcr(ioaddr, 9, val); -+ lp->a->write_bcr(ioaddr, 9, val); - } - - /* set/reset GPSI bit in test register */ -- val = lp->a.read_csr(ioaddr, 124) & ~0x10; -+ val = lp->a->read_csr(ioaddr, 124) & ~0x10; - if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) - val |= 0x10; -- lp->a.write_csr(ioaddr, 124, val); -+ lp->a->write_csr(ioaddr, 124, val); - - /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ - if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && 
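
The pcnet32 hunks above replace the embedded struct pcnet32_access copy in the driver's private data with a pointer, which is why every lp->a.read_csr(...) becomes lp->a->read_csr(...). Pointing at one shared accessor table instead of copying it into each device's private struct allows the table itself to be const and kept in read-only memory. A standalone sketch of that shape, with made-up names (reg_access, card_priv) rather than the driver's real structures:

#include <stdio.h>

struct reg_access {
        unsigned (*read_csr)(unsigned long ioaddr, int reg);
        void     (*write_csr)(unsigned long ioaddr, int reg, unsigned val);
};

static unsigned toy_read_csr(unsigned long ioaddr, int reg)
{
        (void)ioaddr;
        return (unsigned)reg;                /* pretend register readback */
}

static void toy_write_csr(unsigned long ioaddr, int reg, unsigned val)
{
        (void)ioaddr; (void)reg; (void)val;  /* no real hardware here */
}

/* one shared, read-only accessor table instead of a copy per device */
static const struct reg_access pio_access = {
        .read_csr  = toy_read_csr,
        .write_csr = toy_write_csr,
};

struct card_priv {
        const struct reg_access *a;          /* pointer, not an embedded copy */
        unsigned long ioaddr;
};

int main(void)
{
        struct card_priv lp = { .a = &pio_access, .ioaddr = 0x300 };

        lp.a->write_csr(lp.ioaddr, 0, 0x0004);
        printf("CSR3 = %u\n", lp.a->read_csr(lp.ioaddr, 3));
        return 0;
}
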
-@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev) - * duplex, and/or enable auto negotiation, and clear DANAS - */ - if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { -- lp->a.write_bcr(ioaddr, 32, -- lp->a.read_bcr(ioaddr, 32) | 0x0080); -+ lp->a->write_bcr(ioaddr, 32, -+ lp->a->read_bcr(ioaddr, 32) | 0x0080); - /* disable Auto Negotiation, set 10Mpbs, HD */ -- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; -+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; - if (lp->options & PCNET32_PORT_FD) - val |= 0x10; - if (lp->options & PCNET32_PORT_100) - val |= 0x08; -- lp->a.write_bcr(ioaddr, 32, val); -+ lp->a->write_bcr(ioaddr, 32, val); - } else { - if (lp->options & PCNET32_PORT_ASEL) { -- lp->a.write_bcr(ioaddr, 32, -- lp->a.read_bcr(ioaddr, -+ lp->a->write_bcr(ioaddr, 32, -+ lp->a->read_bcr(ioaddr, - 32) | 0x0080); - /* enable auto negotiate, setup, disable fd */ -- val = lp->a.read_bcr(ioaddr, 32) & ~0x98; -+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98; - val |= 0x20; -- lp->a.write_bcr(ioaddr, 32, val); -+ lp->a->write_bcr(ioaddr, 32, val); - } - } - } else { -@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev) - * There is really no good other way to handle multiple PHYs - * other than turning off all automatics - */ -- val = lp->a.read_bcr(ioaddr, 2); -- lp->a.write_bcr(ioaddr, 2, val & ~2); -- val = lp->a.read_bcr(ioaddr, 32); -- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ -+ val = lp->a->read_bcr(ioaddr, 2); -+ lp->a->write_bcr(ioaddr, 2, val & ~2); -+ val = lp->a->read_bcr(ioaddr, 32); -+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ - - if (!(lp->options & PCNET32_PORT_ASEL)) { - /* setup ecmd */ -@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev) - ethtool_cmd_speed_set(&ecmd, - (lp->options & PCNET32_PORT_100) ? - SPEED_100 : SPEED_10); -- bcr9 = lp->a.read_bcr(ioaddr, 9); -+ bcr9 = lp->a->read_bcr(ioaddr, 9); - - if (lp->options & PCNET32_PORT_FD) { - ecmd.duplex = DUPLEX_FULL; -@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev) - ecmd.duplex = DUPLEX_HALF; - bcr9 |= ~(1 << 0); - } -- lp->a.write_bcr(ioaddr, 9, bcr9); -+ lp->a->write_bcr(ioaddr, 9, bcr9); - } - - for (i = 0; i < PCNET32_MAX_PHYS; i++) { -@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev) - - #ifdef DO_DXSUFLO - if (lp->dxsuflo) { /* Disable transmit stop on underflow */ -- val = lp->a.read_csr(ioaddr, CSR3); -+ val = lp->a->read_csr(ioaddr, CSR3); - val |= 0x40; -- lp->a.write_csr(ioaddr, CSR3, val); -+ lp->a->write_csr(ioaddr, CSR3, val); - } - #endif - -@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev) - napi_enable(&lp->napi); - - /* Re-initialize the PCNET32, and start it when done. 
*/ -- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); -- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); -+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); -+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); - -- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ -- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); -+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ -+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); - - netif_start_queue(dev); - -@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev) - - i = 0; - while (i++ < 100) -- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) -+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) - break; - /* - * We used to clear the InitDone bit, 0x0100, here but Mark Stockton - * reports that doing so triggers a bug in the '974. - */ -- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); - - netif_printk(lp, ifup, KERN_DEBUG, dev, - "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n", - i, - (u32) (lp->init_dma_addr), -- lp->a.read_csr(ioaddr, CSR0)); -+ lp->a->read_csr(ioaddr, CSR0)); - - spin_unlock_irqrestore(&lp->lock, flags); - -@@ -2218,7 +2218,7 @@ err_free_ring: - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ -- lp->a.write_bcr(ioaddr, 20, 4); -+ lp->a->write_bcr(ioaddr, 20, 4); - - err_free_irq: - spin_unlock_irqrestore(&lp->lock, flags); -@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) - - /* wait for stop */ - for (i = 0; i < 100; i++) -- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) -+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP) - break; - - if (i >= 100) -@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) - return; - - /* ReInit Ring */ -- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); - i = 0; - while (i++ < 1000) -- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) -+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) - break; - -- lp->a.write_csr(ioaddr, CSR0, csr0_bits); -+ lp->a->write_csr(ioaddr, CSR0, csr0_bits); - } - - static void pcnet32_tx_timeout(struct net_device *dev) -@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev) - /* Transmitter timeout, serious problems. */ - if (pcnet32_debug & NETIF_MSG_DRV) - pr_err("%s: transmit timed out, status %4.4x, resetting\n", -- dev->name, lp->a.read_csr(ioaddr, CSR0)); -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); -+ dev->name, lp->a->read_csr(ioaddr, CSR0)); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); - dev->stats.tx_errors++; - if (netif_msg_tx_err(lp)) { - int i; -@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, - - netif_printk(lp, tx_queued, KERN_DEBUG, dev, - "%s() called, csr0 %4.4x\n", -- __func__, lp->a.read_csr(ioaddr, CSR0)); -+ __func__, lp->a->read_csr(ioaddr, CSR0)); - - /* Default status -- will not enable Successful-TxDone - * interrupt when that option is available to us. -@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, - dev->stats.tx_bytes += skb->len; - - /* Trigger an immediate send poll. 
*/ -- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); - - if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { - lp->tx_full = 1; -@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id) - - spin_lock(&lp->lock); - -- csr0 = lp->a.read_csr(ioaddr, CSR0); -+ csr0 = lp->a->read_csr(ioaddr, CSR0); - while ((csr0 & 0x8f00) && --boguscnt >= 0) { - if (csr0 == 0xffff) - break; /* PCMCIA remove happened */ - /* Acknowledge all of the current interrupt sources ASAP. */ -- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); -+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); - - netif_printk(lp, intr, KERN_DEBUG, dev, - "interrupt csr0=%#2.2x new csr=%#2.2x\n", -- csr0, lp->a.read_csr(ioaddr, CSR0)); -+ csr0, lp->a->read_csr(ioaddr, CSR0)); - - /* Log misc errors. */ - if (csr0 & 0x4000) -@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id) - if (napi_schedule_prep(&lp->napi)) { - u16 val; - /* set interrupt masks */ -- val = lp->a.read_csr(ioaddr, CSR3); -+ val = lp->a->read_csr(ioaddr, CSR3); - val |= 0x5f00; -- lp->a.write_csr(ioaddr, CSR3, val); -+ lp->a->write_csr(ioaddr, CSR3, val); - - __napi_schedule(&lp->napi); - break; - } -- csr0 = lp->a.read_csr(ioaddr, CSR0); -+ csr0 = lp->a->read_csr(ioaddr, CSR0); - } - - netif_printk(lp, intr, KERN_DEBUG, dev, - "exiting interrupt, csr0=%#4.4x\n", -- lp->a.read_csr(ioaddr, CSR0)); -+ lp->a->read_csr(ioaddr, CSR0)); - - spin_unlock(&lp->lock); - -@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev) - - spin_lock_irqsave(&lp->lock, flags); - -- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); -+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); - - netif_printk(lp, ifdown, KERN_DEBUG, dev, - "Shutting down ethercard, status was %2.2x\n", -- lp->a.read_csr(ioaddr, CSR0)); -+ lp->a->read_csr(ioaddr, CSR0)); - - /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); - - /* - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ -- lp->a.write_bcr(ioaddr, 20, 4); -+ lp->a->write_bcr(ioaddr, 20, 4); - - spin_unlock_irqrestore(&lp->lock, flags); - -@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); -- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); -+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); - spin_unlock_irqrestore(&lp->lock, flags); - - return &dev->stats; -@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev) - if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = cpu_to_le32(~0U); - ib->filter[1] = cpu_to_le32(~0U); -- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); -- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); -- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); -- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); -+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); -+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); -+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); -+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); - return; - } - /* clear the multicast filter */ -@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev) - mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); - } - for (i = 0; i < 4; i++) -- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, -+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, - le16_to_cpu(mcast_table[i])); - } - -@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev) - - spin_lock_irqsave(&lp->lock, flags); - suspended = pcnet32_suspend(dev, &flags, 0); -- csr15 = lp->a.read_csr(ioaddr, CSR15); -+ csr15 = lp->a->read_csr(ioaddr, CSR15); - if (dev->flags & IFF_PROMISC) { - /* Log any net taps. 
*/ - netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); - lp->init_block->mode = - cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << - 7); -- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); -+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); - } else { - lp->init_block->mode = - cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); -- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); -+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); - pcnet32_load_multicast(dev); - } - - if (suspended) { - int csr5; - /* clear SUSPEND (SPND) - CSR5 bit 0 */ -- csr5 = lp->a.read_csr(ioaddr, CSR5); -- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); -+ csr5 = lp->a->read_csr(ioaddr, CSR5); -+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); - } else { -- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); -+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); - pcnet32_restart(dev, CSR0_NORMAL); - netif_wake_queue(dev); - } -@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) - if (!lp->mii) - return 0; - -- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); -- val_out = lp->a.read_bcr(ioaddr, 34); -+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); -+ val_out = lp->a->read_bcr(ioaddr, 34); - - return val_out; - } -@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) - if (!lp->mii) - return; - -- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); -- lp->a.write_bcr(ioaddr, 34, val); -+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); -+ lp->a->write_bcr(ioaddr, 34, val); - } - - static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) - curr_link = mii_link_ok(&lp->mii_if); - } else { - ulong ioaddr = dev->base_addr; /* card base I/O address */ -- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); -+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); - } - if (!curr_link) { - if (prev_link || verbose) { -@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) - (ecmd.duplex == DUPLEX_FULL) - ? 
"full" : "half"); - } -- bcr9 = lp->a.read_bcr(dev->base_addr, 9); -+ bcr9 = lp->a->read_bcr(dev->base_addr, 9); - if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { - if (lp->mii_if.full_duplex) - bcr9 |= (1 << 0); - else - bcr9 &= ~(1 << 0); -- lp->a.write_bcr(dev->base_addr, 9, bcr9); -+ lp->a->write_bcr(dev->base_addr, 9, bcr9); - } - } else { - netif_info(lp, link, dev, "link up\n"); -diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c -index edfa15d..002bfa9 100644 ---- a/drivers/net/ppp_generic.c -+++ b/drivers/net/ppp_generic.c -@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) - void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; - struct ppp_stats stats; - struct ppp_comp_stats cstats; -- char *vers; - - switch (cmd) { - case SIOCGPPPSTATS: -@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) - break; - - case SIOCGPPPVER: -- vers = PPP_VERSION; -- if (copy_to_user(addr, vers, strlen(vers) + 1)) -+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) - break; - err = 0; - break; -diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c -index 6d657ca..d1be94b 100644 ---- a/drivers/net/r8169.c -+++ b/drivers/net/r8169.c -@@ -663,12 +663,12 @@ struct rtl8169_private { - struct mdio_ops { - void (*write)(void __iomem *, int, int); - int (*read)(void __iomem *, int); -- } mdio_ops; -+ } __no_const mdio_ops; - - struct pll_power_ops { - void (*down)(struct rtl8169_private *); - void (*up)(struct rtl8169_private *); -- } pll_power_ops; -+ } __no_const pll_power_ops; - - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_settings)(struct net_device *, struct ethtool_cmd *); -diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c -index 3c0f131..17f8b02 100644 ---- a/drivers/net/sis190.c -+++ b/drivers/net/sis190.c -@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, - static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, - struct net_device *dev) - { -- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; -+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 }; - struct sis190_private *tp = netdev_priv(dev); - struct pci_dev *isa_bridge; - u8 reg, tmp8; -diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c -index 4793df8..44c9849 100644 ---- a/drivers/net/sundance.c -+++ b/drivers/net/sundance.c -@@ -218,7 +218,7 @@ enum { - struct pci_id_info { - const char *name; - }; --static const struct pci_id_info pci_id_tbl[] __devinitdata = { -+static const struct pci_id_info pci_id_tbl[] __devinitconst = { - {"D-Link DFE-550TX FAST Ethernet Adapter"}, - {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, - {"D-Link DFE-580TX 4 port Server Adapter"}, -diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h -index 2ea456d..3ad9523 100644 ---- a/drivers/net/tg3.h -+++ b/drivers/net/tg3.h -@@ -134,6 +134,7 @@ - #define CHIPREV_ID_5750_A0 0x4000 - #define CHIPREV_ID_5750_A1 0x4001 - #define CHIPREV_ID_5750_A3 0x4003 -+#define CHIPREV_ID_5750_C1 0x4201 - #define CHIPREV_ID_5750_C2 0x4202 - #define CHIPREV_ID_5752_A0_HW 0x5000 - #define CHIPREV_ID_5752_A0 0x6000 -diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c -index 515f122..41dd273 100644 ---- a/drivers/net/tokenring/abyss.c -+++ b/drivers/net/tokenring/abyss.c -@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = { - - static int __init abyss_init (void) - { -- abyss_netdev_ops = 
tms380tr_netdev_ops; -+ pax_open_kernel(); -+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); - -- abyss_netdev_ops.ndo_open = abyss_open; -- abyss_netdev_ops.ndo_stop = abyss_close; -+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open; -+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close; -+ pax_close_kernel(); - - return pci_register_driver(&abyss_driver); - } -diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c -index 6153cfd..cf69c1c 100644 ---- a/drivers/net/tokenring/madgemc.c -+++ b/drivers/net/tokenring/madgemc.c -@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = { - - static int __init madgemc_init (void) - { -- madgemc_netdev_ops = tms380tr_netdev_ops; -- madgemc_netdev_ops.ndo_open = madgemc_open; -- madgemc_netdev_ops.ndo_stop = madgemc_close; -+ pax_open_kernel(); -+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); -+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open; -+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close; -+ pax_close_kernel(); - - return mca_register_driver (&madgemc_driver); - } -diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c -index 8d362e6..f91cc52 100644 ---- a/drivers/net/tokenring/proteon.c -+++ b/drivers/net/tokenring/proteon.c -@@ -353,9 +353,11 @@ static int __init proteon_init(void) - struct platform_device *pdev; - int i, num = 0, err = 0; - -- proteon_netdev_ops = tms380tr_netdev_ops; -- proteon_netdev_ops.ndo_open = proteon_open; -- proteon_netdev_ops.ndo_stop = tms380tr_close; -+ pax_open_kernel(); -+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); -+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open; -+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close; -+ pax_close_kernel(); - - err = platform_driver_register(&proteon_driver); - if (err) -diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c -index 46db5c5..37c1536 100644 ---- a/drivers/net/tokenring/skisa.c -+++ b/drivers/net/tokenring/skisa.c -@@ -363,9 +363,11 @@ static int __init sk_isa_init(void) - struct platform_device *pdev; - int i, num = 0, err = 0; - -- sk_isa_netdev_ops = tms380tr_netdev_ops; -- sk_isa_netdev_ops.ndo_open = sk_isa_open; -- sk_isa_netdev_ops.ndo_stop = tms380tr_close; -+ pax_open_kernel(); -+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); -+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open; -+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close; -+ pax_close_kernel(); - - err = platform_driver_register(&sk_isa_driver); - if (err) -diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c -index ce90efc..2676f89 100644 ---- a/drivers/net/tulip/de2104x.c -+++ b/drivers/net/tulip/de2104x.c -@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de) - struct de_srom_info_leaf *il; - void *bufp; - -+ pax_track_stack(); -+ - /* download entire eeprom */ - for (i = 0; i < DE_EEPROM_WORDS; i++) - ((__le16 *)ee_data)[i] = -diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c -index 959b410..c97fac2 100644 ---- a/drivers/net/tulip/de4x5.c -+++ b/drivers/net/tulip/de4x5.c -@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) - for (i=0; i<ETH_ALEN; i++) { - tmp.addr[i] = dev->dev_addr[i]; - } -- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; -+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, 
tmp.addr, ioc->len)) return -EFAULT; - break; - - case DE4X5_SET_HWADDR: /* Set the hardware address */ -@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) - spin_lock_irqsave(&lp->lock, flags); - memcpy(&statbuf, &lp->pktStats, ioc->len); - spin_unlock_irqrestore(&lp->lock, flags); -- if (copy_to_user(ioc->data, &statbuf, ioc->len)) -+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len)) - return -EFAULT; - break; - } -diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c -index fa5eee9..e074432 100644 ---- a/drivers/net/tulip/eeprom.c -+++ b/drivers/net/tulip/eeprom.c -@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = { - {NULL}}; - - --static const char *block_name[] __devinitdata = { -+static const char *block_name[] __devinitconst = { - "21140 non-MII", - "21140 MII PHY", - "21142 Serial PHY", -diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c -index 862eadf..3eee1e6 100644 ---- a/drivers/net/tulip/winbond-840.c -+++ b/drivers/net/tulip/winbond-840.c -@@ -236,7 +236,7 @@ struct pci_id_info { - int drv_flags; /* Driver use, intended as capability flags. */ - }; - --static const struct pci_id_info pci_id_tbl[] __devinitdata = { -+static const struct pci_id_info pci_id_tbl[] __devinitconst = { - { /* Sometime a Level-One switch card. */ - "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, - { "Winbond W89c840", CanHaveMII | HasBrokenTx}, -diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c -index 304fe78..db112fa 100644 ---- a/drivers/net/usb/hso.c -+++ b/drivers/net/usb/hso.c -@@ -71,7 +71,7 @@ - #include <asm/byteorder.h> - #include <linux/serial_core.h> - #include <linux/serial.h> -- -+#include <asm/local.h> - - #define MOD_AUTHOR "Option Wireless" - #define MOD_DESCRIPTION "USB High Speed Option driver" -@@ -257,7 +257,7 @@ struct hso_serial { - - /* from usb_serial_port */ - struct tty_struct *tty; -- int open_count; -+ local_t open_count; - spinlock_t serial_lock; - - int (*write_data) (struct hso_serial *serial); -@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) - struct urb *urb; - - urb = serial->rx_urb[0]; -- if (serial->open_count > 0) { -+ if (local_read(&serial->open_count) > 0) { - count = put_rxbuf_data(urb, serial); - if (count == -1) - return; -@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) - DUMP1(urb->transfer_buffer, urb->actual_length); - - /* Anyone listening? 
*/ -- if (serial->open_count == 0) -+ if (local_read(&serial->open_count) == 0) - return; - - if (status == 0) { -@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) - spin_unlock_irq(&serial->serial_lock); - - /* check for port already opened, if not set the termios */ -- serial->open_count++; -- if (serial->open_count == 1) { -+ if (local_inc_return(&serial->open_count) == 1) { - serial->rx_state = RX_IDLE; - /* Force default termio settings */ - _hso_serial_set_termios(tty, NULL); -@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) - result = hso_start_serial_device(serial->parent, GFP_KERNEL); - if (result) { - hso_stop_serial_device(serial->parent); -- serial->open_count--; -+ local_dec(&serial->open_count); - kref_put(&serial->parent->ref, hso_serial_ref_free); - } - } else { -@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) - - /* reset the rts and dtr */ - /* do the actual close */ -- serial->open_count--; -+ local_dec(&serial->open_count); - -- if (serial->open_count <= 0) { -- serial->open_count = 0; -+ if (local_read(&serial->open_count) <= 0) { -+ local_set(&serial->open_count, 0); - spin_lock_irq(&serial->serial_lock); - if (serial->tty == tty) { - serial->tty->driver_data = NULL; -@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) - - /* the actual setup */ - spin_lock_irqsave(&serial->serial_lock, flags); -- if (serial->open_count) -+ if (local_read(&serial->open_count)) - _hso_serial_set_termios(tty, old); - else - tty->termios = old; -@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb) - D1("Pending read interrupt on port %d\n", i); - spin_lock(&serial->serial_lock); - if (serial->rx_state == RX_IDLE && -- serial->open_count > 0) { -+ local_read(&serial->open_count) > 0) { - /* Setup and send a ctrl req read on - * port i */ - if (!serial->rx_urb_filled[0]) { -@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface) - /* Start all serial ports */ - for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { - if (serial_table[i] && (serial_table[i]->interface == iface)) { -- if (dev2ser(serial_table[i])->open_count) { -+ if (local_read(&dev2ser(serial_table[i])->open_count)) { - result = - hso_start_serial_device(serial_table[i], GFP_NOIO); - hso_kick_transmit(dev2ser(serial_table[i])); -diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c -index 27400ed..c796e05 100644 ---- a/drivers/net/vmxnet3/vmxnet3_ethtool.c -+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c -@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, - * Return with error code if any of the queue indices - * is out of range - */ -- if (p->ring_index[i] < 0 || -- p->ring_index[i] >= adapter->num_rx_queues) -+ if (p->ring_index[i] >= adapter->num_rx_queues) - return -EINVAL; - } - -diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h -index dd36258..e47fd31 100644 ---- a/drivers/net/vxge/vxge-config.h -+++ b/drivers/net/vxge/vxge-config.h -@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs { - void (*link_down)(struct __vxge_hw_device *devh); - void (*crit_err)(struct __vxge_hw_device *devh, - enum vxge_hw_event type, u64 ext_data); --}; -+} __no_const; - - /* - * struct __vxge_hw_blockpool_entry - Block private data structure -diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c -index 178348a2..18bb433 100644 ---- 
a/drivers/net/vxge/vxge-main.c -+++ b/drivers/net/vxge/vxge-main.c -@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) - struct sk_buff *completed[NR_SKB_COMPLETED]; - int more; - -+ pax_track_stack(); -+ - do { - more = 0; - skb_ptr = completed; -@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) - u8 mtable[256] = {0}; /* CPU to vpath mapping */ - int index; - -+ pax_track_stack(); -+ - /* - * Filling - * - itable with bucket numbers -diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h -index 4a518a3..936b334 100644 ---- a/drivers/net/vxge/vxge-traffic.h -+++ b/drivers/net/vxge/vxge-traffic.h -@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs { - struct vxge_hw_mempool_dma *dma_object, - u32 index, - u32 is_last); --}; -+} __no_const; - - #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ - ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) -diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c -index 56aeb01..547f71f 100644 ---- a/drivers/net/wan/hdlc_x25.c -+++ b/drivers/net/wan/hdlc_x25.c -@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) - - static int x25_open(struct net_device *dev) - { -- struct lapb_register_struct cb; -+ static struct lapb_register_struct cb = { -+ .connect_confirmation = x25_connected, -+ .connect_indication = x25_connected, -+ .disconnect_confirmation = x25_disconnected, -+ .disconnect_indication = x25_disconnected, -+ .data_indication = x25_data_indication, -+ .data_transmit = x25_data_transmit -+ }; - int result; - -- cb.connect_confirmation = x25_connected; -- cb.connect_indication = x25_connected; -- cb.disconnect_confirmation = x25_disconnected; -- cb.disconnect_indication = x25_disconnected; -- cb.data_indication = x25_data_indication; -- cb.data_transmit = x25_data_transmit; -- - result = lapb_register(dev, &cb); - if (result != LAPB_OK) - return result; -diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c -index 1fda46c..f2858f2 100644 ---- a/drivers/net/wimax/i2400m/usb-fw.c -+++ b/drivers/net/wimax/i2400m/usb-fw.c -@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m, - int do_autopm = 1; - DECLARE_COMPLETION_ONSTACK(notif_completion); - -+ pax_track_stack(); -+ - d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n", - i2400m, ack, ack_size); - BUG_ON(_ack == i2400m->bm_ack_buf); -diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c -index e1b3e3c..e413f18 100644 ---- a/drivers/net/wireless/airo.c -+++ b/drivers/net/wireless/airo.c -@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) { - BSSListElement * loop_net; - BSSListElement * tmp_net; - -+ pax_track_stack(); -+ - /* Blow away current list of scan results */ - list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) { - list_move_tail (&loop_net->list, &ai->network_free_list); -@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) - WepKeyRid wkr; - int rc; - -+ pax_track_stack(); -+ - memset( &mySsid, 0, sizeof( mySsid ) ); - kfree (ai->flash); - ai->flash = NULL; -@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode, - __le32 *vals = stats.vals; - int len; - -+ pax_track_stack(); -+ - if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) - return -ENOMEM; - data = file->private_data; -@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( 
struct inode *inode, struct file *file ) { - /* If doLoseSync is not 1, we won't do a Lose Sync */ - int doLoseSync = -1; - -+ pax_track_stack(); -+ - if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) - return -ENOMEM; - data = file->private_data; -@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev, - int i; - int loseSync = capable(CAP_NET_ADMIN) ? 1: -1; - -+ pax_track_stack(); -+ - qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL); - if (!qual) - return -ENOMEM; -@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local) - CapabilityRid cap_rid; - __le32 *vals = stats_rid.vals; - -+ pax_track_stack(); -+ - /* Get stats out of the card */ - clear_bit(JOB_WSTATS, &local->jobs); - if (local->power.event) { -diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h -index 17c4b56..00d836f 100644 ---- a/drivers/net/wireless/ath/ath.h -+++ b/drivers/net/wireless/ath/ath.h -@@ -121,6 +121,7 @@ struct ath_ops { - void (*write_flush) (void *); - u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); - }; -+typedef struct ath_ops __no_const ath_ops_no_const; - - struct ath_common; - struct ath_bus_ops; -diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c -index ccca724..7afbadc 100644 ---- a/drivers/net/wireless/ath/ath5k/debug.c -+++ b/drivers/net/wireless/ath/ath5k/debug.c -@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf, - unsigned int v; - u64 tsf; - -+ pax_track_stack(); -+ - v = ath5k_hw_reg_read(ah, AR5K_BEACON); - len += snprintf(buf + len, sizeof(buf) - len, - "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n", -@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, - unsigned int len = 0; - unsigned int i; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level); - -@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf, - unsigned int len = 0; - u32 filt = ath5k_hw_get_rx_filter(ah); - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n", - ah->bssidmask); - len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ", -@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf, - unsigned int len = 0; - int i; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "RX\n---------------------\n"); - len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n", -@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf, - char buf[700]; - unsigned int len = 0; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "HW has PHY error counters:\t%s\n", - ah->ah_capabilities.cap_has_phyerr_counters ? 
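
The bnad.c hunks near the start of this section and the hdlc_x25.c x25_open() hunk above both replace field-by-field runtime initialization of a callback structure with a single static instance built from designated initializers. Beyond dropping the per-call assignments, a table that is complete at compile time can later be made const and moved out of writable memory. A minimal before/after-style sketch with a hypothetical lapb-like callback struct (not the kernel's actual API):

#include <stdio.h>

struct link_callbacks {
        void (*connected)(int reason);
        void (*disconnected)(int reason);
        int  (*data_indication)(const char *buf, int len);
};

static void on_connected(int reason)    { printf("link up (%d)\n", reason); }
static void on_disconnected(int reason) { printf("link down (%d)\n", reason); }
static int  on_data(const char *buf, int len) { (void)buf; return len; }

static int register_link(const struct link_callbacks *cb)
{
        cb->connected(0);                    /* stand-in for lapb_register() */
        return 0;
}

int link_open(void)
{
        /* initialized once at compile time, not rebuilt on every open() */
        static const struct link_callbacks cb = {
                .connected       = on_connected,
                .disconnected    = on_disconnected,
                .data_indication = on_data,
        };

        return register_link(&cb);
}

int main(void)
{
        return link_open();
}
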
-@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf, - struct ath5k_buf *bf, *bf0; - int i, n; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "available txbuffers: %d\n", ah->txbuf_len); - -diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c -index 7c2aaad..ad14dee 100644 ---- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c -+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c -@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah) - int i, im, j; - int nmeasurement; - -+ pax_track_stack(); -+ - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (ah->txchainmask & (1 << i)) - num_chains++; -diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c -index f80d1d6..08b773d 100644 ---- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c -+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c -@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) - int theta_low_bin = 0; - int i; - -+ pax_track_stack(); -+ - /* disregard any bin that contains <= 16 samples */ - thresh_accum_cnt = 16; - scale_factor = 5; -diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c -index d1eb896..8b67cd4 100644 ---- a/drivers/net/wireless/ath/ath9k/debug.c -+++ b/drivers/net/wireless/ath/ath9k/debug.c -@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, - char buf[512]; - unsigned int len = 0; - -+ pax_track_stack(); -+ - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { - len += snprintf(buf + len, sizeof(buf) - len, - "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); -@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, - u8 addr[ETH_ALEN]; - u32 tmp; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n", - wiphy_name(sc->hw->wiphy), -diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c -index d3ff33c..309398e 100644 ---- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c -+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c -@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf, - unsigned int len = 0; - int ret = 0; - -+ pax_track_stack(); -+ - memset(&cmd_rsp, 0, sizeof(cmd_rsp)); - - ath9k_htc_ps_wakeup(priv); -@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf, - unsigned int len = 0; - int ret = 0; - -+ pax_track_stack(); -+ - memset(&cmd_rsp, 0, sizeof(cmd_rsp)); - - ath9k_htc_ps_wakeup(priv); -@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf, - unsigned int len = 0; - int ret = 0; - -+ pax_track_stack(); -+ - memset(&cmd_rsp, 0, sizeof(cmd_rsp)); - - ath9k_htc_ps_wakeup(priv); -@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, - char buf[512]; - unsigned int len = 0; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, - "%20s : %10u\n", "Buffers queued", - priv->debug.tx_stats.buf_queued); -@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf, - char buf[512]; - unsigned int len = 0; - -+ pax_track_stack(); -+ - spin_lock_bh(&priv->tx.tx_lock); - - len += snprintf(buf + len, 
sizeof(buf) - len, "TX slot bitmap : "); -@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf, - char buf[512]; - unsigned int len = 0; - -+ pax_track_stack(); -+ - len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", - "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue)); - -diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h -index c798890..c19a8fb 100644 ---- a/drivers/net/wireless/ath/ath9k/hw.h -+++ b/drivers/net/wireless/ath/ath9k/hw.h -@@ -588,7 +588,7 @@ struct ath_hw_private_ops { - - /* ANI */ - void (*ani_cache_ini_regs)(struct ath_hw *ah); --}; -+} __no_const; - - /** - * struct ath_hw_ops - callbacks used by hardware code and driver code -@@ -639,7 +639,7 @@ struct ath_hw_ops { - void (*antdiv_comb_conf_set)(struct ath_hw *ah, - struct ath_hw_antcomb_conf *antconf); - --}; -+} __no_const; - - struct ath_nf_limits { - s16 max; -@@ -652,7 +652,7 @@ struct ath_nf_limits { - #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */ - - struct ath_hw { -- struct ath_ops reg_ops; -+ ath_ops_no_const reg_ops; - - struct ieee80211_hw *hw; - struct ath_common common; -diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c -index ef9ad79..f5f8d80 100644 ---- a/drivers/net/wireless/ipw2x00/ipw2100.c -+++ b/drivers/net/wireless/ipw2x00/ipw2100.c -@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, - int err; - DECLARE_SSID_BUF(ssid); - -+ pax_track_stack(); -+ - IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len)); - - if (ssid_len) -@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv, - struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters; - int err; - -+ pax_track_stack(); -+ - IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n", - idx, keylen, len); - -diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c -index 32a9966..de69787 100644 ---- a/drivers/net/wireless/ipw2x00/libipw_rx.c -+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c -@@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device - unsigned long flags; - DECLARE_SSID_BUF(ssid); - -+ pax_track_stack(); -+ - LIBIPW_DEBUG_SCAN("'%s' (%pM" - "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", - print_ssid(ssid, info_element->data, info_element->len), -diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c -index 66ee1562..b90412b 100644 ---- a/drivers/net/wireless/iwlegacy/iwl3945-base.c -+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c -@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e - */ - if (iwl3945_mod_params.disable_hw_scan) { - IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); -- iwl3945_hw_ops.hw_scan = NULL; -+ pax_open_kernel(); -+ *(void **)&iwl3945_hw_ops.hw_scan = NULL; -+ pax_close_kernel(); - } - - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); -diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -index 3789ff4..22ab151 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c -@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->common.ctx; - -+ pax_track_stack(); -+ - 
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); - - /* Treat uninitialized rate scaling data same as non-existing. */ -@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, - container_of(lq_sta, struct iwl_station_priv, lq_sta); - struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; - -+ pax_track_stack(); -+ - /* Override starting rate (index 0) if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); - -diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h -index f9a407e..a6f2bb7 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-debug.h -+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h -@@ -68,8 +68,8 @@ do { \ - } while (0) - - #else --#define IWL_DEBUG(__priv, level, fmt, args...) --#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) -+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0) -+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0) - static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, - const void *p, u32 len) - {} -diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c -index ec1485b..900c3bd 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c -+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c -@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file, - int pos = 0; - const size_t bufsz = sizeof(buf); - -+ pax_track_stack(); -+ - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", - test_bit(STATUS_HCMD_ACTIVE, &priv->status)); - pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", -@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, - char buf[256 * NUM_IWL_RXON_CTX]; - const size_t bufsz = sizeof(buf); - -+ pax_track_stack(); -+ - for_each_context(priv, ctx) { - pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", - ctx->ctxid); -diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c -index 0a0cc96..fd49ad8 100644 ---- a/drivers/net/wireless/iwmc3200wifi/debugfs.c -+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c -@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp, - int buf_len = 512; - size_t len = 0; - -+ pax_track_stack(); -+ - if (*ppos != 0) - return 0; - if (count < sizeof(buf)) -diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c -index 031cd89..bdc8435 100644 ---- a/drivers/net/wireless/mac80211_hwsim.c -+++ b/drivers/net/wireless/mac80211_hwsim.c -@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void) - return -EINVAL; - - if (fake_hw_scan) { -- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; -- mac80211_hwsim_ops.sw_scan_start = NULL; -- mac80211_hwsim_ops.sw_scan_complete = NULL; -+ pax_open_kernel(); -+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; -+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL; -+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL; -+ pax_close_kernel(); - } - - spin_lock_init(&hwsim_radio_lock); -diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h -index 2215c3c..64e6a47 100644 ---- a/drivers/net/wireless/mwifiex/main.h -+++ b/drivers/net/wireless/mwifiex/main.h -@@ -560,7 +560,7 @@ struct mwifiex_if_ops { - - void (*update_mp_end_port) (struct mwifiex_adapter *, u16); - void (*cleanup_mpa_buf) (struct mwifiex_adapter *); --}; -+} __no_const; - 
- struct mwifiex_adapter { - struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM]; -diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c -index 29f9389..f6d2ce0 100644 ---- a/drivers/net/wireless/rndis_wlan.c -+++ b/drivers/net/wireless/rndis_wlan.c -@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) - - netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); - -- if (rts_threshold < 0 || rts_threshold > 2347) -+ if (rts_threshold > 2347) - rts_threshold = 2347; - - tmp = cpu_to_le32(rts_threshold); -diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c -index 3b11642..d6bb049 100644 ---- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c -+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c -@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, - u8 rfpath; - u8 num_total_rfpath = rtlphy->num_total_rfpath; - -+ pax_track_stack(); -+ - precommoncmdcnt = 0; - _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, -diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h -index a77f1bb..c608b2b 100644 ---- a/drivers/net/wireless/wl1251/wl1251.h -+++ b/drivers/net/wireless/wl1251/wl1251.h -@@ -266,7 +266,7 @@ struct wl1251_if_operations { - void (*reset)(struct wl1251 *wl); - void (*enable_irq)(struct wl1251 *wl); - void (*disable_irq)(struct wl1251 *wl); --}; -+} __no_const; - - struct wl1251 { - struct ieee80211_hw *hw; -diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c -index e0b3736..4b466e6 100644 ---- a/drivers/net/wireless/wl12xx/spi.c -+++ b/drivers/net/wireless/wl12xx/spi.c -@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, - u32 chunk_len; - int i; - -+ pax_track_stack(); -+ - WARN_ON(len > WL1271_AGGR_BUFFER_SIZE); - - spi_message_init(&m); -diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c -index f34b5b2..b5abb9f 100644 ---- a/drivers/oprofile/buffer_sync.c -+++ b/drivers/oprofile/buffer_sync.c -@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) - if (cookie == NO_COOKIE) - offset = pc; - if (cookie == INVALID_COOKIE) { -- atomic_inc(&oprofile_stats.sample_lost_no_mapping); -+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); - offset = pc; - } - if (cookie != last_cookie) { -@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) - /* add userspace sample */ - - if (!mm) { -- atomic_inc(&oprofile_stats.sample_lost_no_mm); -+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); - return 0; - } - - cookie = lookup_dcookie(mm, s->eip, &offset); - - if (cookie == INVALID_COOKIE) { -- atomic_inc(&oprofile_stats.sample_lost_no_mapping); -+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); - return 0; - } - -@@ -563,7 +563,7 @@ void sync_buffer(int cpu) - /* ignore backtraces if failed to add a sample */ - if (state == sb_bt_start) { - state = sb_bt_ignore; -- atomic_inc(&oprofile_stats.bt_lost_no_mapping); -+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); - } - } - release_mm(mm); -diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c -index dd87e86..bc0148c 100644 ---- a/drivers/oprofile/event_buffer.c -+++ b/drivers/oprofile/event_buffer.c -@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) - } - - if 
(buffer_pos == buffer_size) { -- atomic_inc(&oprofile_stats.event_lost_overflow); -+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); - return; - } - -diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c -index f8c752e..28bf4fc 100644 ---- a/drivers/oprofile/oprof.c -+++ b/drivers/oprofile/oprof.c -@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) - if (oprofile_ops.switch_events()) - return; - -- atomic_inc(&oprofile_stats.multiplex_counter); -+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter); - start_switch_worker(); - } - -diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c -index 917d28e..d62d981 100644 ---- a/drivers/oprofile/oprofile_stats.c -+++ b/drivers/oprofile/oprofile_stats.c -@@ -30,11 +30,11 @@ void oprofile_reset_stats(void) - cpu_buf->sample_invalid_eip = 0; - } - -- atomic_set(&oprofile_stats.sample_lost_no_mm, 0); -- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); -- atomic_set(&oprofile_stats.event_lost_overflow, 0); -- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); -- atomic_set(&oprofile_stats.multiplex_counter, 0); -+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); -+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); -+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); -+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); -+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); - } - - -diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h -index 38b6fc0..b5cbfce 100644 ---- a/drivers/oprofile/oprofile_stats.h -+++ b/drivers/oprofile/oprofile_stats.h -@@ -13,11 +13,11 @@ - #include <linux/atomic.h> - - struct oprofile_stat_struct { -- atomic_t sample_lost_no_mm; -- atomic_t sample_lost_no_mapping; -- atomic_t bt_lost_no_mapping; -- atomic_t event_lost_overflow; -- atomic_t multiplex_counter; -+ atomic_unchecked_t sample_lost_no_mm; -+ atomic_unchecked_t sample_lost_no_mapping; -+ atomic_unchecked_t bt_lost_no_mapping; -+ atomic_unchecked_t event_lost_overflow; -+ atomic_unchecked_t multiplex_counter; - }; - - extern struct oprofile_stat_struct oprofile_stats; -diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c -index e9ff6f7..28e259a 100644 ---- a/drivers/oprofile/oprofilefs.c -+++ b/drivers/oprofile/oprofilefs.c -@@ -186,7 +186,7 @@ static const struct file_operations atomic_ro_fops = { - - - int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, -- char const *name, atomic_t *val) -+ char const *name, atomic_unchecked_t *val) - { - return __oprofilefs_create_file(sb, root, name, - &atomic_ro_fops, 0444, val); -diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c -index 3f56bc0..707d642 100644 ---- a/drivers/parport/procfs.c -+++ b/drivers/parport/procfs.c -@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write, - - *ppos += len; - -- return copy_to_user(result, buffer, len) ? -EFAULT : 0; -+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0; - } - - #ifdef CONFIG_PARPORT_1284 -@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write, - - *ppos += len; - -- return copy_to_user (result, buffer, len) ? -EFAULT : 0; -+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0; - } - #endif /* IEEE1284.3 support. 
*/ - -diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h -index 9fff878..ad0ad53 100644 ---- a/drivers/pci/hotplug/cpci_hotplug.h -+++ b/drivers/pci/hotplug/cpci_hotplug.h -@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops { - int (*hardware_test) (struct slot* slot, u32 value); - u8 (*get_power) (struct slot* slot); - int (*set_power) (struct slot* slot, int value); --}; -+} __no_const; - - struct cpci_hp_controller { - unsigned int irq; -diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c -index 76ba8a1..20ca857 100644 ---- a/drivers/pci/hotplug/cpqphp_nvram.c -+++ b/drivers/pci/hotplug/cpqphp_nvram.c -@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start) - - void compaq_nvram_init (void __iomem *rom_start) - { -+ -+#ifndef CONFIG_PAX_KERNEXEC - if (rom_start) { - compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); - } -+#endif -+ - dbg("int15 entry = %p\n", compaq_int15_entry_point); - - /* initialize our int15 lock */ -diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c -index cbfbab1..6a9fced 100644 ---- a/drivers/pci/pcie/aspm.c -+++ b/drivers/pci/pcie/aspm.c -@@ -27,9 +27,9 @@ - #define MODULE_PARAM_PREFIX "pcie_aspm." - - /* Note: those are not register definitions */ --#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ --#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ --#define ASPM_STATE_L1 (4) /* L1 state */ -+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */ -+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */ -+#define ASPM_STATE_L1 (4U) /* L1 state */ - #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) - #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) - -diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c -index 6ab6bd3..72bdc69 100644 ---- a/drivers/pci/probe.c -+++ b/drivers/pci/probe.c -@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, - u32 l, sz, mask; - u16 orig_cmd; - -- mask = type ? PCI_ROM_ADDRESS_MASK : ~0; -+ mask = type ? 
(u32)PCI_ROM_ADDRESS_MASK : ~0; - - if (!dev->mmio_always_on) { - pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); -diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c -index 27911b5..5b6db88 100644 ---- a/drivers/pci/proc.c -+++ b/drivers/pci/proc.c -@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { - static int __init pci_proc_init(void) - { - struct pci_dev *dev = NULL; -+ -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); -+#endif -+#else - proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); -+#endif - proc_create("devices", 0, proc_bus_pci_dir, - &proc_bus_pci_dev_operations); - proc_initialized = 1; -diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c -index 90832a9..419089a 100644 ---- a/drivers/pci/xen-pcifront.c -+++ b/drivers/pci/xen-pcifront.c -@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, - struct pcifront_sd *sd = bus->sysdata; - struct pcifront_device *pdev = pcifront_get_pdev(sd); - -+ pax_track_stack(); -+ - if (verbose_request) - dev_info(&pdev->xdev->dev, - "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n", -@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, - struct pcifront_sd *sd = bus->sysdata; - struct pcifront_device *pdev = pcifront_get_pdev(sd); - -+ pax_track_stack(); -+ - if (verbose_request) - dev_info(&pdev->xdev->dev, - "write dev=%04x:%02x:%02x.%01x - " -@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, - struct pcifront_device *pdev = pcifront_get_pdev(sd); - struct msi_desc *entry; - -+ pax_track_stack(); -+ - if (nvec > SH_INFO_MAX_VEC) { - dev_err(&dev->dev, "too much vector for pci frontend: %x." - " Increase SH_INFO_MAX_VEC.\n", nvec); -@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev) - struct pcifront_sd *sd = dev->bus->sysdata; - struct pcifront_device *pdev = pcifront_get_pdev(sd); - -+ pax_track_stack(); -+ - err = do_pci_op(pdev, &op); - - /* What should do for error ? */ -@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[]) - struct pcifront_sd *sd = dev->bus->sysdata; - struct pcifront_device *pdev = pcifront_get_pdev(sd); - -+ pax_track_stack(); -+ - err = do_pci_op(pdev, &op); - if (likely(!err)) { - vector[0] = op.value; -diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c -index 7bd829f..a3237ad 100644 ---- a/drivers/platform/x86/thinkpad_acpi.c -+++ b/drivers/platform/x86/thinkpad_acpi.c -@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void) - return 0; - } - --void static hotkey_mask_warn_incomplete_mask(void) -+static void hotkey_mask_warn_incomplete_mask(void) - { - /* log only what the user can fix... 
*/ - const u32 wantedmask = hotkey_driver_mask & -@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m) - } - } - --static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, -- struct tp_nvram_state *newn, -- const u32 event_mask) --{ -- - #define TPACPI_COMPARE_KEY(__scancode, __member) \ - do { \ - if ((event_mask & (1 << __scancode)) && \ -@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, - tpacpi_hotkey_send_key(__scancode); \ - } while (0) - -- void issue_volchange(const unsigned int oldvol, -- const unsigned int newvol) -- { -- unsigned int i = oldvol; -+static void issue_volchange(const unsigned int oldvol, -+ const unsigned int newvol, -+ const u32 event_mask) -+{ -+ unsigned int i = oldvol; - -- while (i > newvol) { -- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); -- i--; -- } -- while (i < newvol) { -- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); -- i++; -- } -+ while (i > newvol) { -+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); -+ i--; - } -+ while (i < newvol) { -+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); -+ i++; -+ } -+} - -- void issue_brightnesschange(const unsigned int oldbrt, -- const unsigned int newbrt) -- { -- unsigned int i = oldbrt; -+static void issue_brightnesschange(const unsigned int oldbrt, -+ const unsigned int newbrt, -+ const u32 event_mask) -+{ -+ unsigned int i = oldbrt; - -- while (i > newbrt) { -- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); -- i--; -- } -- while (i < newbrt) { -- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); -- i++; -- } -+ while (i > newbrt) { -+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); -+ i--; -+ } -+ while (i < newbrt) { -+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); -+ i++; - } -+} - -+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, -+ struct tp_nvram_state *newn, -+ const u32 event_mask) -+{ - TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); - TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); - TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); -@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, - oldn->volume_level != newn->volume_level) { - /* recently muted, or repeated mute keypress, or - * multiple presses ending in mute */ -- issue_volchange(oldn->volume_level, newn->volume_level); -+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE); - } - } else { -@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); - } - if (oldn->volume_level != newn->volume_level) { -- issue_volchange(oldn->volume_level, newn->volume_level); -+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); - } else if (oldn->volume_toggle != newn->volume_toggle) { - /* repeated vol up/down keypress at end of scale ? 
*/ - if (newn->volume_level == 0) -@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, - /* handle brightness */ - if (oldn->brightness_level != newn->brightness_level) { - issue_brightnesschange(oldn->brightness_level, -- newn->brightness_level); -+ newn->brightness_level, -+ event_mask); - } else if (oldn->brightness_toggle != newn->brightness_toggle) { - /* repeated key presses that didn't change state */ - if (newn->brightness_level == 0) -@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, - && !tp_features.bright_unkfw) - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); - } -+} - - #undef TPACPI_COMPARE_KEY - #undef TPACPI_MAY_SEND_KEY --} - - /* - * Polling driver -diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c -index b859d16..5cc6b1a 100644 ---- a/drivers/pnp/pnpbios/bioscalls.c -+++ b/drivers/pnp/pnpbios/bioscalls.c -@@ -59,7 +59,7 @@ do { \ - set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ - } while(0) - --static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, -+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, - (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); - - /* -@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, - - cpu = get_cpu(); - save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; -+ -+ pax_open_kernel(); - get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; -+ pax_close_kernel(); - - /* On some boxes IRQ's during PnP BIOS calls are deadly. */ - spin_lock_irqsave(&pnp_bios_lock, flags); -@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, - :"memory"); - spin_unlock_irqrestore(&pnp_bios_lock, flags); - -+ pax_open_kernel(); - get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; -+ pax_close_kernel(); -+ - put_cpu(); - - /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ -@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) - return status; - } - --void pnpbios_calls_init(union pnp_bios_install_struct *header) -+void __init pnpbios_calls_init(union pnp_bios_install_struct *header) - { - int i; - -@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) - pnp_bios_callpoint.offset = header->fields.pm16offset; - pnp_bios_callpoint.segment = PNP_CS16; - -+ pax_open_kernel(); -+ - for_each_possible_cpu(i) { - struct desc_struct *gdt = get_cpu_gdt_table(i); - if (!gdt) -@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) - set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], - (unsigned long)__va(header->fields.pm16dseg)); - } -+ -+ pax_close_kernel(); - } -diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c -index b0ecacb..7c9da2e 100644 ---- a/drivers/pnp/resource.c -+++ b/drivers/pnp/resource.c -@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) - return 1; - - /* check if the resource is valid */ -- if (*irq < 0 || *irq > 15) -+ if (*irq > 15) - return 0; - - /* check if the resource is reserved */ -@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) - return 1; - - /* check if the resource is valid */ -- if (*dma < 0 || *dma == 4 || *dma > 7) -+ if (*dma == 4 || *dma > 7) - return 0; - - /* check if the resource is reserved */ -diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c -index bb16f5b..c751eef 100644 ---- a/drivers/power/bq27x00_battery.c -+++ b/drivers/power/bq27x00_battery.c -@@ -67,7 +67,7 @@ - struct bq27x00_device_info; - struct bq27x00_access_methods { - int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); --}; -+} __no_const; - - enum bq27x00_chip { BQ27000, BQ27500 }; - -diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c -index 33f5d9a..d957d3f 100644 ---- a/drivers/regulator/max8660.c -+++ b/drivers/regulator/max8660.c -@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client, - max8660->shadow_regs[MAX8660_OVER1] = 5; - } else { - /* Otherwise devices can be toggled via software */ -- max8660_dcdc_ops.enable = max8660_dcdc_enable; -- max8660_dcdc_ops.disable = max8660_dcdc_disable; -+ pax_open_kernel(); -+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable; -+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable; -+ pax_close_kernel(); - } - - /* -diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c -index 3285d41..ab7c22a 100644 ---- a/drivers/regulator/mc13892-regulator.c -+++ b/drivers/regulator/mc13892-regulator.c -@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev) - } - mc13xxx_unlock(mc13892); - -- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode -+ pax_open_kernel(); -+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode - = mc13892_vcam_set_mode; -- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode -+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode - = mc13892_vcam_get_mode; -+ pax_close_kernel(); - for (i = 0; i < pdata->num_regulators; i++) { - init_data = &pdata->regulators[i]; - priv->regulators[i] = regulator_register( -diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c -index cace6d3..f623fda 100644 ---- a/drivers/rtc/rtc-dev.c -+++ b/drivers/rtc/rtc-dev.c -@@ -14,6 +14,7 @@ - #include <linux/module.h> - #include <linux/rtc.h> - #include <linux/sched.h> -+#include 
<linux/grsecurity.h> - #include "rtc-core.h" - - static dev_t rtc_devt; -@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file, - if (copy_from_user(&tm, uarg, sizeof(tm))) - return -EFAULT; - -+ gr_log_timechange(); -+ - return rtc_set_time(rtc, &tm); - - case RTC_PIE_ON: -diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c -index f66c33b..7ae5823 100644 ---- a/drivers/scsi/BusLogic.c -+++ b/drivers/scsi/BusLogic.c -@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda - static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter - *PrototypeHostAdapter) - { -+ pax_track_stack(); -+ - /* - If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint - Host Adapters; otherwise, default to the standard ISA MultiMaster probe. -diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h -index ffb5878..e6d785c 100644 ---- a/drivers/scsi/aacraid/aacraid.h -+++ b/drivers/scsi/aacraid/aacraid.h -@@ -492,7 +492,7 @@ struct adapter_ops - int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); - /* Administrative operations */ - int (*adapter_comm)(struct aac_dev * dev, int comm); --}; -+} __no_const; - - /* - * Define which interrupt handler needs to be installed -diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c -index 8a0b330..b4286de 100644 ---- a/drivers/scsi/aacraid/commctrl.c -+++ b/drivers/scsi/aacraid/commctrl.c -@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) - u32 actual_fibsize64, actual_fibsize = 0; - int i; - -+ pax_track_stack(); - - if (dev->in_reset) { - dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); -diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c -index c7b6fed..4db0569 100644 ---- a/drivers/scsi/aacraid/linit.c -+++ b/drivers/scsi/aacraid/linit.c -@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = { - #elif defined(__devinitconst) - static const struct pci_device_id aac_pci_tbl[] __devinitconst = { - #else --static const struct pci_device_id aac_pci_tbl[] __devinitdata = { -+static const struct pci_device_id aac_pci_tbl[] __devinitconst = { - #endif - { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */ - { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */ -diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c -index d5ff142..49c0ebb 100644 ---- a/drivers/scsi/aic94xx/aic94xx_init.c -+++ b/drivers/scsi/aic94xx/aic94xx_init.c -@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { - .lldd_control_phy = asd_control_phy, - }; - --static const struct pci_device_id aic94xx_pci_table[] __devinitdata = { -+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = { - {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1}, - {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1}, - {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1}, -diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h -index a796de9..1ef20e1 100644 ---- a/drivers/scsi/bfa/bfa.h -+++ b/drivers/scsi/bfa/bfa.h -@@ -196,7 +196,7 @@ struct bfa_hwif_s { - u32 *end); - int cpe_vec_q0; - int rme_vec_q0; --}; -+} __no_const; - typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); - - struct bfa_faa_cbfn_s { -diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c -index e07bd47..dbd260a 100644 ---- 
a/drivers/scsi/bfa/bfa_fcpim.c -+++ b/drivers/scsi/bfa/bfa_fcpim.c -@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, - void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)) - { - struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); -- struct bfa_itn_s *itn; -+ bfa_itn_s_no_const *itn; - - itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag); - itn->isr = isr; -diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h -index 1080bcb..a3b39e3 100644 ---- a/drivers/scsi/bfa/bfa_fcpim.h -+++ b/drivers/scsi/bfa/bfa_fcpim.h -@@ -37,6 +37,7 @@ struct bfa_iotag_s { - struct bfa_itn_s { - bfa_isr_func_t isr; - }; -+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const; - - void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, - void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); -@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s { - struct list_head iotag_tio_free_q; /* free IO resources */ - struct list_head iotag_unused_q; /* unused IO resources*/ - struct bfa_iotag_s *iotag_arr; -- struct bfa_itn_s *itn_arr; -+ bfa_itn_s_no_const *itn_arr; - int num_ioim_reqs; - int num_fwtio_reqs; - int num_itns; -diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c -index d4f951f..197c350 100644 ---- a/drivers/scsi/bfa/bfa_fcs_lport.c -+++ b/drivers/scsi/bfa/bfa_fcs_lport.c -@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) - u16 len, count; - u16 templen; - -+ pax_track_stack(); -+ - /* - * get hba attributes - */ -@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, - u8 count = 0; - u16 templen; - -+ pax_track_stack(); -+ - /* - * get port attributes - */ -diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c -index 52628d5..f89d033 100644 ---- a/drivers/scsi/bfa/bfa_fcs_rport.c -+++ b/drivers/scsi/bfa/bfa_fcs_rport.c -@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, - struct fc_rpsc_speed_info_s speeds; - struct bfa_port_attr_s pport_attr; - -+ pax_track_stack(); -+ - bfa_trc(port->fcs, rx_fchs->s_id); - bfa_trc(port->fcs, rx_fchs->d_id); - -diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h -index 546d46b..642fa5b 100644 ---- a/drivers/scsi/bfa/bfa_ioc.h -+++ b/drivers/scsi/bfa/bfa_ioc.h -@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s { - bfa_ioc_disable_cbfn_t disable_cbfn; - bfa_ioc_hbfail_cbfn_t hbfail_cbfn; - bfa_ioc_reset_cbfn_t reset_cbfn; --}; -+} __no_const; - - /* - * IOC event notification mechanism. -@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s { - void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); - bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); - bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); --}; -+} __no_const; - - /* - * Queue element to wait for room in request queue. FIFO order is -diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c -index 66fb725..0fe05ab 100644 ---- a/drivers/scsi/bfa/bfad.c -+++ b/drivers/scsi/bfa/bfad.c -@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) { - struct bfad_vport_s *vport, *vport_new; - struct bfa_fcs_driver_info_s driver_info; - -+ pax_track_stack(); -+ - /* Limit min/max. 
xfer size to [64k-32MB] */ - if (max_xfer_size < BFAD_MIN_SECTORS >> 1) - max_xfer_size = BFAD_MIN_SECTORS >> 1; -diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c -index b4f6c9a..0eb1938 100644 ---- a/drivers/scsi/dpt_i2o.c -+++ b/drivers/scsi/dpt_i2o.c -@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) - dma_addr_t addr; - ulong flags = 0; - -+ pax_track_stack(); -+ - memset(&msg, 0, MAX_MESSAGE_SIZE*4); - // get user msg size in u32s - if(get_user(size, &user_msg[0])){ -@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d - s32 rcode; - dma_addr_t addr; - -+ pax_track_stack(); -+ - memset(msg, 0 , sizeof(msg)); - len = scsi_bufflen(cmd); - direction = 0x00000000; -diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c -index 94de889..ca4f0cf 100644 ---- a/drivers/scsi/eata.c -+++ b/drivers/scsi/eata.c -@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j, - struct hostdata *ha; - char name[16]; - -+ pax_track_stack(); -+ - sprintf(name, "%s%d", driver_name, j); - - if (!request_region(port_base, REGION_SIZE, driver_name)) { -diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c -index c74c4b8..c41ca3f 100644 ---- a/drivers/scsi/fcoe/fcoe_ctlr.c -+++ b/drivers/scsi/fcoe/fcoe_ctlr.c -@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) - } buf; - int rc; - -+ pax_track_stack(); -+ - fiph = (struct fip_header *)skb->data; - sub = fiph->fip_subcode; - -diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c -index 3242bca..45a83e7 100644 ---- a/drivers/scsi/gdth.c -+++ b/drivers/scsi/gdth.c -@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg) - unsigned long flags; - gdth_ha_str *ha; - -+ pax_track_stack(); -+ - if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv))) - return -EFAULT; - ha = gdth_find_ha(ldrv.ionode); -@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd) - gdth_ha_str *ha; - int rval; - -+ pax_track_stack(); -+ - if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) || - res.number >= MAX_HDRIVES) - return -EFAULT; -@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd) - gdth_ha_str *ha; - int rval; - -+ pax_track_stack(); -+ - if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general))) - return -EFAULT; - ha = gdth_find_ha(gen.ionode); -@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha) - int i; - gdth_cmd_str gdtcmd; - char cmnd[MAX_COMMAND_SIZE]; -+ -+ pax_track_stack(); -+ - memset(cmnd, 0xff, MAX_COMMAND_SIZE); - - TRACE2(("gdth_flush() hanum %d\n", ha->hanum)); -diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c -index 6527543..81e4fe2 100644 ---- a/drivers/scsi/gdth_proc.c -+++ b/drivers/scsi/gdth_proc.c -@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, - u64 paddr; - - char cmnd[MAX_COMMAND_SIZE]; -+ -+ pax_track_stack(); -+ - memset(cmnd, 0xff, 12); - memset(&gdtcmd, 0, sizeof(gdth_cmd_str)); - -@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, - gdth_hget_str *phg; - char cmnd[MAX_COMMAND_SIZE]; - -+ pax_track_stack(); -+ - gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL); - estr = kmalloc(sizeof(*estr), GFP_KERNEL); - if (!gdtcmd || !estr) -diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c -index 351dc0b..951dc32 100644 ---- a/drivers/scsi/hosts.c -+++ b/drivers/scsi/hosts.c -@@ -42,7 +42,7 @@ - #include 
"scsi_logging.h" - - --static atomic_t scsi_host_next_hn; /* host_no for next new host */ -+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */ - - - static void scsi_host_cls_release(struct device *dev) -@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) - * subtract one because we increment first then return, but we need to - * know what the next host number was before increment - */ -- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; -+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1; - shost->dma_channel = 0xff; - - /* These three are default values which can be overridden */ -diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c -index 418ce83..7ee1225 100644 ---- a/drivers/scsi/hpsa.c -+++ b/drivers/scsi/hpsa.c -@@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h) - u32 a; - - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) -- return h->access.command_completed(h); -+ return h->access->command_completed(h); - - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ -@@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h) - while (!list_empty(&h->reqQ)) { - c = list_entry(h->reqQ.next, struct CommandList, list); - /* can't do anything if fifo is full */ -- if ((h->access.fifo_full(h))) { -+ if ((h->access->fifo_full(h))) { - dev_warn(&h->pdev->dev, "fifo full\n"); - break; - } -@@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h) - h->Qdepth--; - - /* Tell the controller execute command */ -- h->access.submit_command(h, c); -+ h->access->submit_command(h, c); - - /* Put job onto the completed Q */ - addQ(&h->cmpQ, c); -@@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h) - - static inline unsigned long get_next_completion(struct ctlr_info *h) - { -- return h->access.command_completed(h); -+ return h->access->command_completed(h); - } - - static inline bool interrupt_pending(struct ctlr_info *h) - { -- return h->access.intr_pending(h); -+ return h->access->intr_pending(h); - } - - static inline long interrupt_not_for_us(struct ctlr_info *h) - { -- return (h->access.intr_pending(h) == 0) || -+ return (h->access->intr_pending(h) == 0) || - (h->interrupts_enabled == 0); - } - -@@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h) - if (prod_index < 0) - return -ENODEV; - h->product_name = products[prod_index].product_name; -- h->access = *(products[prod_index].access); -+ h->access = products[prod_index].access; - - if (hpsa_board_disabled(h->pdev)) { - dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); -@@ -4163,7 +4163,7 @@ reinit_after_soft_reset: - } - - /* make sure the board interrupts are off */ -- h->access.set_intr_mask(h, HPSA_INTR_OFF); -+ h->access->set_intr_mask(h, HPSA_INTR_OFF); - - if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) - goto clean2; -@@ -4197,7 +4197,7 @@ reinit_after_soft_reset: - * fake ones to scoop up any residual completions. 
- */ - spin_lock_irqsave(&h->lock, flags); -- h->access.set_intr_mask(h, HPSA_INTR_OFF); -+ h->access->set_intr_mask(h, HPSA_INTR_OFF); - spin_unlock_irqrestore(&h->lock, flags); - free_irq(h->intr[h->intr_mode], h); - rc = hpsa_request_irq(h, hpsa_msix_discard_completions, -@@ -4216,9 +4216,9 @@ reinit_after_soft_reset: - dev_info(&h->pdev->dev, "Board READY.\n"); - dev_info(&h->pdev->dev, - "Waiting for stale completions to drain.\n"); -- h->access.set_intr_mask(h, HPSA_INTR_ON); -+ h->access->set_intr_mask(h, HPSA_INTR_ON); - msleep(10000); -- h->access.set_intr_mask(h, HPSA_INTR_OFF); -+ h->access->set_intr_mask(h, HPSA_INTR_OFF); - - rc = controller_reset_failed(h->cfgtable); - if (rc) -@@ -4239,7 +4239,7 @@ reinit_after_soft_reset: - } - - /* Turn the interrupts on so we can service requests */ -- h->access.set_intr_mask(h, HPSA_INTR_ON); -+ h->access->set_intr_mask(h, HPSA_INTR_ON); - - hpsa_hba_inquiry(h); - hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ -@@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) - * To write all data in the battery backed cache to disks - */ - hpsa_flush_cache(h); -- h->access.set_intr_mask(h, HPSA_INTR_OFF); -+ h->access->set_intr_mask(h, HPSA_INTR_OFF); - free_irq(h->intr[h->intr_mode], h); - #ifdef CONFIG_PCI_MSI - if (h->msix_vector) -@@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, - return; - } - /* Change the access methods to the performant access methods */ -- h->access = SA5_performant_access; -+ h->access = &SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; - } - -diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h -index 7f53cea..a8c7188 100644 ---- a/drivers/scsi/hpsa.h -+++ b/drivers/scsi/hpsa.h -@@ -73,7 +73,7 @@ struct ctlr_info { - unsigned int msix_vector; - unsigned int msi_vector; - int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ -- struct access_method access; -+ struct access_method *access; - - /* queue and queue Info */ - struct list_head reqQ; -diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h -index f2df059..a3a9930 100644 ---- a/drivers/scsi/ips.h -+++ b/drivers/scsi/ips.h -@@ -1027,7 +1027,7 @@ typedef struct { - int (*intr)(struct ips_ha *); - void (*enableint)(struct ips_ha *); - uint32_t (*statupd)(struct ips_ha *); --} ips_hw_func_t; -+} __no_const ips_hw_func_t; - - typedef struct ips_ha { - uint8_t ha_id[IPS_MAX_CHANNELS+1]; -diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c -index d261e98..1e00f35 100644 ---- a/drivers/scsi/libfc/fc_exch.c -+++ b/drivers/scsi/libfc/fc_exch.c -@@ -105,12 +105,12 @@ struct fc_exch_mgr { - * all together if not used XXX - */ - struct { -- atomic_t no_free_exch; -- atomic_t no_free_exch_xid; -- atomic_t xid_not_found; -- atomic_t xid_busy; -- atomic_t seq_not_found; -- atomic_t non_bls_resp; -+ atomic_unchecked_t no_free_exch; -+ atomic_unchecked_t no_free_exch_xid; -+ atomic_unchecked_t xid_not_found; -+ atomic_unchecked_t xid_busy; -+ atomic_unchecked_t seq_not_found; -+ atomic_unchecked_t non_bls_resp; - } stats; - }; - -@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, - /* allocate memory for exchange */ - ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); - if (!ep) { -- atomic_inc(&mp->stats.no_free_exch); -+ atomic_inc_unchecked(&mp->stats.no_free_exch); - goto out; - } - memset(ep, 0, sizeof(*ep)); -@@ -779,7 +779,7 @@ out: - return ep; - err: - spin_unlock_bh(&pool->lock); -- atomic_inc(&mp->stats.no_free_exch_xid); -+ 
atomic_inc_unchecked(&mp->stats.no_free_exch_xid); - mempool_free(ep, mp->ep_pool); - return NULL; - } -@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, - xid = ntohs(fh->fh_ox_id); /* we originated exch */ - ep = fc_exch_find(mp, xid); - if (!ep) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - reject = FC_RJT_OX_ID; - goto out; - } -@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, - ep = fc_exch_find(mp, xid); - if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { - if (ep) { -- atomic_inc(&mp->stats.xid_busy); -+ atomic_inc_unchecked(&mp->stats.xid_busy); - reject = FC_RJT_RX_ID; - goto rel; - } -@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, - } - xid = ep->xid; /* get our XID */ - } else if (!ep) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - reject = FC_RJT_RX_ID; /* XID not found */ - goto out; - } -@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, - } else { - sp = &ep->seq; - if (sp->id != fh->fh_seq_id) { -- atomic_inc(&mp->stats.seq_not_found); -+ atomic_inc_unchecked(&mp->stats.seq_not_found); - if (f_ctl & FC_FC_END_SEQ) { - /* - * Update sequence_id based on incoming last -@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) - - ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); - if (!ep) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - goto out; - } - if (ep->esb_stat & ESB_ST_COMPLETE) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - goto rel; - } - if (ep->rxid == FC_XID_UNKNOWN) - ep->rxid = ntohs(fh->fh_rx_id); - if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - goto rel; - } - if (ep->did != ntoh24(fh->fh_s_id) && - ep->did != FC_FID_FLOGI) { -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - goto rel; - } - sof = fr_sof(fp); -@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) - sp->ssb_stat |= SSB_ST_RESP; - sp->id = fh->fh_seq_id; - } else if (sp->id != fh->fh_seq_id) { -- atomic_inc(&mp->stats.seq_not_found); -+ atomic_inc_unchecked(&mp->stats.seq_not_found); - goto rel; - } - -@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) - sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ - - if (!sp) -- atomic_inc(&mp->stats.xid_not_found); -+ atomic_inc_unchecked(&mp->stats.xid_not_found); - else -- atomic_inc(&mp->stats.non_bls_resp); -+ atomic_inc_unchecked(&mp->stats.non_bls_resp); - - fc_frame_free(fp); - } -diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c -index db9238f..4378ed2 100644 ---- a/drivers/scsi/libsas/sas_ata.c -+++ b/drivers/scsi/libsas/sas_ata.c -@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = { - .postreset = ata_std_postreset, - .error_handler = ata_std_error_handler, - .post_internal_cmd = sas_ata_post_internal, -- .qc_defer = ata_std_qc_defer, -+ .qc_defer = ata_std_qc_defer, - .qc_prep = ata_noop_qc_prep, - .qc_issue = sas_ata_qc_issue, - .qc_fill_rtf = sas_ata_qc_fill_rtf, -diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h -index 
c088a36..01c73b0 100644 ---- a/drivers/scsi/lpfc/lpfc.h -+++ b/drivers/scsi/lpfc/lpfc.h -@@ -425,7 +425,7 @@ struct lpfc_vport { - struct dentry *debug_nodelist; - struct dentry *vport_debugfs_root; - struct lpfc_debugfs_trc *disc_trc; -- atomic_t disc_trc_cnt; -+ atomic_unchecked_t disc_trc_cnt; - #endif - uint8_t stat_data_enabled; - uint8_t stat_data_blocked; -@@ -835,8 +835,8 @@ struct lpfc_hba { - struct timer_list fabric_block_timer; - unsigned long bit_flags; - #define FABRIC_COMANDS_BLOCKED 0 -- atomic_t num_rsrc_err; -- atomic_t num_cmd_success; -+ atomic_unchecked_t num_rsrc_err; -+ atomic_unchecked_t num_cmd_success; - unsigned long last_rsrc_error_time; - unsigned long last_ramp_down_time; - unsigned long last_ramp_up_time; -@@ -850,7 +850,7 @@ struct lpfc_hba { - struct dentry *debug_dumpDif; /* BlockGuard BPL*/ - struct dentry *debug_slow_ring_trc; - struct lpfc_debugfs_trc *slow_ring_trc; -- atomic_t slow_ring_trc_cnt; -+ atomic_unchecked_t slow_ring_trc_cnt; - /* iDiag debugfs sub-directory */ - struct dentry *idiag_root; - struct dentry *idiag_pci_cfg; -diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c -index a0424dd..2499b6b 100644 ---- a/drivers/scsi/lpfc/lpfc_debugfs.c -+++ b/drivers/scsi/lpfc/lpfc_debugfs.c -@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, - - #include <linux/debugfs.h> - --static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); -+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); - static unsigned long lpfc_debugfs_start_time = 0L; - - /* iDiag */ -@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) - lpfc_debugfs_enable = 0; - - len = 0; -- index = (atomic_read(&vport->disc_trc_cnt) + 1) & -+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & - (lpfc_debugfs_max_disc_trc - 1); - for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { - dtp = vport->disc_trc + i; -@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) - lpfc_debugfs_enable = 0; - - len = 0; -- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & -+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & - (lpfc_debugfs_max_slow_ring_trc - 1); - for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { - dtp = phba->slow_ring_trc + i; -@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, - !vport || !vport->disc_trc) - return; - -- index = atomic_inc_return(&vport->disc_trc_cnt) & -+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & - (lpfc_debugfs_max_disc_trc - 1); - dtp = vport->disc_trc + index; - dtp->fmt = fmt; - dtp->data1 = data1; - dtp->data2 = data2; - dtp->data3 = data3; -- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); -+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); - dtp->jif = jiffies; - #endif - return; -@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, - !phba || !phba->slow_ring_trc) - return; - -- index = atomic_inc_return(&phba->slow_ring_trc_cnt) & -+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & - (lpfc_debugfs_max_slow_ring_trc - 1); - dtp = phba->slow_ring_trc + index; - dtp->fmt = fmt; - dtp->data1 = data1; - dtp->data2 = data2; - dtp->data3 = data3; -- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); -+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); - dtp->jif = jiffies; - #endif - return; -@@ -3828,7 +3828,7 @@ 
lpfc_debugfs_initialize(struct lpfc_vport *vport) - "slow_ring buffer\n"); - goto debug_failed; - } -- atomic_set(&phba->slow_ring_trc_cnt, 0); -+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); - memset(phba->slow_ring_trc, 0, - (sizeof(struct lpfc_debugfs_trc) * - lpfc_debugfs_max_slow_ring_trc)); -@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) - "buffer\n"); - goto debug_failed; - } -- atomic_set(&vport->disc_trc_cnt, 0); -+ atomic_set_unchecked(&vport->disc_trc_cnt, 0); - - snprintf(name, sizeof(name), "discovery_trace"); - vport->debug_disc_trc = -diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c -index a3c8200..31e562e 100644 ---- a/drivers/scsi/lpfc/lpfc_init.c -+++ b/drivers/scsi/lpfc/lpfc_init.c -@@ -9969,8 +9969,10 @@ lpfc_init(void) - printk(LPFC_COPYRIGHT "\n"); - - if (lpfc_enable_npiv) { -- lpfc_transport_functions.vport_create = lpfc_vport_create; -- lpfc_transport_functions.vport_delete = lpfc_vport_delete; -+ pax_open_kernel(); -+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create; -+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete; -+ pax_close_kernel(); - } - lpfc_transport_template = - fc_attach_transport(&lpfc_transport_functions); -diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c -index eadd241..26c8e0f 100644 ---- a/drivers/scsi/lpfc/lpfc_scsi.c -+++ b/drivers/scsi/lpfc/lpfc_scsi.c -@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) - uint32_t evt_posted; - - spin_lock_irqsave(&phba->hbalock, flags); -- atomic_inc(&phba->num_rsrc_err); -+ atomic_inc_unchecked(&phba->num_rsrc_err); - phba->last_rsrc_error_time = jiffies; - - if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { -@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, - unsigned long flags; - struct lpfc_hba *phba = vport->phba; - uint32_t evt_posted; -- atomic_inc(&phba->num_cmd_success); -+ atomic_inc_unchecked(&phba->num_cmd_success); - - if (vport->cfg_lun_queue_depth <= queue_depth) - return; -@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) - unsigned long num_rsrc_err, num_cmd_success; - int i; - -- num_rsrc_err = atomic_read(&phba->num_rsrc_err); -- num_cmd_success = atomic_read(&phba->num_cmd_success); -+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); -+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); - - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) -@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) - } - } - lpfc_destroy_vport_work_array(phba, vports); -- atomic_set(&phba->num_rsrc_err, 0); -- atomic_set(&phba->num_cmd_success, 0); -+ atomic_set_unchecked(&phba->num_rsrc_err, 0); -+ atomic_set_unchecked(&phba->num_cmd_success, 0); - } - - /** -@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) - } - } - lpfc_destroy_vport_work_array(phba, vports); -- atomic_set(&phba->num_rsrc_err, 0); -- atomic_set(&phba->num_cmd_success, 0); -+ atomic_set_unchecked(&phba->num_rsrc_err, 0); -+ atomic_set_unchecked(&phba->num_cmd_success, 0); - } - - /** -diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c -index 2e6619e..fa64494 100644 ---- a/drivers/scsi/megaraid/megaraid_mbox.c -+++ b/drivers/scsi/megaraid/megaraid_mbox.c -@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter) - int rval; - int i; - -+ pax_track_stack(); -+ - // Allocate memory for the base list of scb for 
management module. - adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL); - -diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c -index 86afb13f..c912398 100644 ---- a/drivers/scsi/osd/osd_initiator.c -+++ b/drivers/scsi/osd/osd_initiator.c -@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od, - int nelem = ARRAY_SIZE(get_attrs), a = 0; - int ret; - -+ pax_track_stack(); -+ - or = osd_start_request(od, GFP_KERNEL); - if (!or) - return -ENOMEM; -diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c -index d079f9a..d26072c 100644 ---- a/drivers/scsi/pmcraid.c -+++ b/drivers/scsi/pmcraid.c -@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) - res->scsi_dev = scsi_dev; - scsi_dev->hostdata = res; - res->change_detected = 0; -- atomic_set(&res->read_failures, 0); -- atomic_set(&res->write_failures, 0); -+ atomic_set_unchecked(&res->read_failures, 0); -+ atomic_set_unchecked(&res->write_failures, 0); - rc = 0; - } - spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); -@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) - - /* If this was a SCSI read/write command keep count of errors */ - if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) -- atomic_inc(&res->read_failures); -+ atomic_inc_unchecked(&res->read_failures); - else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) -- atomic_inc(&res->write_failures); -+ atomic_inc_unchecked(&res->write_failures); - - if (!RES_IS_GSCSI(res->cfg_entry) && - masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { -@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck( - * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses - * hrrq_id assigned here in queuecommand - */ -- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % -+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % - pinstance->num_hrrq; - cmd->cmd_done = pmcraid_io_done; - -@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough( - * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses - * hrrq_id assigned here in queuecommand - */ -- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % -+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % - pinstance->num_hrrq; - - if (request_size) { -@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp) - - pinstance = container_of(workp, struct pmcraid_instance, worker_q); - /* add resources only after host is added into system */ -- if (!atomic_read(&pinstance->expose_resources)) -+ if (!atomic_read_unchecked(&pinstance->expose_resources)) - return; - - fw_version = be16_to_cpu(pinstance->inq_data->fw_version); -@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance( - init_waitqueue_head(&pinstance->reset_wait_q); - - atomic_set(&pinstance->outstanding_cmds, 0); -- atomic_set(&pinstance->last_message_id, 0); -- atomic_set(&pinstance->expose_resources, 0); -+ atomic_set_unchecked(&pinstance->last_message_id, 0); -+ atomic_set_unchecked(&pinstance->expose_resources, 0); - - INIT_LIST_HEAD(&pinstance->free_res_q); - INIT_LIST_HEAD(&pinstance->used_res_q); -@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe( - /* Schedule worker thread to handle CCN and take care of adding and - * removing devices to OS - */ -- atomic_set(&pinstance->expose_resources, 1); -+ atomic_set_unchecked(&pinstance->expose_resources, 1); - schedule_work(&pinstance->worker_q); - return rc; - -diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h -index f920baf..4417389 100644 ---- a/drivers/scsi/pmcraid.h -+++ b/drivers/scsi/pmcraid.h -@@ -749,7 +749,7 @@ struct pmcraid_instance { - struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; - - /* Message id as filled in last fired IOARCB, used to identify HRRQ */ -- atomic_t last_message_id; -+ atomic_unchecked_t last_message_id; - - /* configuration table */ - struct pmcraid_config_table *cfg_table; -@@ -778,7 +778,7 @@ struct pmcraid_instance { - atomic_t outstanding_cmds; - - /* should add/delete resources to mid-layer now ?*/ -- atomic_t expose_resources; -+ atomic_unchecked_t expose_resources; - - - -@@ -814,8 +814,8 @@ struct pmcraid_resource_entry { - struct pmcraid_config_table_entry_ext cfg_entry_ext; - }; - struct scsi_device *scsi_dev; /* Link scsi_device structure */ -- atomic_t read_failures; /* count of failed READ commands */ -- atomic_t write_failures; /* count of failed WRITE commands */ -+ atomic_unchecked_t read_failures; /* count of failed READ commands */ -+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */ - - /* To indicate add/delete/modify during CCN */ - u8 change_detected; -diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h -index a03eaf4..a6b3fd9 100644 ---- a/drivers/scsi/qla2xxx/qla_def.h -+++ b/drivers/scsi/qla2xxx/qla_def.h -@@ -2244,7 +2244,7 @@ struct isp_operations { - int (*get_flash_version) (struct scsi_qla_host *, void *); - int (*start_scsi) (srb_t *); - int (*abort_isp) (struct scsi_qla_host *); --}; -+} __no_const; - - /* MSI-X Support *************************************************************/ - -diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h -index 473c5c8..4e2f24a 100644 ---- a/drivers/scsi/qla4xxx/ql4_def.h -+++ b/drivers/scsi/qla4xxx/ql4_def.h -@@ -256,7 +256,7 @@ struct ddb_entry { - atomic_t retry_relogin_timer; /* Min Time between relogins - * (4000 only) */ - atomic_t relogin_timer; /* Max Time to wait for relogin to complete */ -- atomic_t 
relogin_retry_count; /* Num of times relogin has been -+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been - * retried */ - - uint16_t port; -diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c -index 42ed5db..0262f9e 100644 ---- a/drivers/scsi/qla4xxx/ql4_init.c -+++ b/drivers/scsi/qla4xxx/ql4_init.c -@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, - ddb_entry->fw_ddb_index = fw_ddb_index; - atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); - atomic_set(&ddb_entry->relogin_timer, 0); -- atomic_set(&ddb_entry->relogin_retry_count, 0); -+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); - list_add_tail(&ddb_entry->list, &ha->ddb_list); - ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; -@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, - if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && - (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); -- atomic_set(&ddb_entry->relogin_retry_count, 0); -+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); - atomic_set(&ddb_entry->relogin_timer, 0); - clear_bit(DF_RELOGIN, &ddb_entry->flags); - iscsi_unblock_session(ddb_entry->sess); -diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c -index f2364ec..44c42b1 100644 ---- a/drivers/scsi/qla4xxx/ql4_os.c -+++ b/drivers/scsi/qla4xxx/ql4_os.c -@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) - ddb_entry->fw_ddb_device_state == - DDB_DS_SESSION_FAILED) { - /* Reset retry relogin timer */ -- atomic_inc(&ddb_entry->relogin_retry_count); -+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count); - DEBUG2(printk("scsi%ld: ddb [%d] relogin" - " timed out-retrying" - " relogin (%d)\n", - ha->host_no, - ddb_entry->fw_ddb_index, -- atomic_read(&ddb_entry-> -+ atomic_read_unchecked(&ddb_entry-> - relogin_retry_count)) - ); - start_dpc++; -diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c -index 2aeb2e9..46e3925 100644 ---- a/drivers/scsi/scsi.c -+++ b/drivers/scsi/scsi.c -@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) - unsigned long timeout; - int rtn = 0; - -- atomic_inc(&cmd->device->iorequest_cnt); -+ atomic_inc_unchecked(&cmd->device->iorequest_cnt); - - /* check if the device is still usable */ - if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { -diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c -index 6888b2c..45befa1 100644 ---- a/drivers/scsi/scsi_debug.c -+++ b/drivers/scsi/scsi_debug.c -@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, - unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; - unsigned char *cmd = (unsigned char *)scp->cmnd; - -+ pax_track_stack(); -+ - if ((errsts = check_readiness(scp, 1, devip))) - return errsts; - memset(arr, 0, sizeof(arr)); -@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp, - unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; - unsigned char *cmd = (unsigned char *)scp->cmnd; - -+ pax_track_stack(); -+ - if ((errsts = check_readiness(scp, 1, devip))) - return errsts; - memset(arr, 0, sizeof(arr)); -diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index 6d219e4..eb3ded3 100644 ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) - shost = 
sdev->host; - scsi_init_cmd_errh(cmd); - cmd->result = DID_NO_CONNECT << 16; -- atomic_inc(&cmd->device->iorequest_cnt); -+ atomic_inc_unchecked(&cmd->device->iorequest_cnt); - - /* - * SCSI request completion path will do scsi_device_unbusy(), -@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq) - - INIT_LIST_HEAD(&cmd->eh_entry); - -- atomic_inc(&cmd->device->iodone_cnt); -+ atomic_inc_unchecked(&cmd->device->iodone_cnt); - if (cmd->result) -- atomic_inc(&cmd->device->ioerr_cnt); -+ atomic_inc_unchecked(&cmd->device->ioerr_cnt); - - disposition = scsi_decide_disposition(cmd); - if (disposition != SUCCESS && -diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c -index e0bd3f7..816b8a6 100644 ---- a/drivers/scsi/scsi_sysfs.c -+++ b/drivers/scsi/scsi_sysfs.c -@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ - char *buf) \ - { \ - struct scsi_device *sdev = to_scsi_device(dev); \ -- unsigned long long count = atomic_read(&sdev->field); \ -+ unsigned long long count = atomic_read_unchecked(&sdev->field); \ - return snprintf(buf, 20, "0x%llx\n", count); \ - } \ - static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) -diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c -index 84a1fdf..693b0d6 100644 ---- a/drivers/scsi/scsi_tgt_lib.c -+++ b/drivers/scsi/scsi_tgt_lib.c -@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, - int err; - - dprintk("%lx %u\n", uaddr, len); -- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); -+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL); - if (err) { - /* - * TODO: need to fixup sg_tablesize, max_segment_size, -diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c -index 1b21491..1b7f60e 100644 ---- a/drivers/scsi/scsi_transport_fc.c -+++ b/drivers/scsi/scsi_transport_fc.c -@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class, - * Netlink Infrastructure - */ - --static atomic_t fc_event_seq; -+static atomic_unchecked_t fc_event_seq; - - /** - * fc_get_event_number - Obtain the next sequential FC event number -@@ -497,7 +497,7 @@ static atomic_t fc_event_seq; - u32 - fc_get_event_number(void) - { -- return atomic_add_return(1, &fc_event_seq); -+ return atomic_add_return_unchecked(1, &fc_event_seq); - } - EXPORT_SYMBOL(fc_get_event_number); - -@@ -645,7 +645,7 @@ static __init int fc_transport_init(void) - { - int error; - -- atomic_set(&fc_event_seq, 0); -+ atomic_set_unchecked(&fc_event_seq, 0); - - error = transport_class_register(&fc_host_class); - if (error) -@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) - char *cp; - - *val = simple_strtoul(buf, &cp, 0); -- if ((*cp && (*cp != '\n')) || (*val < 0)) -+ if (*cp && (*cp != '\n')) - return -EINVAL; - /* - * Check for overflow; dev_loss_tmo is u32 -diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c -index 3fd16d7..ba0871f 100644 ---- a/drivers/scsi/scsi_transport_iscsi.c -+++ b/drivers/scsi/scsi_transport_iscsi.c -@@ -83,7 +83,7 @@ struct iscsi_internal { - struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; - }; - --static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ -+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ - static struct workqueue_struct *iscsi_eh_timer_workq; - - /* -@@ -761,7 +761,7 @@ int 
iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) - int err; - - ihost = shost->shost_data; -- session->sid = atomic_add_return(1, &iscsi_session_nr); -+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); - - if (id == ISCSI_MAX_TARGET) { - for (id = 0; id < ISCSI_MAX_TARGET; id++) { -@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void) - printk(KERN_INFO "Loading iSCSI transport class v%s.\n", - ISCSI_TRANSPORT_VERSION); - -- atomic_set(&iscsi_session_nr, 0); -+ atomic_set_unchecked(&iscsi_session_nr, 0); - - err = class_register(&iscsi_transport_class); - if (err) -diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c -index 21a045e..ec89e03 100644 ---- a/drivers/scsi/scsi_transport_srp.c -+++ b/drivers/scsi/scsi_transport_srp.c -@@ -33,7 +33,7 @@ - #include "scsi_transport_srp_internal.h" - - struct srp_host_attrs { -- atomic_t next_port_id; -+ atomic_unchecked_t next_port_id; - }; - #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) - -@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev, - struct Scsi_Host *shost = dev_to_shost(dev); - struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); - -- atomic_set(&srp_host->next_port_id, 0); -+ atomic_set_unchecked(&srp_host->next_port_id, 0); - return 0; - } - -@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, - memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); - rport->roles = ids->roles; - -- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); -+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); - dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); - - transport_setup_device(&rport->dev); -diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c -index 909ed9e..1ae290a 100644 ---- a/drivers/scsi/sg.c -+++ b/drivers/scsi/sg.c -@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) - sdp->disk->disk_name, - MKDEV(SCSI_GENERIC_MAJOR, sdp->index), - NULL, -- (char *)arg); -+ (char __user *)arg); - case BLKTRACESTART: - return blk_trace_startstop(sdp->device->request_queue, 1); - case BLKTRACESTOP: -@@ -2310,7 +2310,7 @@ struct sg_proc_leaf { - const struct file_operations * fops; - }; - --static struct sg_proc_leaf sg_proc_leaf_arr[] = { -+static const struct sg_proc_leaf sg_proc_leaf_arr[] = { - {"allow_dio", &adio_fops}, - {"debug", &debug_fops}, - {"def_reserved_size", &dressz_fops}, -@@ -2325,7 +2325,7 @@ sg_proc_init(void) - { - int k, mask; - int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); -- struct sg_proc_leaf * leaf; -+ const struct sg_proc_leaf * leaf; - - sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); - if (!sg_proc_sgp) -diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c -index b4543f5..e1b34b8 100644 ---- a/drivers/scsi/sym53c8xx_2/sym_glue.c -+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c -@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev, - int do_iounmap = 0; - int do_disable_device = 1; - -+ pax_track_stack(); -+ - memset(&sym_dev, 0, sizeof(sym_dev)); - memset(&nvram, 0, sizeof(nvram)); - sym_dev.pdev = pdev; -diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c -index a18996d..fe993cb 100644 ---- a/drivers/scsi/vmw_pvscsi.c -+++ b/drivers/scsi/vmw_pvscsi.c -@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) - dma_addr_t base; - unsigned i; - 
-+ pax_track_stack(); -+ - cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; - cmd.reqRingNumPages = adapter->req_pages; - cmd.cmpRingNumPages = adapter->cmp_pages; -diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c -index c5f37f0..898d202 100644 ---- a/drivers/spi/spi-dw-pci.c -+++ b/drivers/spi/spi-dw-pci.c -@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev) - #define spi_resume NULL - #endif - --static const struct pci_device_id pci_ids[] __devinitdata = { -+static const struct pci_device_id pci_ids[] __devinitconst = { - /* Intel MID platform SPI controller 0 */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, - {}, -diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c -index 4d1b9f5..8408fe3 100644 ---- a/drivers/spi/spi.c -+++ b/drivers/spi/spi.c -@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master) - EXPORT_SYMBOL_GPL(spi_bus_unlock); - - /* portable code must never pass more than 32 bytes */ --#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) -+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES) - - static u8 *buf; - -diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c -index 32ee39a..3004c3d 100644 ---- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c -+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c -@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM]; - (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0)) - - --static struct net_device_ops ar6000_netdev_ops = { -+static net_device_ops_no_const ar6000_netdev_ops = { - .ndo_init = NULL, - .ndo_open = ar6000_open, - .ndo_stop = ar6000_close, -diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h -index 39e0873..0925710 100644 ---- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h -+++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h -@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb); - typedef struct ar6k_pal_config_s - { - ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt; --}ar6k_pal_config_t; -+} __no_const ar6k_pal_config_t; - - void register_pal_cb(ar6k_pal_config_t *palConfig_p); - #endif /* _AR6K_PAL_H_ */ -diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c -index 05dada9..96171c6 100644 ---- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c -+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c -@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp) - free_netdev(ifp->net); - } - /* Allocate etherdev, including space for private structure */ -- ifp->net = alloc_etherdev(sizeof(drvr_priv)); -+ ifp->net = alloc_etherdev(sizeof(*drvr_priv)); - if (!ifp->net) { - BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__)); - ret = -ENOMEM; - } - if (ret == 0) { - strcpy(ifp->net->name, ifp->name); -- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv)); -+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv)); - err = brcmf_net_attach(&drvr_priv->pub, ifp->idx); - if (err != 0) { - BRCMF_ERROR(("%s: brcmf_net_attach failed, " -@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen) - BRCMF_TRACE(("%s: Enter\n", __func__)); - - /* Allocate etherdev, including space for private structure */ -- net = alloc_etherdev(sizeof(drvr_priv)); -+ net = alloc_etherdev(sizeof(*drvr_priv)); - if (!net) { - BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__)); - goto fail; -@@ -1295,7 +1295,7 
@@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen) - /* - * Save the brcmf_info into the priv - */ -- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv)); -+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv)); - - /* Set network interface name if it was provided as module parameter */ - if (iface_name[0]) { -@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen) - /* - * Save the brcmf_info into the priv - */ -- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv)); -+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv)); - - #if defined(CONFIG_PM_SLEEP) - atomic_set(&brcmf_mmc_suspend, false); -diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h -index d345472..cedb19e 100644 ---- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h -+++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h -@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver { - u16 func, uint bustype, u32 regsva, void *param); - /* detach from device */ - void (*detach) (void *ch); --}; -+} __no_const; - - struct sdioh_info; - -diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h -index a01b01c..b3f721c 100644 ---- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h -+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h -@@ -591,7 +591,7 @@ struct phy_func_ptr { - initfn_t carrsuppr; - rxsigpwrfn_t rxsigpwr; - detachfn_t detach; --}; -+} __no_const; - - struct brcms_phy { - struct brcms_phy_pub pubpi_ro; -diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c -index 8fb3051..a8b6c67 100644 ---- a/drivers/staging/et131x/et1310_tx.c -+++ b/drivers/staging/et131x/et1310_tx.c -@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev, - struct net_device_stats *stats = &etdev->net_stats; - - if (tcb->flags & fMP_DEST_BROAD) -- atomic_inc(&etdev->stats.brdcstxmt); -+ atomic_inc_unchecked(&etdev->stats.brdcstxmt); - else if (tcb->flags & fMP_DEST_MULTI) -- atomic_inc(&etdev->stats.multixmt); -+ atomic_inc_unchecked(&etdev->stats.multixmt); - else -- atomic_inc(&etdev->stats.unixmt); -+ atomic_inc_unchecked(&etdev->stats.unixmt); - - if (tcb->skb) { - stats->tx_bytes += tcb->skb->len; -diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h -index 408c50b..fd65e9f 100644 ---- a/drivers/staging/et131x/et131x_adapter.h -+++ b/drivers/staging/et131x/et131x_adapter.h -@@ -106,11 +106,11 @@ struct ce_stats { - * operations - */ - u32 unircv; /* # multicast packets received */ -- atomic_t unixmt; /* # multicast packets for Tx */ -+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */ - u32 multircv; /* # multicast packets received */ -- atomic_t multixmt; /* # multicast packets for Tx */ -+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */ - u32 brdcstrcv; /* # broadcast packets received */ -- atomic_t brdcstxmt; /* # broadcast packets for Tx */ -+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */ - u32 norcvbuf; /* # Rx packets discarded */ - u32 noxmtbuf; /* # Tx packets discarded */ - -diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c -index 455f47a..86205ff 100644 ---- a/drivers/staging/hv/channel.c -+++ b/drivers/staging/hv/channel.c -@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, - int ret = 0; - int t; - -- next_gpadl_handle = 
atomic_read(&vmbus_connection.next_gpadl_handle); -- atomic_inc(&vmbus_connection.next_gpadl_handle); -+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle); -+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle); - - ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount); - if (ret) -diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c -index 824f816..a800af7 100644 ---- a/drivers/staging/hv/hv.c -+++ b/drivers/staging/hv/hv.c -@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output) - u64 output_address = (output) ? virt_to_phys(output) : 0; - u32 output_address_hi = output_address >> 32; - u32 output_address_lo = output_address & 0xFFFFFFFF; -- volatile void *hypercall_page = hv_context.hypercall_page; -+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page); - - __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), - "=a"(hv_status_lo) : "d" (control_hi), -diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c -index d957fc2..43cedd9 100644 ---- a/drivers/staging/hv/hv_mouse.c -+++ b/drivers/staging/hv/hv_mouse.c -@@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len) - if (hid_dev) { - DPRINT_INFO(INPUTVSC_DRV, "hid_device created"); - -- hid_dev->ll_driver->open = mousevsc_hid_open; -- hid_dev->ll_driver->close = mousevsc_hid_close; -+ pax_open_kernel(); -+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open; -+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close; -+ pax_close_kernel(); - - hid_dev->bus = BUS_VIRTUAL; - hid_dev->vendor = input_device_ctx->device_info.vendor; -diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h -index 349ad80..3f75719 100644 ---- a/drivers/staging/hv/hyperv_vmbus.h -+++ b/drivers/staging/hv/hyperv_vmbus.h -@@ -559,7 +559,7 @@ enum vmbus_connect_state { - struct vmbus_connection { - enum vmbus_connect_state conn_state; - -- atomic_t next_gpadl_handle; -+ atomic_unchecked_t next_gpadl_handle; - - /* - * Represents channel interrupts. Each bit position represents a -diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c -index dbb5201..d6047c6 100644 ---- a/drivers/staging/hv/rndis_filter.c -+++ b/drivers/staging/hv/rndis_filter.c -@@ -43,7 +43,7 @@ struct rndis_device { - - enum rndis_device_state state; - u32 link_stat; -- atomic_t new_req_id; -+ atomic_unchecked_t new_req_id; - - spinlock_t request_lock; - struct list_head req_list; -@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev, - * template - */ - set = &rndis_msg->msg.set_req; -- set->req_id = atomic_inc_return(&dev->new_req_id); -+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id); - - /* Add to the request list */ - spin_lock_irqsave(&dev->request_lock, flags); -@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) - - /* Setup the rndis set */ - halt = &request->request_msg.msg.halt_req; -- halt->req_id = atomic_inc_return(&dev->new_req_id); -+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id); - - /* Ignore return since this msg is optional. 
*/ - rndis_filter_send_request(dev, request); -diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c -index 1c949f5..7a8b104 100644 ---- a/drivers/staging/hv/vmbus_drv.c -+++ b/drivers/staging/hv/vmbus_drv.c -@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj) - { - int ret = 0; - -- static atomic_t device_num = ATOMIC_INIT(0); -+ static atomic_unchecked_t device_num = ATOMIC_INIT(0); - - /* Set the device name. Otherwise, device_register() will fail. */ - dev_set_name(&child_device_obj->device, "vmbus_0_%d", -- atomic_inc_return(&device_num)); -+ atomic_inc_return_unchecked(&device_num)); - - /* The new device belongs to this bus */ - child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */ -diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h -index 3f26f71..fb5c787 100644 ---- a/drivers/staging/iio/ring_generic.h -+++ b/drivers/staging/iio/ring_generic.h -@@ -62,7 +62,7 @@ struct iio_ring_access_funcs { - - int (*is_enabled)(struct iio_ring_buffer *ring); - int (*enable)(struct iio_ring_buffer *ring); --}; -+} __no_const; - - struct iio_ring_setup_ops { - int (*preenable)(struct iio_dev *); -diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c -index cfec92d..a65dacf 100644 ---- a/drivers/staging/mei/interface.c -+++ b/drivers/staging/mei/interface.c -@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl) - mei_hdr->reserved = 0; - - mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1]; -- memset(mei_flow_control, 0, sizeof(mei_flow_control)); -+ memset(mei_flow_control, 0, sizeof(*mei_flow_control)); - mei_flow_control->host_addr = cl->host_client_id; - mei_flow_control->me_addr = cl->me_client_id; - mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD; -@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl) - - mei_cli_disconnect = - (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1]; -- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect)); -+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect)); - mei_cli_disconnect->host_addr = cl->host_client_id; - mei_cli_disconnect->me_addr = cl->me_client_id; - mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD; -diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c -index 8b307b4..a97ac91 100644 ---- a/drivers/staging/octeon/ethernet-rx.c -+++ b/drivers/staging/octeon/ethernet-rx.c -@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) - /* Increment RX stats for virtual ports */ - if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { - #ifdef CONFIG_64BIT -- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); -- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); -+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets); -+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes); - #else -- atomic_add(1, (atomic_t *)&priv->stats.rx_packets); -- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); -+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets); -+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes); - #endif - } - netif_receive_skb(skb); -@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) - dev->name); - */ - #ifdef CONFIG_64BIT -- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); -+ atomic64_unchecked_add(1, 
(atomic64_unchecked_t *)&priv->stats.rx_dropped); - #else -- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); -+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped); - #endif - dev_kfree_skb_irq(skb); - } -diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c -index a8f780e..aef1098 100644 ---- a/drivers/staging/octeon/ethernet.c -+++ b/drivers/staging/octeon/ethernet.c -@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) - * since the RX tasklet also increments it. - */ - #ifdef CONFIG_64BIT -- atomic64_add(rx_status.dropped_packets, -- (atomic64_t *)&priv->stats.rx_dropped); -+ atomic64_add_unchecked(rx_status.dropped_packets, -+ (atomic64_unchecked_t *)&priv->stats.rx_dropped); - #else -- atomic_add(rx_status.dropped_packets, -- (atomic_t *)&priv->stats.rx_dropped); -+ atomic_add_unchecked(rx_status.dropped_packets, -+ (atomic_unchecked_t *)&priv->stats.rx_dropped); - #endif - } - -diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c -index f3c6060..56bf826 100644 ---- a/drivers/staging/pohmelfs/inode.c -+++ b/drivers/staging/pohmelfs/inode.c -@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) - mutex_init(&psb->mcache_lock); - psb->mcache_root = RB_ROOT; - psb->mcache_timeout = msecs_to_jiffies(5000); -- atomic_long_set(&psb->mcache_gen, 0); -+ atomic_long_set_unchecked(&psb->mcache_gen, 0); - - psb->trans_max_pages = 100; - -@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) - INIT_LIST_HEAD(&psb->crypto_ready_list); - INIT_LIST_HEAD(&psb->crypto_active_list); - -- atomic_set(&psb->trans_gen, 1); -+ atomic_set_unchecked(&psb->trans_gen, 1); - atomic_long_set(&psb->total_inodes, 0); - - mutex_init(&psb->state_lock); -diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c -index e22665c..a2a9390 100644 ---- a/drivers/staging/pohmelfs/mcache.c -+++ b/drivers/staging/pohmelfs/mcache.c -@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start - m->data = data; - m->start = start; - m->size = size; -- m->gen = atomic_long_inc_return(&psb->mcache_gen); -+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen); - - mutex_lock(&psb->mcache_lock); - err = pohmelfs_mcache_insert(psb, m); -diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h -index 985b6b7..7699e05 100644 ---- a/drivers/staging/pohmelfs/netfs.h -+++ b/drivers/staging/pohmelfs/netfs.h -@@ -571,14 +571,14 @@ struct pohmelfs_config; - struct pohmelfs_sb { - struct rb_root mcache_root; - struct mutex mcache_lock; -- atomic_long_t mcache_gen; -+ atomic_long_unchecked_t mcache_gen; - unsigned long mcache_timeout; - - unsigned int idx; - - unsigned int trans_retries; - -- atomic_t trans_gen; -+ atomic_unchecked_t trans_gen; - - unsigned int crypto_attached_size; - unsigned int crypto_align_size; -diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c -index 36a2535..0591bf4 100644 ---- a/drivers/staging/pohmelfs/trans.c -+++ b/drivers/staging/pohmelfs/trans.c -@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb) - int err; - struct netfs_cmd *cmd = t->iovec.iov_base; - -- t->gen = atomic_inc_return(&psb->trans_gen); -+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen); - - cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + - 
t->attached_size + t->attached_pages * sizeof(struct netfs_cmd); -diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h -index b70cb2b..4db41a7 100644 ---- a/drivers/staging/rtl8712/rtl871x_io.h -+++ b/drivers/staging/rtl8712/rtl871x_io.h -@@ -83,7 +83,7 @@ struct _io_ops { - u8 *pmem); - u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, - u8 *pmem); --}; -+} __no_const; - - struct io_req { - struct list_head list; -diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c -index c7b5e8b..783d6cb 100644 ---- a/drivers/staging/sbe-2t3e3/netdev.c -+++ b/drivers/staging/sbe-2t3e3/netdev.c -@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) - t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen); - - if (rlen) -- if (copy_to_user(data, &resp, rlen)) -+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen)) - return -EFAULT; - - return 0; -diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h -index be21617..0954e45 100644 ---- a/drivers/staging/usbip/usbip_common.h -+++ b/drivers/staging/usbip/usbip_common.h -@@ -289,7 +289,7 @@ struct usbip_device { - void (*shutdown)(struct usbip_device *); - void (*reset)(struct usbip_device *); - void (*unusable)(struct usbip_device *); -- } eh_ops; -+ } __no_const eh_ops; - }; - - #if 0 -diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h -index 71a586e..4d8a91a 100644 ---- a/drivers/staging/usbip/vhci.h -+++ b/drivers/staging/usbip/vhci.h -@@ -85,7 +85,7 @@ struct vhci_hcd { - unsigned resuming:1; - unsigned long re_timeout; - -- atomic_t seqnum; -+ atomic_unchecked_t seqnum; - - /* - * NOTE: -diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c -index 2ee97e2..0420b86 100644 ---- a/drivers/staging/usbip/vhci_hcd.c -+++ b/drivers/staging/usbip/vhci_hcd.c -@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb) - return; - } - -- priv->seqnum = atomic_inc_return(&the_controller->seqnum); -+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); - if (priv->seqnum == 0xffff) - dev_info(&urb->dev->dev, "seqnum max\n"); - -@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) - return -ENOMEM; - } - -- unlink->seqnum = atomic_inc_return(&the_controller->seqnum); -+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); - if (unlink->seqnum == 0xffff) - pr_info("seqnum max\n"); - -@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd) - vdev->rhport = rhport; - } - -- atomic_set(&vhci->seqnum, 0); -+ atomic_set_unchecked(&vhci->seqnum, 0); - spin_lock_init(&vhci->lock); - - hcd->power_budget = 0; /* no limit */ -diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c -index 3872b8c..fe6d2f4 100644 ---- a/drivers/staging/usbip/vhci_rx.c -+++ b/drivers/staging/usbip/vhci_rx.c -@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, - if (!urb) { - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); - pr_info("max seqnum %d\n", -- atomic_read(&the_controller->seqnum)); -+ atomic_read_unchecked(&the_controller->seqnum)); - usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); - return; - } -diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c -index 7735027..30eed13 100644 ---- a/drivers/staging/vt6655/hostap.c -+++ b/drivers/staging/vt6655/hostap.c -@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO; - * - 
*/ - -+static net_device_ops_no_const apdev_netdev_ops; -+ - static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) - { - PSDevice apdev_priv; - struct net_device *dev = pDevice->dev; - int ret; -- const struct net_device_ops apdev_netdev_ops = { -- .ndo_start_xmit = pDevice->tx_80211, -- }; - - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); - -@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) - *apdev_priv = *pDevice; - memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); - -+ /* only half broken now */ -+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; - pDevice->apdev->netdev_ops = &apdev_netdev_ops; - - pDevice->apdev->type = ARPHRD_IEEE80211; -diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c -index 51b5adf..098e320 100644 ---- a/drivers/staging/vt6656/hostap.c -+++ b/drivers/staging/vt6656/hostap.c -@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO; - * - */ - -+static net_device_ops_no_const apdev_netdev_ops; -+ - static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) - { - PSDevice apdev_priv; - struct net_device *dev = pDevice->dev; - int ret; -- const struct net_device_ops apdev_netdev_ops = { -- .ndo_start_xmit = pDevice->tx_80211, -- }; - - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); - -@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) - *apdev_priv = *pDevice; - memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); - -+ /* only half broken now */ -+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; - pDevice->apdev->netdev_ops = &apdev_netdev_ops; - - pDevice->apdev->type = ARPHRD_IEEE80211; -diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c -index 7843dfd..3db105f 100644 ---- a/drivers/staging/wlan-ng/hfa384x_usb.c -+++ b/drivers/staging/wlan-ng/hfa384x_usb.c -@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); - - struct usbctlx_completor { - int (*complete) (struct usbctlx_completor *); --}; -+} __no_const; - - static int - hfa384x_usbctlx_complete_sync(hfa384x_t *hw, -diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c -index 1ca66ea..76f1343 100644 ---- a/drivers/staging/zcache/tmem.c -+++ b/drivers/staging/zcache/tmem.c -@@ -39,7 +39,7 @@ - * A tmem host implementation must use this function to register callbacks - * for memory allocation. 
- */ --static struct tmem_hostops tmem_hostops; -+static tmem_hostops_no_const tmem_hostops; - - static void tmem_objnode_tree_init(void); - -@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m) - * A tmem host implementation must use this function to register - * callbacks for a page-accessible memory (PAM) implementation - */ --static struct tmem_pamops tmem_pamops; -+static tmem_pamops_no_const tmem_pamops; - - void tmem_register_pamops(struct tmem_pamops *m) - { -diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h -index ed147c4..94fc3c6 100644 ---- a/drivers/staging/zcache/tmem.h -+++ b/drivers/staging/zcache/tmem.h -@@ -180,6 +180,7 @@ struct tmem_pamops { - void (*new_obj)(struct tmem_obj *); - int (*replace_in_obj)(void *, struct tmem_obj *); - }; -+typedef struct tmem_pamops __no_const tmem_pamops_no_const; - extern void tmem_register_pamops(struct tmem_pamops *m); - - /* memory allocation methods provided by the host implementation */ -@@ -189,6 +190,7 @@ struct tmem_hostops { - struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *); - void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *); - }; -+typedef struct tmem_hostops __no_const tmem_hostops_no_const; - extern void tmem_register_hostops(struct tmem_hostops *m); - - /* core tmem accessor functions */ -diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c -index 26a5d8b..74434f8 100644 ---- a/drivers/target/iscsi/iscsi_target.c -+++ b/drivers/target/iscsi/iscsi_target.c -@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) - * outstanding_r2ts reaches zero, go ahead and send the delayed - * TASK_ABORTED status. - */ -- if (atomic_read(&se_cmd->t_transport_aborted) != 0) { -+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) { - if (hdr->flags & ISCSI_FLAG_CMD_FINAL) - if (--cmd->outstanding_r2ts < 1) { - iscsit_stop_dataout_timer(cmd); -diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c -index 8badcb4..94c9ac6 100644 ---- a/drivers/target/target_core_alua.c -+++ b/drivers/target/target_core_alua.c -@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata( - char path[ALUA_METADATA_PATH_LEN]; - int len; - -+ pax_track_stack(); -+ - memset(path, 0, ALUA_METADATA_PATH_LEN); - - len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, -@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata( - char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; - int len; - -+ pax_track_stack(); -+ - memset(path, 0, ALUA_METADATA_PATH_LEN); - memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); - -diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c -index f04d4ef..7de212b 100644 ---- a/drivers/target/target_core_cdb.c -+++ b/drivers/target/target_core_cdb.c -@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) - int length = 0; - unsigned char buf[SE_MODE_PAGE_BUF]; - -+ pax_track_stack(); -+ - memset(buf, 0, SE_MODE_PAGE_BUF); - - switch (cdb[2] & 0x3f) { -diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c -index b2575d8..b6b28fd 100644 ---- a/drivers/target/target_core_configfs.c -+++ b/drivers/target/target_core_configfs.c -@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( - ssize_t len = 0; - int reg_count = 0, prf_isid; - -+ pax_track_stack(); -+ - if (!su_dev->se_dev_ptr) - return -ENODEV; - -diff 
--git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c -index 7fd3a16..bc2fb3e 100644 ---- a/drivers/target/target_core_pr.c -+++ b/drivers/target/target_core_pr.c -@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration( - unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; - u16 tpgt; - -+ pax_track_stack(); -+ - memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN); - memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN); - /* -@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf( - ssize_t len = 0; - int reg_count = 0; - -+ pax_track_stack(); -+ - memset(buf, 0, pr_aptpl_buf_len); - /* - * Called to clear metadata once APTPL has been deactivated. -@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file( - char path[512]; - int ret; - -+ pax_track_stack(); -+ - memset(iov, 0, sizeof(struct iovec)); - memset(path, 0, 512); - -diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c -index 5c1b8c5..0cb7d0e 100644 ---- a/drivers/target/target_core_tmr.c -+++ b/drivers/target/target_core_tmr.c -@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list( - cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), -- atomic_read(&cmd->t_task_cdbs_sent), -+ atomic_read_unchecked(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_transport_active), - atomic_read(&cmd->t_transport_stop), - atomic_read(&cmd->t_transport_sent)); -@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list( - pr_debug("LUN_RESET: got t_transport_active = 1 for" - " task: %p, t_fe_count: %d dev: %p\n", task, - fe_count, dev); -- atomic_set(&cmd->t_transport_aborted, 1); -+ atomic_set_unchecked(&cmd->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); -@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list( - } - pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," - " t_fe_count: %d dev: %p\n", task, fe_count, dev); -- atomic_set(&cmd->t_transport_aborted, 1); -+ atomic_set_unchecked(&cmd->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); -diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c -index 013c100..8fd2e57 100644 ---- a/drivers/target/target_core_transport.c -+++ b/drivers/target/target_core_transport.c -@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba( - - dev->queue_depth = dev_limits->queue_depth; - atomic_set(&dev->depth_left, dev->queue_depth); -- atomic_set(&dev->dev_ordered_id, 0); -+ atomic_set_unchecked(&dev->dev_ordered_id, 0); - - se_dev_set_default_attribs(dev, dev_limits); - -@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) - * Used to determine when ORDERED commands should go from - * Dormant to Active status. 
- */ -- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); -+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id); - smp_mb__after_atomic_inc(); - pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", - cmd->se_ordered_id, cmd->sam_task_attr, -@@ -1960,7 +1960,7 @@ static void transport_generic_request_failure( - " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), -- atomic_read(&cmd->t_task_cdbs_sent), -+ atomic_read_unchecked(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_task_cdbs_ex_left), - atomic_read(&cmd->t_transport_active), - atomic_read(&cmd->t_transport_stop), -@@ -2460,9 +2460,9 @@ check_depth: - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_set(&task->task_active, 1); - atomic_set(&task->task_sent, 1); -- atomic_inc(&cmd->t_task_cdbs_sent); -+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent); - -- if (atomic_read(&cmd->t_task_cdbs_sent) == -+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) == - cmd->t_task_list_num) - atomic_set(&cmd->transport_sent, 1); - -@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_tasks( - atomic_set(&cmd->transport_lun_stop, 0); - } - if (!atomic_read(&cmd->t_transport_active) || -- atomic_read(&cmd->t_transport_aborted)) -+ atomic_read_unchecked(&cmd->t_transport_aborted)) - goto remove; - - atomic_set(&cmd->t_transport_stop, 1); -@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) - { - int ret = 0; - -- if (atomic_read(&cmd->t_transport_aborted) != 0) { -+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) { - if (!send_status || - (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) - return 1; -@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se_cmd *cmd) - */ - if (cmd->data_direction == DMA_TO_DEVICE) { - if (cmd->se_tfo->write_pending_status(cmd) != 0) { -- atomic_inc(&cmd->t_transport_aborted); -+ atomic_inc_unchecked(&cmd->t_transport_aborted); - smp_mb__after_atomic_inc(); - cmd->scsi_status = SAM_STAT_TASK_ABORTED; - transport_new_cmd_failure(cmd); -@@ -5051,7 +5051,7 @@ static void transport_processing_shutdown(struct se_device *dev) - cmd->se_tfo->get_task_tag(cmd), - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), -- atomic_read(&cmd->t_task_cdbs_sent), -+ atomic_read_unchecked(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_transport_active), - atomic_read(&cmd->t_transport_stop), - atomic_read(&cmd->t_transport_sent)); -diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c -index d5f923b..9c78228 100644 ---- a/drivers/telephony/ixj.c -+++ b/drivers/telephony/ixj.c -@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j) - bool mContinue; - char *pIn, *pOut; - -+ pax_track_stack(); -+ - if (!SCI_Prepare(j)) - return 0; - -diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c -index 4c8b665..1d931eb 100644 ---- a/drivers/tty/hvc/hvcs.c -+++ b/drivers/tty/hvc/hvcs.c -@@ -83,6 +83,7 @@ - #include <asm/hvcserver.h> - #include <asm/uaccess.h> - #include <asm/vio.h> -+#include <asm/local.h> - - /* - * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). 
-@@ -270,7 +271,7 @@ struct hvcs_struct { - unsigned int index; - - struct tty_struct *tty; -- int open_count; -+ local_t open_count; - - /* - * Used to tell the driver kernel_thread what operations need to take -@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut - - spin_lock_irqsave(&hvcsd->lock, flags); - -- if (hvcsd->open_count > 0) { -+ if (local_read(&hvcsd->open_count) > 0) { - spin_unlock_irqrestore(&hvcsd->lock, flags); - printk(KERN_INFO "HVCS: vterm state unchanged. " - "The hvcs device node is still in use.\n"); -@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) - if ((retval = hvcs_partner_connect(hvcsd))) - goto error_release; - -- hvcsd->open_count = 1; -+ local_set(&hvcsd->open_count, 1); - hvcsd->tty = tty; - tty->driver_data = hvcsd; - -@@ -1179,7 +1180,7 @@ fast_open: - - spin_lock_irqsave(&hvcsd->lock, flags); - kref_get(&hvcsd->kref); -- hvcsd->open_count++; -+ local_inc(&hvcsd->open_count); - hvcsd->todo_mask |= HVCS_SCHED_READ; - spin_unlock_irqrestore(&hvcsd->lock, flags); - -@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) - hvcsd = tty->driver_data; - - spin_lock_irqsave(&hvcsd->lock, flags); -- if (--hvcsd->open_count == 0) { -+ if (local_dec_and_test(&hvcsd->open_count)) { - - vio_disable_interrupts(hvcsd->vdev); - -@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) - free_irq(irq, hvcsd); - kref_put(&hvcsd->kref, destroy_hvcs_struct); - return; -- } else if (hvcsd->open_count < 0) { -+ } else if (local_read(&hvcsd->open_count) < 0) { - printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" - " is missmanaged.\n", -- hvcsd->vdev->unit_address, hvcsd->open_count); -+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count)); - } - - spin_unlock_irqrestore(&hvcsd->lock, flags); -@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty) - - spin_lock_irqsave(&hvcsd->lock, flags); - /* Preserve this so that we know how many kref refs to put */ -- temp_open_count = hvcsd->open_count; -+ temp_open_count = local_read(&hvcsd->open_count); - - /* - * Don't kref put inside the spinlock because the destruction -@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty) - hvcsd->tty->driver_data = NULL; - hvcsd->tty = NULL; - -- hvcsd->open_count = 0; -+ local_set(&hvcsd->open_count, 0); - - /* This will drop any buffered data on the floor which is OK in a hangup - * scenario. */ -@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty, - * the middle of a write operation? This is a crummy place to do this - * but we want to keep it all in the spinlock. 
- */ -- if (hvcsd->open_count <= 0) { -+ if (local_read(&hvcsd->open_count) <= 0) { - spin_unlock_irqrestore(&hvcsd->lock, flags); - return -ENODEV; - } -@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty) - { - struct hvcs_struct *hvcsd = tty->driver_data; - -- if (!hvcsd || hvcsd->open_count <= 0) -+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0) - return 0; - - return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; -diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c -index ef92869..f4ebd88 100644 ---- a/drivers/tty/ipwireless/tty.c -+++ b/drivers/tty/ipwireless/tty.c -@@ -29,6 +29,7 @@ - #include <linux/tty_driver.h> - #include <linux/tty_flip.h> - #include <linux/uaccess.h> -+#include <asm/local.h> - - #include "tty.h" - #include "network.h" -@@ -51,7 +52,7 @@ struct ipw_tty { - int tty_type; - struct ipw_network *network; - struct tty_struct *linux_tty; -- int open_count; -+ local_t open_count; - unsigned int control_lines; - struct mutex ipw_tty_mutex; - int tx_bytes_queued; -@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) - mutex_unlock(&tty->ipw_tty_mutex); - return -ENODEV; - } -- if (tty->open_count == 0) -+ if (local_read(&tty->open_count) == 0) - tty->tx_bytes_queued = 0; - -- tty->open_count++; -+ local_inc(&tty->open_count); - - tty->linux_tty = linux_tty; - linux_tty->driver_data = tty; -@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) - - static void do_ipw_close(struct ipw_tty *tty) - { -- tty->open_count--; -- -- if (tty->open_count == 0) { -+ if (local_dec_return(&tty->open_count) == 0) { - struct tty_struct *linux_tty = tty->linux_tty; - - if (linux_tty != NULL) { -@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty) - return; - - mutex_lock(&tty->ipw_tty_mutex); -- if (tty->open_count == 0) { -+ if (local_read(&tty->open_count) == 0) { - mutex_unlock(&tty->ipw_tty_mutex); - return; - } -@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, - return; - } - -- if (!tty->open_count) { -+ if (!local_read(&tty->open_count)) { - mutex_unlock(&tty->ipw_tty_mutex); - return; - } -@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty, - return -ENODEV; - - mutex_lock(&tty->ipw_tty_mutex); -- if (!tty->open_count) { -+ if (!local_read(&tty->open_count)) { - mutex_unlock(&tty->ipw_tty_mutex); - return -EINVAL; - } -@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty) - if (!tty) - return -ENODEV; - -- if (!tty->open_count) -+ if (!local_read(&tty->open_count)) - return -EINVAL; - - room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; -@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty) - if (!tty) - return 0; - -- if (!tty->open_count) -+ if (!local_read(&tty->open_count)) - return 0; - - return tty->tx_bytes_queued; -@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty) - if (!tty) - return -ENODEV; - -- if (!tty->open_count) -+ if (!local_read(&tty->open_count)) - return -EINVAL; - - return get_control_lines(tty); -@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, - if (!tty) - return -ENODEV; - -- if (!tty->open_count) -+ if (!local_read(&tty->open_count)) - return -EINVAL; - - return set_control_lines(tty, set, clear); -@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, - if (!tty) - return -ENODEV; - -- if (!tty->open_count) -+ if (!local_read(&tty->open_count)) - return 
-EINVAL; - - /* FIXME: Exactly how is the tty object locked here .. */ -@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty) - against a parallel ioctl etc */ - mutex_lock(&ttyj->ipw_tty_mutex); - } -- while (ttyj->open_count) -+ while (local_read(&ttyj->open_count)) - do_ipw_close(ttyj); - ipwireless_disassociate_network_ttys(network, - ttyj->channel_idx); -diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c -index 8a50e4e..7d9ca3d 100644 ---- a/drivers/tty/n_gsm.c -+++ b/drivers/tty/n_gsm.c -@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) - kref_init(&dlci->ref); - mutex_init(&dlci->mutex); - dlci->fifo = &dlci->_fifo; -- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { -+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) { - kfree(dlci); - return NULL; - } -diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c -index 39d6ab6..eb97f41 100644 ---- a/drivers/tty/n_tty.c -+++ b/drivers/tty/n_tty.c -@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) - { - *ops = tty_ldisc_N_TTY; - ops->owner = NULL; -- ops->refcount = ops->flags = 0; -+ atomic_set(&ops->refcount, 0); -+ ops->flags = 0; - } - EXPORT_SYMBOL_GPL(n_tty_inherit_ops); -diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c -index e18604b..a7d5a11 100644 ---- a/drivers/tty/pty.c -+++ b/drivers/tty/pty.c -@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void) - register_sysctl_table(pty_root_table); - - /* Now create the /dev/ptmx special device */ -+ pax_open_kernel(); - tty_default_fops(&ptmx_fops); -- ptmx_fops.open = ptmx_open; -+ *(void **)&ptmx_fops.open = ptmx_open; -+ pax_close_kernel(); - - cdev_init(&ptmx_cdev, &ptmx_fops); - if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || -diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c -index 6a1241c..d04ab0d 100644 ---- a/drivers/tty/rocket.c -+++ b/drivers/tty/rocket.c -@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports) - struct rocket_ports tmp; - int board; - -+ pax_track_stack(); -+ - if (!retports) - return -EFAULT; - memset(&tmp, 0, sizeof (tmp)); -diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c -index 87e7e6c..89744e0 100644 ---- a/drivers/tty/serial/kgdboc.c -+++ b/drivers/tty/serial/kgdboc.c -@@ -23,8 +23,9 @@ - #define MAX_CONFIG_LEN 40 - - static struct kgdb_io kgdboc_io_ops; -+static struct kgdb_io kgdboc_io_ops_console; - --/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ -+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. 
*/ - static int configured = -1; - - static char config[MAX_CONFIG_LEN]; -@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void) - kgdboc_unregister_kbd(); - if (configured == 1) - kgdb_unregister_io_module(&kgdboc_io_ops); -+ else if (configured == 2) -+ kgdb_unregister_io_module(&kgdboc_io_ops_console); - } - - static int configure_kgdboc(void) -@@ -156,13 +159,13 @@ static int configure_kgdboc(void) - int err; - char *cptr = config; - struct console *cons; -+ int is_console = 0; - - err = kgdboc_option_setup(config); - if (err || !strlen(config) || isspace(config[0])) - goto noconfig; - - err = -ENODEV; -- kgdboc_io_ops.is_console = 0; - kgdb_tty_driver = NULL; - - kgdboc_use_kms = 0; -@@ -183,7 +186,7 @@ static int configure_kgdboc(void) - int idx; - if (cons->device && cons->device(cons, &idx) == p && - idx == tty_line) { -- kgdboc_io_ops.is_console = 1; -+ is_console = 1; - break; - } - cons = cons->next; -@@ -193,12 +196,16 @@ static int configure_kgdboc(void) - kgdb_tty_line = tty_line; - - do_register: -- err = kgdb_register_io_module(&kgdboc_io_ops); -+ if (is_console) { -+ err = kgdb_register_io_module(&kgdboc_io_ops_console); -+ configured = 2; -+ } else { -+ err = kgdb_register_io_module(&kgdboc_io_ops); -+ configured = 1; -+ } - if (err) - goto noconfig; - -- configured = 1; -- - return 0; - - noconfig: -@@ -212,7 +219,7 @@ noconfig: - static int __init init_kgdboc(void) - { - /* Already configured? */ -- if (configured == 1) -+ if (configured >= 1) - return 0; - - return configure_kgdboc(); -@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) - if (config[len - 1] == '\n') - config[len - 1] = '\0'; - -- if (configured == 1) -+ if (configured >= 1) - cleanup_kgdboc(); - - /* Go and configure with the new params. 
*/ -@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = { - .post_exception = kgdboc_post_exp_handler, - }; - -+static struct kgdb_io kgdboc_io_ops_console = { -+ .name = "kgdboc", -+ .read_char = kgdboc_get_char, -+ .write_char = kgdboc_put_char, -+ .pre_exception = kgdboc_pre_exp_handler, -+ .post_exception = kgdboc_post_exp_handler, -+ .is_console = 1 -+}; -+ - #ifdef CONFIG_KGDB_SERIAL_CONSOLE - /* This is only available if kgdboc is a built in for early debugging */ - static int __init kgdboc_early_init(char *opt) -diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c -index cab52f4..29fc6aa 100644 ---- a/drivers/tty/serial/mfd.c -+++ b/drivers/tty/serial/mfd.c -@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev) - } - - /* First 3 are UART ports, and the 4th is the DMA */ --static const struct pci_device_id pci_ids[] __devinitdata = { -+static const struct pci_device_id pci_ids[] __devinitconst = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) }, -diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c -index 23bc743..d425c07 100644 ---- a/drivers/tty/serial/mrst_max3110.c -+++ b/drivers/tty/serial/mrst_max3110.c -@@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max) - int loop = 1, num, total = 0; - u8 recv_buf[512], *pbuf; - -+ pax_track_stack(); -+ - pbuf = recv_buf; - do { - num = max3110_read_multi(max, pbuf); -diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c -index 1a890e2..1d8139c 100644 ---- a/drivers/tty/tty_io.c -+++ b/drivers/tty/tty_io.c -@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty); - - void tty_default_fops(struct file_operations *fops) - { -- *fops = tty_fops; -+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops)); - } - - /* -diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c -index a76c808..ecbc743 100644 ---- a/drivers/tty/tty_ldisc.c -+++ b/drivers/tty/tty_ldisc.c -@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld) - if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { - struct tty_ldisc_ops *ldo = ld->ops; - -- ldo->refcount--; -+ atomic_dec(&ldo->refcount); - module_put(ldo->owner); - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - -@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc) - spin_lock_irqsave(&tty_ldisc_lock, flags); - tty_ldiscs[disc] = new_ldisc; - new_ldisc->num = disc; -- new_ldisc->refcount = 0; -+ atomic_set(&new_ldisc->refcount, 0); - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - - return ret; -@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc) - return -EINVAL; - - spin_lock_irqsave(&tty_ldisc_lock, flags); -- if (tty_ldiscs[disc]->refcount) -+ if (atomic_read(&tty_ldiscs[disc]->refcount)) - ret = -EBUSY; - else - tty_ldiscs[disc] = NULL; -@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc) - if (ldops) { - ret = ERR_PTR(-EAGAIN); - if (try_module_get(ldops->owner)) { -- ldops->refcount++; -+ atomic_inc(&ldops->refcount); - ret = ldops; - } - } -@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) - unsigned long flags; - - spin_lock_irqsave(&tty_ldisc_lock, flags); -- ldops->refcount--; -+ atomic_dec(&ldops->refcount); - module_put(ldops->owner); - spin_unlock_irqrestore(&tty_ldisc_lock, flags); - } -diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c -index 3761ccf..2c613b3 100644 ---- a/drivers/tty/vt/keyboard.c -+++ 
b/drivers/tty/vt/keyboard.c -@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) - kbd->kbdmode == VC_OFF) && - value != KVAL(K_SAK)) - return; /* SAK is allowed even in raw mode */ -+ -+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) -+ { -+ void *func = fn_handler[value]; -+ if (func == fn_show_state || func == fn_show_ptregs || -+ func == fn_show_mem) -+ return; -+ } -+#endif -+ - fn_handler[value](vc); - } - -diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c -index b3915b7..e716839 100644 ---- a/drivers/tty/vt/vt.c -+++ b/drivers/tty/vt/vt.c -@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier); - - static void notify_write(struct vc_data *vc, unsigned int unicode) - { -- struct vt_notifier_param param = { .vc = vc, unicode = unicode }; -+ struct vt_notifier_param param = { .vc = vc, .c = unicode }; - atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, ¶m); - } - -diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c -index 5e096f4..0da1363 100644 ---- a/drivers/tty/vt/vt_ioctl.c -+++ b/drivers/tty/vt/vt_ioctl.c -@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str - if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) - return -EFAULT; - -- if (!capable(CAP_SYS_TTY_CONFIG)) -- perm = 0; -- - switch (cmd) { - case KDGKBENT: - key_map = key_maps[s]; -@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str - val = (i ? K_HOLE : K_NOSUCHMAP); - return put_user(val, &user_kbe->kb_value); - case KDSKBENT: -+ if (!capable(CAP_SYS_TTY_CONFIG)) -+ perm = 0; -+ - if (!perm) - return -EPERM; - if (!i && v == K_NOSUCHMAP) { -@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) - int i, j, k; - int ret; - -- if (!capable(CAP_SYS_TTY_CONFIG)) -- perm = 0; -- - kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); - if (!kbs) { - ret = -ENOMEM; -@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) - kfree(kbs); - return ((p && *p) ? 
-EOVERFLOW : 0); - case KDSKBSENT: -+ if (!capable(CAP_SYS_TTY_CONFIG)) -+ perm = 0; -+ - if (!perm) { - ret = -EPERM; - goto reterr; -diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c -index d2efe82..9440ab6 100644 ---- a/drivers/uio/uio.c -+++ b/drivers/uio/uio.c -@@ -25,6 +25,7 @@ - #include <linux/kobject.h> - #include <linux/cdev.h> - #include <linux/uio_driver.h> -+#include <asm/local.h> - - #define UIO_MAX_DEVICES (1U << MINORBITS) - -@@ -32,10 +33,10 @@ struct uio_device { - struct module *owner; - struct device *dev; - int minor; -- atomic_t event; -+ atomic_unchecked_t event; - struct fasync_struct *async_queue; - wait_queue_head_t wait; -- int vma_count; -+ local_t vma_count; - struct uio_info *info; - struct kobject *map_dir; - struct kobject *portio_dir; -@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev, - struct device_attribute *attr, char *buf) - { - struct uio_device *idev = dev_get_drvdata(dev); -- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); -+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event)); - } - - static struct device_attribute uio_class_attributes[] = { -@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info) - { - struct uio_device *idev = info->uio_dev; - -- atomic_inc(&idev->event); -+ atomic_inc_unchecked(&idev->event); - wake_up_interruptible(&idev->wait); - kill_fasync(&idev->async_queue, SIGIO, POLL_IN); - } -@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep) - } - - listener->dev = idev; -- listener->event_count = atomic_read(&idev->event); -+ listener->event_count = atomic_read_unchecked(&idev->event); - filep->private_data = listener; - - if (idev->info->open) { -@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait) - return -EIO; - - poll_wait(filep, &idev->wait, wait); -- if (listener->event_count != atomic_read(&idev->event)) -+ if (listener->event_count != atomic_read_unchecked(&idev->event)) - return POLLIN | POLLRDNORM; - return 0; - } -@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf, - do { - set_current_state(TASK_INTERRUPTIBLE); - -- event_count = atomic_read(&idev->event); -+ event_count = atomic_read_unchecked(&idev->event); - if (event_count != listener->event_count) { - if (copy_to_user(buf, &event_count, count)) - retval = -EFAULT; -@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma) - static void uio_vma_open(struct vm_area_struct *vma) - { - struct uio_device *idev = vma->vm_private_data; -- idev->vma_count++; -+ local_inc(&idev->vma_count); - } - - static void uio_vma_close(struct vm_area_struct *vma) - { - struct uio_device *idev = vma->vm_private_data; -- idev->vma_count--; -+ local_dec(&idev->vma_count); - } - - static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -@@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner, - idev->owner = owner; - idev->info = info; - init_waitqueue_head(&idev->wait); -- atomic_set(&idev->event, 0); -+ atomic_set_unchecked(&idev->event, 0); - - ret = uio_get_minor(idev); - if (ret) -diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c -index a845f8b..4f54072 100644 ---- a/drivers/usb/atm/cxacru.c -+++ b/drivers/usb/atm/cxacru.c -@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, - ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); - if (ret < 2) - return -EINVAL; -- if (index < 0 || index > 0x7f) -+ if (index > 0x7f) - 
return -EINVAL; - pos += tmp; - -diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c -index d3448ca..d2864ca 100644 ---- a/drivers/usb/atm/usbatm.c -+++ b/drivers/usb/atm/usbatm.c -@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char - if (printk_ratelimit()) - atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", - __func__, vpi, vci); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - return; - } - -@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char - if (length > ATM_MAX_AAL5_PDU) { - atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", - __func__, length, vcc); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto out; - } - -@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char - if (sarb->len < pdu_length) { - atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", - __func__, pdu_length, sarb->len, vcc); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto out; - } - - if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { - atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", - __func__, vcc); -- atomic_inc(&vcc->stats->rx_err); -+ atomic_inc_unchecked(&vcc->stats->rx_err); - goto out; - } - -@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char - if (printk_ratelimit()) - atm_err(instance, "%s: no memory for skb (length: %u)!\n", - __func__, length); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - goto out; - } - -@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char - - vcc->push(vcc, skb); - -- atomic_inc(&vcc->stats->rx); -+ atomic_inc_unchecked(&vcc->stats->rx); - out: - skb_trim(sarb, 0); - } -@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data) - struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; - - usbatm_pop(vcc, skb); -- atomic_inc(&vcc->stats->tx); -+ atomic_inc_unchecked(&vcc->stats->tx); - - skb = skb_dequeue(&instance->sndqueue); - } -@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag - if (!left--) - return sprintf(page, - "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", -- atomic_read(&atm_dev->stats.aal5.tx), -- atomic_read(&atm_dev->stats.aal5.tx_err), -- atomic_read(&atm_dev->stats.aal5.rx), -- atomic_read(&atm_dev->stats.aal5.rx_err), -- atomic_read(&atm_dev->stats.aal5.rx_drop)); -+ atomic_read_unchecked(&atm_dev->stats.aal5.tx), -+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), -+ atomic_read_unchecked(&atm_dev->stats.aal5.rx), -+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), -+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); - - if (!left--) { - if (instance->disconnected) -diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c -index 0149c09..f108812 100644 ---- a/drivers/usb/core/devices.c -+++ b/drivers/usb/core/devices.c -@@ -126,7 +126,7 @@ static const char format_endpt[] = - * time it gets called. 
- */ - static struct device_connect_event { -- atomic_t count; -+ atomic_unchecked_t count; - wait_queue_head_t wait; - } device_event = { - .count = ATOMIC_INIT(1), -@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = { - - void usbfs_conn_disc_event(void) - { -- atomic_add(2, &device_event.count); -+ atomic_add_unchecked(2, &device_event.count); - wake_up(&device_event.wait); - } - -@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file, - - poll_wait(file, &device_event.wait, wait); - -- event_count = atomic_read(&device_event.count); -+ event_count = atomic_read_unchecked(&device_event.count); - if (file->f_version != event_count) { - file->f_version = event_count; - return POLLIN | POLLRDNORM; -diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c -index 0b5ec23..0da3d76 100644 ---- a/drivers/usb/core/message.c -+++ b/drivers/usb/core/message.c -@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index) - buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); - if (buf) { - len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); -- if (len > 0) { -- smallbuf = kmalloc(++len, GFP_NOIO); -+ if (len++ > 0) { -+ smallbuf = kmalloc(len, GFP_NOIO); - if (!smallbuf) - return buf; - memcpy(smallbuf, buf, len); -diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c -index 1fc8f12..20647c1 100644 ---- a/drivers/usb/early/ehci-dbgp.c -+++ b/drivers/usb/early/ehci-dbgp.c -@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len) - - #ifdef CONFIG_KGDB - static struct kgdb_io kgdbdbgp_io_ops; --#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops) -+static struct kgdb_io kgdbdbgp_io_ops_console; -+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console) - #else - #define dbgp_kgdb_mode (0) - #endif -@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = { - .write_char = kgdbdbgp_write_char, - }; - -+static struct kgdb_io kgdbdbgp_io_ops_console = { -+ .name = "kgdbdbgp", -+ .read_char = kgdbdbgp_read_char, -+ .write_char = kgdbdbgp_write_char, -+ .is_console = 1 -+}; -+ - static int kgdbdbgp_wait_time; - - static int __init kgdbdbgp_parse_config(char *str) -@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str) - ptr++; - kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); - } -- kgdb_register_io_module(&kgdbdbgp_io_ops); -- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; -+ if (early_dbgp_console.index != -1) -+ kgdb_register_io_module(&kgdbdbgp_io_ops_console); -+ else -+ kgdb_register_io_module(&kgdbdbgp_io_ops); - - return 0; - } -diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c -index d718033..6075579 100644 ---- a/drivers/usb/host/xhci-mem.c -+++ b/drivers/usb/host/xhci-mem.c -@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) - unsigned int num_tests; - int i, ret; - -+ pax_track_stack(); -+ - num_tests = ARRAY_SIZE(simple_test_vector); - for (i = 0; i < num_tests; i++) { - ret = xhci_test_trb_in_td(xhci, -diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h -index d6bea3e..60b250e 100644 ---- a/drivers/usb/wusbcore/wa-hc.h -+++ b/drivers/usb/wusbcore/wa-hc.h -@@ -192,7 +192,7 @@ struct wahc { - struct list_head xfer_delayed_list; - spinlock_t xfer_list_lock; - struct work_struct xfer_work; -- atomic_t xfer_id_count; -+ atomic_unchecked_t xfer_id_count; - }; - - -@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa) - 
INIT_LIST_HEAD(&wa->xfer_delayed_list); - spin_lock_init(&wa->xfer_list_lock); - INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run); -- atomic_set(&wa->xfer_id_count, 1); -+ atomic_set_unchecked(&wa->xfer_id_count, 1); - } - - /** -diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c -index 4193345..49ae93d 100644 ---- a/drivers/usb/wusbcore/wa-xfer.c -+++ b/drivers/usb/wusbcore/wa-xfer.c -@@ -295,7 +295,7 @@ out: - */ - static void wa_xfer_id_init(struct wa_xfer *xfer) - { -- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); -+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count); - } - - /* -diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c -index c14c42b..f955cc2 100644 ---- a/drivers/vhost/vhost.c -+++ b/drivers/vhost/vhost.c -@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) - return 0; - } - --static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) -+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp) - { - struct file *eventfp, *filep = NULL, - *pollstart = NULL, *pollstop = NULL; -diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c -index b0b2ac3..89a4399 100644 ---- a/drivers/video/aty/aty128fb.c -+++ b/drivers/video/aty/aty128fb.c -@@ -148,7 +148,7 @@ enum { - }; - - /* Must match above enum */ --static const char *r128_family[] __devinitdata = { -+static const char *r128_family[] __devinitconst = { - "AGP", - "PCI", - "PRO AGP", -diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c -index 5c3960d..15cf8fc 100644 ---- a/drivers/video/fbcmap.c -+++ b/drivers/video/fbcmap.c -@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) - rc = -ENODEV; - goto out; - } -- if (cmap->start < 0 || (!info->fbops->fb_setcolreg && -- !info->fbops->fb_setcmap)) { -+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) { - rc = -EINVAL; - goto out1; - } -diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c -index ad93629..ca6a218 100644 ---- a/drivers/video/fbmem.c -+++ b/drivers/video/fbmem.c -@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, - image->dx += image->width + 8; - } - } else if (rotate == FB_ROTATE_UD) { -- for (x = 0; x < num && image->dx >= 0; x++) { -+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) { - info->fbops->fb_imageblit(info, image); - image->dx -= image->width + 8; - } -@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, - image->dy += image->height + 8; - } - } else if (rotate == FB_ROTATE_CCW) { -- for (x = 0; x < num && image->dy >= 0; x++) { -+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) { - info->fbops->fb_imageblit(info, image); - image->dy -= image->height + 8; - } -@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) - int flags = info->flags; - int ret = 0; - -+ pax_track_stack(); -+ - if (var->activate & FB_ACTIVATE_INV_MODE) { - struct fb_videomode mode1, mode2; - -@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, - void __user *argp = (void __user *)arg; - long ret = 0; - -+ pax_track_stack(); -+ - switch (cmd) { - case FBIOGET_VSCREENINFO: - if (!lock_fb_info(info)) -@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, - return -EFAULT; - if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) - return -EINVAL; -- if 
(con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) -+ if (con2fb.framebuffer >= FB_MAX) - return -EINVAL; - if (!registered_fb[con2fb.framebuffer]) - request_module("fb%d", con2fb.framebuffer); -diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c -index 5a5d092..265c5ed 100644 ---- a/drivers/video/geode/gx1fb_core.c -+++ b/drivers/video/geode/gx1fb_core.c -@@ -29,7 +29,7 @@ static int crt_option = 1; - static char panel_option[32] = ""; - - /* Modes relevant to the GX1 (taken from modedb.c) */ --static const struct fb_videomode __devinitdata gx1_modedb[] = { -+static const struct fb_videomode __devinitconst gx1_modedb[] = { - /* 640x480-60 VESA */ - { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, - 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, -diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c -index 896e53d..4d87d0b 100644 ---- a/drivers/video/gxt4500.c -+++ b/drivers/video/gxt4500.c -@@ -156,7 +156,7 @@ struct gxt4500_par { - static char *mode_option; - - /* default mode: 1280x1024 @ 60 Hz, 8 bpp */ --static const struct fb_videomode defaultmode __devinitdata = { -+static const struct fb_videomode defaultmode __devinitconst = { - .refresh = 60, - .xres = 1280, - .yres = 1024, -@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info) - return 0; - } - --static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = { -+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = { - .id = "IBM GXT4500P", - .type = FB_TYPE_PACKED_PIXELS, - .visual = FB_VISUAL_PSEUDOCOLOR, -diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c -index 7672d2e..b56437f 100644 ---- a/drivers/video/i810/i810_accel.c -+++ b/drivers/video/i810/i810_accel.c -@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space) - } - } - printk("ringbuffer lockup!!!\n"); -+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space); - i810_report_error(mmio); - par->dev_flags |= LOCKUP; - info->pixmap.scan_align = 1; -diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c -index 318f6fb..9a389c1 100644 ---- a/drivers/video/i810/i810_main.c -+++ b/drivers/video/i810/i810_main.c -@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info); - static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par); - - /* PCI */ --static const char *i810_pci_list[] __devinitdata = { -+static const char *i810_pci_list[] __devinitconst = { - "Intel(R) 810 Framebuffer Device" , - "Intel(R) 810-DC100 Framebuffer Device" , - "Intel(R) 810E Framebuffer Device" , -diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c -index de36693..3c63fc2 100644 ---- a/drivers/video/jz4740_fb.c -+++ b/drivers/video/jz4740_fb.c -@@ -136,7 +136,7 @@ struct jzfb { - uint32_t pseudo_palette[16]; - }; - --static const struct fb_fix_screeninfo jzfb_fix __devinitdata = { -+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = { - .id = "JZ4740 FB", - .type = FB_TYPE_PACKED_PIXELS, - .visual = FB_VISUAL_TRUECOLOR, -diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm -index 3c14e43..eafa544 100644 ---- a/drivers/video/logo/logo_linux_clut224.ppm -+++ b/drivers/video/logo/logo_linux_clut224.ppm -@@ -1,1604 +1,1123 @@ - P3 --# Standard 224-color Linux logo - 80 80 - 255 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 
[... raw pixel rows of the 80x80, 224-color Linux logo PPM (drivers/video/logo/logo_linux_clut224.ppm) omitted ...]
0 0 0 0 0 0 0 0 10 10 10 -- 30 30 30 74 74 74 174 135 50 216 158 10 --236 178 12 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 241 196 14 226 184 13 -- 61 42 6 2 2 6 2 2 6 2 2 6 -- 22 22 22 238 238 238 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 226 226 226 187 187 187 180 133 36 --216 158 10 236 178 12 239 182 13 236 178 12 --230 174 11 226 170 11 226 170 11 230 174 11 --236 178 12 242 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 186 14 239 182 13 --206 162 42 106 106 106 66 66 66 34 34 34 -- 14 14 14 6 6 6 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 6 6 6 -- 26 26 26 70 70 70 163 133 67 213 154 11 --236 178 12 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 241 196 14 --190 146 13 18 14 6 2 2 6 2 2 6 -- 46 46 46 246 246 246 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 221 221 221 86 86 86 156 107 11 --216 158 10 236 178 12 242 186 14 246 186 14 --242 186 14 239 182 13 239 182 13 242 186 14 --242 186 14 246 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --242 186 14 225 175 15 142 122 72 66 66 66 -- 30 30 30 10 10 10 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 6 6 6 -- 26 26 26 70 70 70 163 133 67 210 150 10 --236 178 12 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --232 195 16 121 92 8 34 34 34 106 106 106 --221 221 221 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --242 242 242 82 82 82 18 14 6 163 110 8 --216 158 10 236 178 12 242 186 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 242 186 14 163 133 67 -- 46 46 46 18 18 18 6 6 6 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 10 10 10 -- 30 30 30 78 78 78 163 133 67 210 150 10 --236 178 12 246 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --241 196 14 215 174 15 190 178 144 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 218 218 218 -- 58 58 58 2 2 6 22 18 6 167 114 7 --216 158 10 236 178 12 246 186 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 186 14 242 186 14 190 150 46 -- 54 54 54 22 22 22 6 6 6 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 14 14 14 -- 38 38 38 86 86 86 180 133 36 213 154 11 --236 178 12 246 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 
246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 232 195 16 190 146 13 214 214 214 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 250 250 250 170 170 170 26 26 26 -- 2 2 6 2 2 6 37 26 9 163 110 8 --219 162 10 239 182 13 246 186 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 186 14 236 178 12 224 166 10 142 122 72 -- 46 46 46 18 18 18 6 6 6 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 6 6 6 18 18 18 -- 50 50 50 109 106 95 192 133 9 224 166 10 --242 186 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --242 186 14 226 184 13 210 162 10 142 110 46 --226 226 226 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --253 253 253 253 253 253 253 253 253 253 253 253 --198 198 198 66 66 66 2 2 6 2 2 6 -- 2 2 6 2 2 6 50 34 6 156 107 11 --219 162 10 239 182 13 246 186 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 242 186 14 --234 174 13 213 154 11 154 122 46 66 66 66 -- 30 30 30 10 10 10 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 6 6 6 22 22 22 -- 58 58 58 154 121 60 206 145 10 234 174 13 --242 186 14 246 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 186 14 236 178 12 210 162 10 163 110 8 -- 61 42 6 138 138 138 218 218 218 250 250 250 --253 253 253 253 253 253 253 253 253 250 250 250 --242 242 242 210 210 210 144 144 144 66 66 66 -- 6 6 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 61 42 6 163 110 8 --216 158 10 236 178 12 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 239 182 13 230 174 11 216 158 10 --190 142 34 124 112 88 70 70 70 38 38 38 -- 18 18 18 6 6 6 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 6 6 6 22 22 22 -- 62 62 62 168 124 44 206 145 10 224 166 10 --236 178 12 239 182 13 242 186 14 242 186 14 --246 186 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 236 178 12 216 158 10 175 118 6 -- 80 54 7 2 2 6 6 6 6 30 30 30 -- 54 54 54 62 62 62 50 50 50 38 38 38 -- 14 14 14 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 6 6 6 80 54 7 167 114 7 --213 154 11 236 178 12 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 190 14 242 186 14 239 182 13 239 182 13 --230 174 11 210 150 10 174 135 50 124 112 88 -- 82 82 82 54 54 54 34 34 34 18 18 18 -- 6 6 6 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 6 6 6 18 18 18 -- 50 50 50 158 118 36 192 133 9 200 144 11 --216 158 10 219 162 10 224 166 10 226 170 11 --230 174 11 236 178 12 239 182 13 239 182 13 --242 186 14 246 186 14 246 190 14 246 190 14 --246 190 14 246 190 14 246 190 14 246 190 14 --246 186 14 230 174 11 210 150 10 163 110 8 --104 69 6 10 10 10 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 6 6 6 
91 60 6 167 114 7 --206 145 10 230 174 11 242 186 14 246 190 14 --246 190 14 246 190 14 246 186 14 242 186 14 --239 182 13 230 174 11 224 166 10 213 154 11 --180 133 36 124 112 88 86 86 86 58 58 58 -- 38 38 38 22 22 22 10 10 10 6 6 6 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 14 14 14 -- 34 34 34 70 70 70 138 110 50 158 118 36 --167 114 7 180 123 7 192 133 9 197 138 11 --200 144 11 206 145 10 213 154 11 219 162 10 --224 166 10 230 174 11 239 182 13 242 186 14 --246 186 14 246 186 14 246 186 14 246 186 14 --239 182 13 216 158 10 185 133 11 152 99 6 --104 69 6 18 14 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 2 2 6 2 2 6 2 2 6 -- 2 2 6 6 6 6 80 54 7 152 99 6 --192 133 9 219 162 10 236 178 12 239 182 13 --246 186 14 242 186 14 239 182 13 236 178 12 --224 166 10 206 145 10 192 133 9 154 121 60 -- 94 94 94 62 62 62 42 42 42 22 22 22 -- 14 14 14 6 6 6 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 6 6 6 -- 18 18 18 34 34 34 58 58 58 78 78 78 --101 98 89 124 112 88 142 110 46 156 107 11 --163 110 8 167 114 7 175 118 6 180 123 7 --185 133 11 197 138 11 210 150 10 219 162 10 --226 170 11 236 178 12 236 178 12 234 174 13 --219 162 10 197 138 11 163 110 8 130 83 6 -- 91 60 6 10 10 10 2 2 6 2 2 6 -- 18 18 18 38 38 38 38 38 38 38 38 38 -- 38 38 38 38 38 38 38 38 38 38 38 38 -- 38 38 38 38 38 38 26 26 26 2 2 6 -- 2 2 6 6 6 6 70 47 6 137 92 6 --175 118 6 200 144 11 219 162 10 230 174 11 --234 174 13 230 174 11 219 162 10 210 150 10 --192 133 9 163 110 8 124 112 88 82 82 82 -- 50 50 50 30 30 30 14 14 14 6 6 6 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 6 6 6 14 14 14 22 22 22 34 34 34 -- 42 42 42 58 58 58 74 74 74 86 86 86 --101 98 89 122 102 70 130 98 46 121 87 25 --137 92 6 152 99 6 163 110 8 180 123 7 --185 133 11 197 138 11 206 145 10 200 144 11 --180 123 7 156 107 11 130 83 6 104 69 6 -- 50 34 6 54 54 54 110 110 110 101 98 89 -- 86 86 86 82 82 82 78 78 78 78 78 78 -- 78 78 78 78 78 78 78 78 78 78 78 78 -- 78 78 78 82 82 82 86 86 86 94 94 94 --106 106 106 101 101 101 86 66 34 124 80 6 --156 107 11 180 123 7 192 133 9 200 144 11 --206 145 10 200 144 11 192 133 9 175 118 6 --139 102 15 109 106 95 70 70 70 42 42 42 -- 22 22 22 10 10 10 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 6 6 6 10 10 10 -- 14 14 14 22 22 22 30 30 30 38 38 38 -- 50 50 50 62 62 62 74 74 74 90 90 90 --101 98 89 112 100 78 121 87 25 124 80 6 --137 92 6 152 99 6 152 99 6 152 99 6 --138 86 6 124 80 6 98 70 6 86 66 30 --101 98 89 82 82 82 58 58 58 46 46 46 -- 38 38 38 34 34 34 34 34 34 34 34 34 -- 34 34 34 34 34 34 34 34 34 34 34 34 -- 34 34 34 34 34 34 38 38 38 42 42 42 -- 54 54 54 82 82 82 94 86 76 91 60 6 --134 86 6 156 107 11 167 114 7 175 118 6 --175 118 6 167 114 7 152 99 6 121 87 25 --101 98 89 62 62 62 34 34 34 18 18 18 -- 6 6 6 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 6 6 6 6 6 6 10 10 10 -- 18 18 18 22 22 22 30 30 30 42 42 42 -- 50 50 50 66 66 66 86 86 86 101 98 89 --106 86 58 98 70 6 104 69 6 104 69 6 --104 69 6 91 60 6 82 62 34 90 90 90 -- 62 62 62 38 38 38 22 22 22 14 14 14 -- 10 10 10 10 10 10 10 10 10 10 10 10 -- 
10 10 10 10 10 10 6 6 6 10 10 10 -- 10 10 10 10 10 10 10 10 10 14 14 14 -- 22 22 22 42 42 42 70 70 70 89 81 66 -- 80 54 7 104 69 6 124 80 6 137 92 6 --134 86 6 116 81 8 100 82 52 86 86 86 -- 58 58 58 30 30 30 14 14 14 6 6 6 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 6 6 6 10 10 10 14 14 14 -- 18 18 18 26 26 26 38 38 38 54 54 54 -- 70 70 70 86 86 86 94 86 76 89 81 66 -- 89 81 66 86 86 86 74 74 74 50 50 50 -- 30 30 30 14 14 14 6 6 6 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 6 6 6 18 18 18 34 34 34 58 58 58 -- 82 82 82 89 81 66 89 81 66 89 81 66 -- 94 86 66 94 86 76 74 74 74 50 50 50 -- 26 26 26 14 14 14 6 6 6 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 6 6 6 6 6 6 14 14 14 18 18 18 -- 30 30 30 38 38 38 46 46 46 54 54 54 -- 50 50 50 42 42 42 30 30 30 18 18 18 -- 10 10 10 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 6 6 6 14 14 14 26 26 26 -- 38 38 38 50 50 50 58 58 58 58 58 58 -- 54 54 54 42 42 42 30 30 30 18 18 18 -- 10 10 10 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 6 6 6 -- 6 6 6 10 10 10 14 14 14 18 18 18 -- 18 18 18 14 14 14 10 10 10 6 6 6 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 6 6 6 -- 14 14 14 18 18 18 22 22 22 22 22 22 -- 18 18 18 14 14 14 10 10 10 6 6 6 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -- 0 0 0 0 0 0 0 0 0 0 0 0 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 
4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0 -+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 -+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28 -+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2 -+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6 -+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 -+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0 -+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137 -+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0 -+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125 -+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4 -+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35 -+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0 -+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167 -+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63 -+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4 -+4 4 
4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 -+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167 -+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5 -+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159 -+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 -+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158 -+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166 -+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4 -+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 -+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158 -+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1 -+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196 -+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 -+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134 -+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157 -+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167 -+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0 -+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0 -+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153 -+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2 -+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193 -+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3 -+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174 -+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155 -+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153 -+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17 -+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 -+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63 -+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174 -+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103 -+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196 -+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0 -+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163 -+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165 -+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155 -+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131 -+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4 -+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174 -+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137 -+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209 -+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122 -+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37 -+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155 -+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155 -+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156 -+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174 -+167 166 167 125 124 125 37 38 
37 1 0 0 0 0 0 0 0 0 -+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174 -+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6 -+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196 -+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14 -+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153 -+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156 -+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68 -+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166 -+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158 -+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17 -+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165 -+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21 -+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146 -+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0 -+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 -+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166 -+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158 -+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0 -+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158 -+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156 -+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125 -+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125 -+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188 -+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14 -+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5 -+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0 -+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157 -+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174 -+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0 -+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37 -+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154 -+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163 -+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0 -+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211 -+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2 -+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4 -+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0 -+0 0 0 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 -+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127 -+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156 -+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125 -+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4 -+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0 -+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165 -+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174 -+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3 -+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193 -+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5 -+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1 -+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81 -+2 0 0 0 0 0 -+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2 -+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174 -+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153 -+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6 
-+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4 -+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3 -+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174 -+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158 -+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148 -+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13 -+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132 -+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0 -+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165 -+37 38 37 0 0 0 -+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 -+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165 -+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174 -+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0 -+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5 -+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5 -+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84 -+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27 -+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220 -+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103 -+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196 -+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28 -+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174 -+85 115 134 4 0 0 -+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55 -+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153 -+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154 -+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5 -+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 -+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4 -+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6 -+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0 -+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209 -+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103 -+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93 -+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137 -+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174 -+60 73 81 4 0 0 -+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174 -+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155 -+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37 -+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5 -+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3 -+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4 -+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0 -+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55 -+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209 -+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13 -+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1 -+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174 -+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125 -+16 19 21 4 0 0 -+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174 -+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 -+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0 -+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4 -+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86 -+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1 -+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5 -+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209 -+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 -+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193 -+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0 -+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165 -+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28 -+4 0 0 4 3 3 -+3 3 3 0 0 0 24 26 27 153 152 153 177 184 
187 158 157 158 -+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174 -+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 -+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196 -+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0 -+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0 -+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211 -+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193 -+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193 -+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81 -+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153 -+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0 -+3 2 2 4 4 4 -+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158 -+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125 -+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4 -+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 -+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196 -+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126 -+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209 -+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13 -+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15 -+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163 -+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63 -+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2 -+4 4 4 4 4 4 -+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153 -+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6 -+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4 -+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18 -+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209 -+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126 -+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 -+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103 -+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5 -+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167 -+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0 -+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131 -+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0 -+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5 -+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159 -+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 -+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211 -+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196 -+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220 -+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17 -+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174 -+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3 -+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5 -+5 5 5 5 5 5 -+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125 -+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0 -+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1 -+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196 -+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 -+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209 -+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209 -+101 
161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141 -+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154 -+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125 -+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5 -+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 -+5 5 5 4 4 4 -+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131 -+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3 -+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0 -+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 -+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 -+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196 -+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8 -+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174 -+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0 -+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137 -+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2 -+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72 -+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 -+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 -+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7 -+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166 -+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0 -+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137 -+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2 -+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 -+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209 -+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196 -+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193 -+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84 -+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157 -+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137 -+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2 -+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193 -+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193 -+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 -+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 -+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165 -+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81 -+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153 -+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2 -+4 4 5 4 4 4 2 0 
0 7 12 15 31 96 139 64 123 161 -+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 -+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 -+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8 -+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158 -+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37 -+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153 -+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2 -+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161 -+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 -+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 -+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 -+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7 -+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154 -+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37 -+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154 -+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2 -+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151 -+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193 -+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 -+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196 -+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141 -+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5 -+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158 -+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63 -+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158 -+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2 -+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144 -+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 -+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193 -+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196 -+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17 -+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5 -+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158 -+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37 -+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163 -+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3 -+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151 -+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196 -+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5 -+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5 -+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137 -+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37 -+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 60 73 81 
167 166 167 174 174 174 166 165 166 -+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3 -+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144 -+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161 -+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8 -+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5 -+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125 -+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37 -+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167 -+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3 -+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151 -+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25 -+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3 -+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3 -+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174 -+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3 -+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144 -+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161 -+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193 -+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3 -+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3 -+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4 -+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174 -+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2 -+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151 -+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161 -+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35 -+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3 -+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174 -+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2 -+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144 -+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151 -+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93 -+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193 -+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93 -+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0 -+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174 -+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 
2 2 -+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 -+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2 -+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34 -+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34 -+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0 -+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174 -+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2 -+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144 -+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144 -+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52 -+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0 -+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93 -+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0 -+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174 -+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2 -+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 -+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14 -+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161 -+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52 -+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161 -+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0 -+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174 -+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2 -+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144 -+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7 -+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52 -+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161 -+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0 -+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 -+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2 -+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 -+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2 -+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161 -+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52 -+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0 -+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174 -+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2 -+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 
87 144 -+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25 -+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161 -+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3 -+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 -+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2 -+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25 -+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 -+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151 -+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161 -+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 -+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4 -+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174 -+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3 -+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5 -+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144 -+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151 -+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151 -+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 -+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4 -+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 -+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2 -+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3 -+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151 -+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144 -+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144 -+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14 -+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4 -+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137 -+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174 -+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0 -+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 -+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93 -+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 -+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161 -+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0 -+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5 -+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137 -+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 -+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34 -+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4 -+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7 -+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 
87 144 -+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151 -+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4 -+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0 -+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131 -+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174 -+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203 -+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4 -+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2 -+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144 -+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25 -+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4 -+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2 -+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125 -+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28 -+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221 -+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221 -+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0 -+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5 -+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144 -+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2 -+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4 -+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81 -+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131 -+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8 -+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221 -+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221 -+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6 -+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25 -+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3 -+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166 -+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125 -+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3 -+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167 -+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 -+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125 -+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5 -+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7 -+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4 -+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0 -+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166 -+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137 -+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0 -+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28 -+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203 -+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246 -+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3 -+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3 -+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4 -+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17 -+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163 -+153 152 153 153 152 
[several hundred lines of raw image pixel data from the removed patch omitted]
4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6 -+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6 -+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 -+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3 -+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6 -+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 -+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -+4 4 4 4 4 4 -diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c -index 087fc99..f85ed76 100644 ---- a/drivers/video/udlfb.c -+++ b/drivers/video/udlfb.c -@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y, - dlfb_urb_completion(urb); - - error: -- atomic_add(bytes_sent, &dev->bytes_sent); -- atomic_add(bytes_identical, &dev->bytes_identical); -- atomic_add(width*height*2, &dev->bytes_rendered); -+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent); -+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical); -+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered); - end_cycles = get_cycles(); -- atomic_add(((unsigned int) ((end_cycles - start_cycles) -+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles) - >> 10)), /* Kcycles */ - &dev->cpu_kcycles_used); - -@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info, - dlfb_urb_completion(urb); - - error: -- atomic_add(bytes_sent, &dev->bytes_sent); -- atomic_add(bytes_identical, &dev->bytes_identical); -- atomic_add(bytes_rendered, &dev->bytes_rendered); -+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent); -+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical); -+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered); - end_cycles = get_cycles(); -- atomic_add(((unsigned int) ((end_cycles - start_cycles) -+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles) - >> 10)), /* Kcycles */ - &dev->cpu_kcycles_used); - } -@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev, - struct fb_info *fb_info = dev_get_drvdata(fbdev); - struct dlfb_data 
*dev = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", -- atomic_read(&dev->bytes_rendered)); -+ atomic_read_unchecked(&dev->bytes_rendered)); - } - - static ssize_t metrics_bytes_identical_show(struct device *fbdev, -@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev, - struct fb_info *fb_info = dev_get_drvdata(fbdev); - struct dlfb_data *dev = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", -- atomic_read(&dev->bytes_identical)); -+ atomic_read_unchecked(&dev->bytes_identical)); - } - - static ssize_t metrics_bytes_sent_show(struct device *fbdev, -@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev, - struct fb_info *fb_info = dev_get_drvdata(fbdev); - struct dlfb_data *dev = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", -- atomic_read(&dev->bytes_sent)); -+ atomic_read_unchecked(&dev->bytes_sent)); - } - - static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, -@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, - struct fb_info *fb_info = dev_get_drvdata(fbdev); - struct dlfb_data *dev = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", -- atomic_read(&dev->cpu_kcycles_used)); -+ atomic_read_unchecked(&dev->cpu_kcycles_used)); - } - - static ssize_t edid_show( -@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev, - struct fb_info *fb_info = dev_get_drvdata(fbdev); - struct dlfb_data *dev = fb_info->par; - -- atomic_set(&dev->bytes_rendered, 0); -- atomic_set(&dev->bytes_identical, 0); -- atomic_set(&dev->bytes_sent, 0); -- atomic_set(&dev->cpu_kcycles_used, 0); -+ atomic_set_unchecked(&dev->bytes_rendered, 0); -+ atomic_set_unchecked(&dev->bytes_identical, 0); -+ atomic_set_unchecked(&dev->bytes_sent, 0); -+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0); - - return count; - } -diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c -index 7f8472c..9842e87 100644 ---- a/drivers/video/uvesafb.c -+++ b/drivers/video/uvesafb.c -@@ -19,6 +19,7 @@ - #include <linux/io.h> - #include <linux/mutex.h> - #include <linux/slab.h> -+#include <linux/moduleloader.h> - #include <video/edid.h> - #include <video/uvesafb.h> - #ifdef CONFIG_X86 -@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void) - NULL, - }; - -- return call_usermodehelper(v86d_path, argv, envp, 1); -+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC); - } - - /* -@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task, - if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { - par->pmi_setpal = par->ypan = 0; - } else { -+ -+#ifdef CONFIG_PAX_KERNEXEC -+#ifdef CONFIG_MODULES -+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx); -+#endif -+ if (!par->pmi_code) { -+ par->pmi_setpal = par->ypan = 0; -+ return 0; -+ } -+#endif -+ - par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4) - + task->t.regs.edi); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ pax_open_kernel(); -+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx); -+ pax_close_kernel(); -+ -+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]); -+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]); -+#else - par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1]; - par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2]; -+#endif -+ - printk(KERN_INFO "uvesafb: protected mode interface info at " - "%04x:%04x\n", - (u16)task->t.regs.es, (u16)task->t.regs.edi); -@@ 
-1821,6 +1844,11 @@ out: - if (par->vbe_modes) - kfree(par->vbe_modes); - -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ if (par->pmi_code) -+ module_free_exec(NULL, par->pmi_code); -+#endif -+ - framebuffer_release(info); - return err; - } -@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev) - kfree(par->vbe_state_orig); - if (par->vbe_state_saved) - kfree(par->vbe_state_saved); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ if (par->pmi_code) -+ module_free_exec(NULL, par->pmi_code); -+#endif -+ - } - - framebuffer_release(info); -diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c -index 501b340..86bd4cf 100644 ---- a/drivers/video/vesafb.c -+++ b/drivers/video/vesafb.c -@@ -9,6 +9,7 @@ - */ - - #include <linux/module.h> -+#include <linux/moduleloader.h> - #include <linux/kernel.h> - #include <linux/errno.h> - #include <linux/string.h> -@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */ - static int vram_total __initdata; /* Set total amount of memory */ - static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */ - static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */ --static void (*pmi_start)(void) __read_mostly; --static void (*pmi_pal) (void) __read_mostly; -+static void (*pmi_start)(void) __read_only; -+static void (*pmi_pal) (void) __read_only; - static int depth __read_mostly; - static int vga_compat __read_mostly; - /* --------------------------------------------------------------------- */ -@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev) - unsigned int size_vmode; - unsigned int size_remap; - unsigned int size_total; -+ void *pmi_code = NULL; - - if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) - return -ENODEV; -@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev) - size_remap = size_total; - vesafb_fix.smem_len = size_remap; - --#ifndef __i386__ -- screen_info.vesapm_seg = 0; --#endif -- - if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { - printk(KERN_WARNING - "vesafb: cannot reserve video memory at 0x%lx\n", -@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev) - printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", - vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); - -+#ifdef __i386__ -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ pmi_code = module_alloc_exec(screen_info.vesapm_size); -+ if (!pmi_code) -+#elif !defined(CONFIG_PAX_KERNEXEC) -+ if (0) -+#endif -+ -+#endif -+ screen_info.vesapm_seg = 0; -+ - if (screen_info.vesapm_seg) { -- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", -- screen_info.vesapm_seg,screen_info.vesapm_off); -+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n", -+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size); - } - - if (screen_info.vesapm_seg < 0xc000) -@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev) - - if (ypan || pmi_setpal) { - unsigned short *pmi_base; -+ - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); -- pmi_start = (void*)((char*)pmi_base + pmi_base[1]); -- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ pax_open_kernel(); -+ 
memcpy(pmi_code, pmi_base, screen_info.vesapm_size); -+#else -+ pmi_code = pmi_base; -+#endif -+ -+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]); -+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ pmi_start = ktva_ktla(pmi_start); -+ pmi_pal = ktva_ktla(pmi_pal); -+ pax_close_kernel(); -+#endif -+ - printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); - if (pmi_base[3]) { - printk(KERN_INFO "vesafb: pmi: ports = "); -@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev) - info->node, info->fix.id); - return 0; - err: -+ -+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) -+ module_free_exec(NULL, pmi_code); -+#endif -+ - if (info->screen_base) - iounmap(info->screen_base); - framebuffer_release(info); -diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h -index 88714ae..16c2e11 100644 ---- a/drivers/video/via/via_clock.h -+++ b/drivers/video/via/via_clock.h -@@ -56,7 +56,7 @@ struct via_clock { - - void (*set_engine_pll_state)(u8 state); - void (*set_engine_pll)(struct via_pll_config config); --}; -+} __no_const; - - - static inline u32 get_pll_internal_frequency(u32 ref_freq, -diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c -index e058ace..2424d93 100644 ---- a/drivers/virtio/virtio_balloon.c -+++ b/drivers/virtio/virtio_balloon.c -@@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb) - struct sysinfo i; - int idx = 0; - -+ pax_track_stack(); -+ - all_vm_events(events); - si_meminfo(&i); - -diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h -index e56c934..fc22f4b 100644 ---- a/drivers/xen/xen-pciback/conf_space.h -+++ b/drivers/xen/xen-pciback/conf_space.h -@@ -44,15 +44,15 @@ struct config_field { - struct { - conf_dword_write write; - conf_dword_read read; -- } dw; -+ } __no_const dw; - struct { - conf_word_write write; - conf_word_read read; -- } w; -+ } __no_const w; - struct { - conf_byte_write write; - conf_byte_read read; -- } b; -+ } __no_const b; - } u; - struct list_head list; - }; -diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c -index e3c03db..93b0172 100644 ---- a/fs/9p/vfs_inode.c -+++ b/fs/9p/vfs_inode.c -@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) - void - v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) - { -- char *s = nd_get_link(nd); -+ const char *s = nd_get_link(nd); - - P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, - IS_ERR(s) ? "<error>" : s); -diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt -index 79e2ca7..5828ad1 100644 ---- a/fs/Kconfig.binfmt -+++ b/fs/Kconfig.binfmt -@@ -86,7 +86,7 @@ config HAVE_AOUT - - config BINFMT_AOUT - tristate "Kernel support for a.out and ECOFF binaries" -- depends on HAVE_AOUT -+ depends on HAVE_AOUT && BROKEN - ---help--- - A.out (Assembler.OUTput) is a set of formats for libraries and - executables used in the earliest versions of UNIX. 
Linux used -diff --git a/fs/aio.c b/fs/aio.c -index e29ec48..f083e5e 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx) - size += sizeof(struct io_event) * nr_events; - nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; - -- if (nr_pages < 0) -+ if (nr_pages <= 0) - return -EINVAL; - - nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); -@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx, - struct aio_timeout to; - int retry = 0; - -+ pax_track_stack(); -+ - /* needed to zero any padding within an entry (there shouldn't be - * any, but C is fun! - */ -@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb) - static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) - { - ssize_t ret; -+ struct iovec iovstack; - - #ifdef CONFIG_COMPAT - if (compat) - ret = compat_rw_copy_check_uvector(type, - (struct compat_iovec __user *)kiocb->ki_buf, -- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, -+ kiocb->ki_nbytes, 1, &iovstack, - &kiocb->ki_iovec); - else - #endif - ret = rw_copy_check_uvector(type, - (struct iovec __user *)kiocb->ki_buf, -- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, -+ kiocb->ki_nbytes, 1, &iovstack, - &kiocb->ki_iovec); - if (ret < 0) - goto out; - -+ if (kiocb->ki_iovec == &iovstack) { -+ kiocb->ki_inline_vec = iovstack; -+ kiocb->ki_iovec = &kiocb->ki_inline_vec; -+ } - kiocb->ki_nr_segs = kiocb->ki_nbytes; - kiocb->ki_cur_seg = 0; - /* ki_nbytes/left now reflect bytes instead of segs */ -diff --git a/fs/attr.c b/fs/attr.c -index 538e279..046cc6d 100644 ---- a/fs/attr.c -+++ b/fs/attr.c -@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset) - unsigned long limit; - - limit = rlimit(RLIMIT_FSIZE); -+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1); - if (limit != RLIM_INFINITY && offset > limit) - goto out_sig; - if (offset > inode->i_sb->s_maxbytes) -diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c -index e1fbdee..cd5ea56 100644 ---- a/fs/autofs4/waitq.c -+++ b/fs/autofs4/waitq.c -@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes) - { - unsigned long sigpipe, flags; - mm_segment_t fs; -- const char *data = (const char *)addr; -+ const char __user *data = (const char __force_user *)addr; - ssize_t wr = 0; - - /** WARNING: this is not safe for writing more than PIPE_BUF bytes! 
**/ -diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c -index 720d885..012e7f0 100644 ---- a/fs/befs/linuxvfs.c -+++ b/fs/befs/linuxvfs.c -@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) - { - befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); - if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { -- char *link = nd_get_link(nd); -+ const char *link = nd_get_link(nd); - if (!IS_ERR(link)) - kfree(link); - } -diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c -index a6395bd..a5b24c4 100644 ---- a/fs/binfmt_aout.c -+++ b/fs/binfmt_aout.c -@@ -16,6 +16,7 @@ - #include <linux/string.h> - #include <linux/fs.h> - #include <linux/file.h> -+#include <linux/security.h> - #include <linux/stat.h> - #include <linux/fcntl.h> - #include <linux/ptrace.h> -@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm) - #endif - # define START_STACK(u) ((void __user *)u.start_stack) - -+ memset(&dump, 0, sizeof(dump)); -+ - fs = get_fs(); - set_fs(KERNEL_DS); - has_dumped = 1; -@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm) - - /* If the size of the dump file exceeds the rlimit, then see what would happen - if we wrote the stack, but not the data area. */ -+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1); - if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) - dump.u_dsize = 0; - - /* Make sure we have enough room to write the stack and data areas. */ -+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1); - if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) - dump.u_ssize = 0; - -@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) - rlim = rlimit(RLIMIT_DATA); - if (rlim >= RLIM_INFINITY) - rlim = ~0; -+ -+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); - if (ex.a_data + ex.a_bss > rlim) - return -ENOMEM; - -@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) - install_exec_creds(bprm); - current->flags &= ~PF_FORKNOEXEC; - -+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+ current->mm->pax_flags = 0UL; -+#endif -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { -+ current->mm->pax_flags |= MF_PAX_PAGEEXEC; -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP) -+ current->mm->pax_flags |= MF_PAX_EMUTRAMP; -+#endif -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) -+ current->mm->pax_flags |= MF_PAX_MPROTECT; -+#endif -+ -+ } -+#endif -+ - if (N_MAGIC(ex) == OMAGIC) { - unsigned long text_addr, map_size; - loff_t pos; -@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) - - down_write(¤t->mm->mmap_sem); - error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, -- PROT_READ | PROT_WRITE | PROT_EXEC, -+ PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, - fd_offset + ex.a_text); - up_write(¤t->mm->mmap_sem); -diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c -index 21ac5ee..171b1d0 100644 ---- a/fs/binfmt_elf.c -+++ b/fs/binfmt_elf.c -@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump_params *cprm); - #define elf_core_dump NULL - #endif - -+#ifdef CONFIG_PAX_MPROTECT -+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags); -+#endif -+ - #if ELF_EXEC_PAGESIZE > PAGE_SIZE - #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE - #else -@@ -70,6 +74,11 @@ static struct 
linux_binfmt elf_format = { - .load_binary = load_elf_binary, - .load_shlib = load_elf_library, - .core_dump = elf_core_dump, -+ -+#ifdef CONFIG_PAX_MPROTECT -+ .handle_mprotect= elf_handle_mprotect, -+#endif -+ - .min_coredump = ELF_EXEC_PAGESIZE, - }; - -@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = { - - static int set_brk(unsigned long start, unsigned long end) - { -+ unsigned long e = end; -+ - start = ELF_PAGEALIGN(start); - end = ELF_PAGEALIGN(end); - if (end > start) { -@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end) - if (BAD_ADDR(addr)) - return addr; - } -- current->mm->start_brk = current->mm->brk = end; -+ current->mm->start_brk = current->mm->brk = e; - return 0; - } - -@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, - elf_addr_t __user *u_rand_bytes; - const char *k_platform = ELF_PLATFORM; - const char *k_base_platform = ELF_BASE_PLATFORM; -- unsigned char k_rand_bytes[16]; -+ u32 k_rand_bytes[4]; - int items; - elf_addr_t *elf_info; - int ei_index = 0; - const struct cred *cred = current_cred(); - struct vm_area_struct *vma; -+ unsigned long saved_auxv[AT_VECTOR_SIZE]; -+ -+ pax_track_stack(); - - /* - * In some cases (e.g. Hyper-Threading), we want to avoid L1 -@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, - * Generate 16 random bytes for userspace PRNG seeding. - */ - get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); -- u_rand_bytes = (elf_addr_t __user *) -- STACK_ALLOC(p, sizeof(k_rand_bytes)); -+ srandom32(k_rand_bytes[0] ^ random32()); -+ srandom32(k_rand_bytes[1] ^ random32()); -+ srandom32(k_rand_bytes[2] ^ random32()); -+ srandom32(k_rand_bytes[3] ^ random32()); -+ p = STACK_ROUND(p, sizeof(k_rand_bytes)); -+ u_rand_bytes = (elf_addr_t __user *) p; - if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) - return -EFAULT; - -@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, - return -EFAULT; - current->mm->env_end = p; - -+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t)); -+ - /* Put the elf_info on the stack in the right place. 
*/ - sp = (elf_addr_t __user *)envp + 1; -- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t))) -+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t))) - return -EFAULT; - return 0; - } -@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, - { - struct elf_phdr *elf_phdata; - struct elf_phdr *eppnt; -- unsigned long load_addr = 0; -+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE; - int load_addr_set = 0; - unsigned long last_bss = 0, elf_bss = 0; -- unsigned long error = ~0UL; -+ unsigned long error = -EINVAL; - unsigned long total_size; - int retval, i, size; - -@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, - goto out_close; - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ - eppnt = elf_phdata; - for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { - if (eppnt->p_type == PT_LOAD) { -@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, - k = load_addr + eppnt->p_vaddr; - if (BAD_ADDR(k) || - eppnt->p_filesz > eppnt->p_memsz || -- eppnt->p_memsz > TASK_SIZE || -- TASK_SIZE - eppnt->p_memsz < k) { -+ eppnt->p_memsz > pax_task_size || -+ pax_task_size - eppnt->p_memsz < k) { - error = -ENOMEM; - goto out_close; - } -@@ -528,6 +553,193 @@ out: - return error; - } - -+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE) -+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata) -+{ -+ unsigned long pax_flags = 0UL; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (elf_phdata->p_flags & PF_PAGEEXEC) -+ pax_flags |= MF_PAX_PAGEEXEC; -+#endif -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (elf_phdata->p_flags & PF_SEGMEXEC) -+ pax_flags |= MF_PAX_SEGMEXEC; -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ if ((__supported_pte_mask & _PAGE_NX)) -+ pax_flags &= ~MF_PAX_SEGMEXEC; -+ else -+ pax_flags &= ~MF_PAX_PAGEEXEC; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ if (elf_phdata->p_flags & PF_EMUTRAMP) -+ pax_flags |= MF_PAX_EMUTRAMP; -+#endif -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (elf_phdata->p_flags & PF_MPROTECT) -+ pax_flags |= MF_PAX_MPROTECT; -+#endif -+ -+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) -+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP)) -+ pax_flags |= MF_PAX_RANDMMAP; -+#endif -+ -+ return pax_flags; -+} -+#endif -+ -+#ifdef CONFIG_PAX_PT_PAX_FLAGS -+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata) -+{ -+ unsigned long pax_flags = 0UL; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) -+ pax_flags |= MF_PAX_PAGEEXEC; -+#endif -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) -+ pax_flags |= MF_PAX_SEGMEXEC; -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ if ((__supported_pte_mask & _PAGE_NX)) -+ pax_flags &= ~MF_PAX_SEGMEXEC; -+ else -+ pax_flags &= ~MF_PAX_PAGEEXEC; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) -+ pax_flags |= MF_PAX_EMUTRAMP; -+#endif -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (!(elf_phdata->p_flags & PF_NOMPROTECT)) -+ pax_flags |= MF_PAX_MPROTECT; 
-+#endif -+ -+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) -+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP)) -+ pax_flags |= MF_PAX_RANDMMAP; -+#endif -+ -+ return pax_flags; -+} -+#endif -+ -+#ifdef CONFIG_PAX_EI_PAX -+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex) -+{ -+ unsigned long pax_flags = 0UL; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) -+ pax_flags |= MF_PAX_PAGEEXEC; -+#endif -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) -+ pax_flags |= MF_PAX_SEGMEXEC; -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ if ((__supported_pte_mask & _PAGE_NX)) -+ pax_flags &= ~MF_PAX_SEGMEXEC; -+ else -+ pax_flags &= ~MF_PAX_PAGEEXEC; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) -+ pax_flags |= MF_PAX_EMUTRAMP; -+#endif -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) -+ pax_flags |= MF_PAX_MPROTECT; -+#endif -+ -+#ifdef CONFIG_PAX_ASLR -+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) -+ pax_flags |= MF_PAX_RANDMMAP; -+#endif -+ -+ return pax_flags; -+} -+#endif -+ -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) -+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) -+{ -+ unsigned long pax_flags = 0UL; -+ -+#ifdef CONFIG_PAX_PT_PAX_FLAGS -+ unsigned long i; -+ int found_flags = 0; -+#endif -+ -+#ifdef CONFIG_PAX_EI_PAX -+ pax_flags = pax_parse_ei_pax(elf_ex); -+#endif -+ -+#ifdef CONFIG_PAX_PT_PAX_FLAGS -+ for (i = 0UL; i < elf_ex->e_phnum; i++) -+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) { -+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || -+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || -+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || -+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || -+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP))) -+ return -EINVAL; -+ -+#ifdef CONFIG_PAX_SOFTMODE -+ if (pax_softmode) -+ pax_flags = pax_parse_softmode(&elf_phdata[i]); -+ else -+#endif -+ -+ pax_flags = pax_parse_hardmode(&elf_phdata[i]); -+ found_flags = 1; -+ break; -+ } -+#endif -+ -+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS) -+ if (found_flags == 0) { -+ struct elf_phdr phdr; -+ memset(&phdr, 0, sizeof(phdr)); -+ phdr.p_flags = PF_NOEMUTRAMP; -+#ifdef CONFIG_PAX_SOFTMODE -+ if (pax_softmode) -+ pax_flags = pax_parse_softmode(&phdr); -+ else -+#endif -+ pax_flags = pax_parse_hardmode(&phdr); -+ } -+#endif -+ -+ if (0 > pax_check_flags(&pax_flags)) -+ return -EINVAL; -+ -+ current->mm->pax_flags = pax_flags; -+ return 0; -+} -+#endif -+ - /* - * These are the functions used to load ELF style executables and shared - * libraries. There is no binary dependent code anywhere else. 
-@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top) - { - unsigned int random_variable = 0; - -+#ifdef CONFIG_PAX_RANDUSTACK -+ if (randomize_va_space) -+ return stack_top - current->mm->delta_stack; -+#endif -+ - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) { - random_variable = get_random_int() & STACK_RND_MASK; -@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - unsigned long load_addr = 0, load_bias = 0; - int load_addr_set = 0; - char * elf_interpreter = NULL; -- unsigned long error; -+ unsigned long error = 0; - struct elf_phdr *elf_ppnt, *elf_phdata; - unsigned long elf_bss, elf_brk; - int retval, i; -@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - unsigned long start_code, end_code, start_data, end_data; - unsigned long reloc_func_desc __maybe_unused = 0; - int executable_stack = EXSTACK_DEFAULT; -- unsigned long def_flags = 0; - struct { - struct elfhdr elf_ex; - struct elfhdr interp_elf_ex; - } *loc; -+ unsigned long pax_task_size = TASK_SIZE; - - loc = kmalloc(sizeof(*loc), GFP_KERNEL); - if (!loc) { -@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - - /* OK, This is the point of no return */ - current->flags &= ~PF_FORKNOEXEC; -- current->mm->def_flags = def_flags; -+ -+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+ current->mm->pax_flags = 0UL; -+#endif -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ current->mm->call_dl_resolve = 0UL; -+#endif -+ -+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) -+ current->mm->call_syscall = 0UL; -+#endif -+ -+#ifdef CONFIG_PAX_ASLR -+ current->mm->delta_mmap = 0UL; -+ current->mm->delta_stack = 0UL; -+#endif -+ -+ current->mm->def_flags = 0; -+ -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) -+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) { -+ send_sig(SIGKILL, current, 0); -+ goto out_free_dentry; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS -+ pax_set_initial_flags(bprm); -+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) -+ if (pax_set_initial_flags_func) -+ (pax_set_initial_flags_func)(bprm); -+#endif -+ -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) { -+ current->mm->context.user_cs_limit = PAGE_SIZE; -+ current->mm->def_flags |= VM_PAGEEXEC; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { -+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE; -+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE; -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+ current->mm->def_flags |= VM_NOHUGEPAGE; -+ } -+#endif -+ -+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC) -+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu()); -+ put_cpu(); -+ } -+#endif - - /* Do this immediately, since STACK_TOP as used in setup_arg_pages - may depend on the personality. 
*/ - SET_PERSONALITY(loc->elf_ex); -+ -+#ifdef CONFIG_PAX_ASLR -+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) { -+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT; -+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT; -+ } -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ executable_stack = EXSTACK_DISABLE_X; -+ current->personality &= ~READ_IMPLIES_EXEC; -+ } else -+#endif -+ - if (elf_read_implies_exec(loc->elf_ex, executable_stack)) - current->personality |= READ_IMPLIES_EXEC; - -@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - #else - load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); - #endif -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ /* PaX: randomize base address at the default exe base if requested */ -+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) { -+#ifdef CONFIG_SPARC64 -+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1); -+#else -+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT; -+#endif -+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias); -+ elf_flags |= MAP_FIXED; -+ } -+#endif -+ - } - - error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, -@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - * allowed task size. Note that p_filesz must always be - * <= p_memsz so it is only necessary to check p_memsz. - */ -- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || -- elf_ppnt->p_memsz > TASK_SIZE || -- TASK_SIZE - elf_ppnt->p_memsz < k) { -+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz || -+ elf_ppnt->p_memsz > pax_task_size || -+ pax_task_size - elf_ppnt->p_memsz < k) { - /* set_brk can never work. Avoid overflows. */ - send_sig(SIGKILL, current, 0); - retval = -EINVAL; -@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - start_data += load_bias; - end_data += load_bias; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) -+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4); -+#endif -+ - /* Calling set_brk effectively mmaps the pages that we need - * for the bss and break sections. We must do this before - * mapping in the interpreter, to make sure it doesn't wind -@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) - goto out_free_dentry; - } - if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { -- send_sig(SIGSEGV, current, 0); -- retval = -EFAULT; /* Nobody gets to see this, but.. */ -- goto out_free_dentry; -+ /* -+ * This bss-zeroing can fail if the ELF -+ * file specifies odd protections. So -+ * we don't check the return value -+ */ - } - - if (elf_interpreter) { -@@ -1098,7 +1406,7 @@ out: - * Decide what to dump of a segment, part, all or none. 
- */ - static unsigned long vma_dump_size(struct vm_area_struct *vma, -- unsigned long mm_flags) -+ unsigned long mm_flags, long signr) - { - #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) - -@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, - if (vma->vm_file == NULL) - return 0; - -- if (FILTER(MAPPED_PRIVATE)) -+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE)) - goto whole; - - /* -@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) - { - elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; - int i = 0; -- do -+ do { - i += 2; -- while (auxv[i - 2] != AT_NULL); -+ } while (auxv[i - 2] != AT_NULL); - fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); - } - -@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, - } - - static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma, -- unsigned long mm_flags) -+ struct coredump_params *cprm) - { - struct vm_area_struct *vma; - size_t size = 0; - - for (vma = first_vma(current, gate_vma); vma != NULL; - vma = next_vma(vma, gate_vma)) -- size += vma_dump_size(vma, mm_flags); -+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr); - return size; - } - -@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump_params *cprm) - - dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); - -- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags); -+ offset += elf_core_vma_data_size(gate_vma, cprm); - offset += elf_core_extra_data_size(); - e_shoff = offset; - -@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump_params *cprm) - offset = dataoff; - - size += sizeof(*elf); -+ gr_learn_resource(current, RLIMIT_CORE, size, 1); - if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf))) - goto end_coredump; - - size += sizeof(*phdr4note); -+ gr_learn_resource(current, RLIMIT_CORE, size, 1); - if (size > cprm->limit - || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note))) - goto end_coredump; -@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump_params *cprm) - phdr.p_offset = offset; - phdr.p_vaddr = vma->vm_start; - phdr.p_paddr = 0; -- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags); -+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr); - phdr.p_memsz = vma->vm_end - vma->vm_start; - offset += phdr.p_filesz; - phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; -@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump_params *cprm) - phdr.p_align = ELF_EXEC_PAGESIZE; - - size += sizeof(phdr); -+ gr_learn_resource(current, RLIMIT_CORE, size, 1); - if (size > cprm->limit - || !dump_write(cprm->file, &phdr, sizeof(phdr))) - goto end_coredump; -@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump_params *cprm) - unsigned long addr; - unsigned long end; - -- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags); -+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr); - - for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { - struct page *page; -@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump_params *cprm) - page = get_dump_page(addr); - if (page) { - void *kaddr = kmap(page); -+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1); - stop = ((size += PAGE_SIZE) > cprm->limit) || - !dump_write(cprm->file, kaddr, - PAGE_SIZE); -@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump_params *cprm) - - if (e_phnum == PN_XNUM) { - size += sizeof(*shdr4extnum); -+ gr_learn_resource(current, RLIMIT_CORE, size, 1); - if (size > cprm->limit - || !dump_write(cprm->file, shdr4extnum, - sizeof(*shdr4extnum))) -@@ -2075,6 +2388,97 @@ out: - - #endif /* CONFIG_ELF_CORE */ - -+#ifdef CONFIG_PAX_MPROTECT -+/* PaX: non-PIC ELF libraries need relocations on their executable segments -+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly -+ * we'll remove VM_MAYWRITE for good on RELRO segments. -+ * -+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment -+ * basis because we want to allow the common case and not the special ones. -+ */ -+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags) -+{ -+ struct elfhdr elf_h; -+ struct elf_phdr elf_p; -+ unsigned long i; -+ unsigned long oldflags; -+ bool is_textrel_rw, is_textrel_rx, is_relro; -+ -+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT)) -+ return; -+ -+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ); -+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ; -+ -+#ifdef CONFIG_PAX_ELFRELOCS -+ /* possible TEXTREL */ -+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ); -+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ); -+#else -+ is_textrel_rw = false; -+ is_textrel_rx = false; -+#endif -+ -+ /* possible RELRO */ -+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ); -+ -+ if (!is_textrel_rw && !is_textrel_rx && !is_relro) -+ return; -+ -+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) || -+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || -+ -+#ifdef CONFIG_PAX_ETEXECRELOCS -+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || -+#else -+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) || -+#endif -+ -+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || -+ !elf_check_arch(&elf_h) || -+ elf_h.e_phentsize != sizeof(struct elf_phdr) || -+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr)) -+ return; -+ -+ for (i = 0UL; i < elf_h.e_phnum; i++) { -+ if (sizeof(elf_p) != 
kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p))) -+ return; -+ switch (elf_p.p_type) { -+ case PT_DYNAMIC: -+ if (!is_textrel_rw && !is_textrel_rx) -+ continue; -+ i = 0UL; -+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) { -+ elf_dyn dyn; -+ -+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn))) -+ return; -+ if (dyn.d_tag == DT_NULL) -+ return; -+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { -+ gr_log_textrel(vma); -+ if (is_textrel_rw) -+ vma->vm_flags |= VM_MAYWRITE; -+ else -+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */ -+ vma->vm_flags &= ~VM_MAYWRITE; -+ return; -+ } -+ i++; -+ } -+ return; -+ -+ case PT_GNU_RELRO: -+ if (!is_relro) -+ continue; -+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start) -+ vma->vm_flags &= ~VM_MAYWRITE; -+ return; -+ } -+ } -+} -+#endif -+ - static int __init init_elf_binfmt(void) - { - return register_binfmt(&elf_format); -diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c -index 1bffbe0..c8c283e 100644 ---- a/fs/binfmt_flat.c -+++ b/fs/binfmt_flat.c -@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm, - realdatastart = (unsigned long) -ENOMEM; - printk("Unable to allocate RAM for process data, errno %d\n", - (int)-realdatastart); -+ down_write(¤t->mm->mmap_sem); - do_munmap(current->mm, textpos, text_len); -+ up_write(¤t->mm->mmap_sem); - ret = realdatastart; - goto err; - } -@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm, - } - if (IS_ERR_VALUE(result)) { - printk("Unable to read data+bss, errno %d\n", (int)-result); -+ down_write(¤t->mm->mmap_sem); - do_munmap(current->mm, textpos, text_len); - do_munmap(current->mm, realdatastart, len); -+ up_write(¤t->mm->mmap_sem); - ret = result; - goto err; - } -@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm, - } - if (IS_ERR_VALUE(result)) { - printk("Unable to read code+data+bss, errno %d\n",(int)-result); -+ down_write(¤t->mm->mmap_sem); - do_munmap(current->mm, textpos, text_len + data_len + extra + - MAX_SHARED_LIBS * sizeof(unsigned long)); -+ up_write(¤t->mm->mmap_sem); - ret = result; - goto err; - } -diff --git a/fs/bio.c b/fs/bio.c -index 9bfade8..782f3b9 100644 ---- a/fs/bio.c -+++ b/fs/bio.c -@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err) - const int read = bio_data_dir(bio) == READ; - struct bio_map_data *bmd = bio->bi_private; - int i; -- char *p = bmd->sgvecs[0].iov_base; -+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base; - - __bio_for_each_segment(bvec, bio, i, 0) { - char *addr = page_address(bvec->bv_page); -diff --git a/fs/block_dev.c b/fs/block_dev.c -index 1c44b8d..e2507b4 100644 ---- a/fs/block_dev.c -+++ b/fs/block_dev.c -@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, - else if (bdev->bd_contains == bdev) - return true; /* is a whole device which isn't held */ - -- else if (whole->bd_holder == bd_may_claim) -+ else if (whole->bd_holder == (void *)bd_may_claim) - return true; /* is a partition of a device that is being partitioned */ - else if (whole->bd_holder != NULL) - return false; /* is a partition of a held device */ -diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c -index 011cab3..9ace713 100644 ---- a/fs/btrfs/ctree.c -+++ b/fs/btrfs/ctree.c -@@ -488,9 +488,12 @@ 
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, - free_extent_buffer(buf); - add_root_to_dirty_list(root); - } else { -- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) -- parent_start = parent->start; -- else -+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { -+ if (parent) -+ parent_start = parent->start; -+ else -+ parent_start = 0; -+ } else - parent_start = 0; - - WARN_ON(trans->transid != btrfs_header_generation(parent)); -diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c -index b2d004a..6bb543d 100644 ---- a/fs/btrfs/inode.c -+++ b/fs/btrfs/inode.c -@@ -6922,7 +6922,7 @@ fail: - return -ENOMEM; - } - --static int btrfs_getattr(struct vfsmount *mnt, -+int btrfs_getattr(struct vfsmount *mnt, - struct dentry *dentry, struct kstat *stat) - { - struct inode *inode = dentry->d_inode; -@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt, - return 0; - } - -+EXPORT_SYMBOL(btrfs_getattr); -+ -+dev_t get_btrfs_dev_from_inode(struct inode *inode) -+{ -+ return BTRFS_I(inode)->root->anon_dev; -+} -+EXPORT_SYMBOL(get_btrfs_dev_from_inode); -+ - /* - * If a file is moved, it will inherit the cow and compression flags of the new - * directory. -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index dae5dfe..6aa01b1 100644 ---- a/fs/btrfs/ioctl.c -+++ b/fs/btrfs/ioctl.c -@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) - for (i = 0; i < num_types; i++) { - struct btrfs_space_info *tmp; - -+ /* Don't copy in more than we allocated */ - if (!slot_count) - break; - -+ slot_count--; -+ - info = NULL; - rcu_read_lock(); - list_for_each_entry_rcu(tmp, &root->fs_info->space_info, -@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) - memcpy(dest, &space, sizeof(space)); - dest++; - space_args.total_spaces++; -- slot_count--; - } -- if (!slot_count) -- break; - } - up_read(&info->groups_sem); - } - -- user_dest = (struct btrfs_ioctl_space_info *) -+ user_dest = (struct btrfs_ioctl_space_info __user *) - (arg + sizeof(struct btrfs_ioctl_space_args)); - - if (copy_to_user(user_dest, dest_orig, alloc_size)) -diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c -index 59bb176..be9977d 100644 ---- a/fs/btrfs/relocation.c -+++ b/fs/btrfs/relocation.c -@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del) - } - spin_unlock(&rc->reloc_root_tree.lock); - -- BUG_ON((struct btrfs_root *)node->data != root); -+ BUG_ON(!node || (struct btrfs_root *)node->data != root); - - if (!del) { - spin_lock(&rc->reloc_root_tree.lock); -diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c -index 622f469..e8d2d55 100644 ---- a/fs/cachefiles/bind.c -+++ b/fs/cachefiles/bind.c -@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) - args); - - /* start by checking things over */ -- ASSERT(cache->fstop_percent >= 0 && -- cache->fstop_percent < cache->fcull_percent && -+ ASSERT(cache->fstop_percent < cache->fcull_percent && - cache->fcull_percent < cache->frun_percent && - cache->frun_percent < 100); - -- ASSERT(cache->bstop_percent >= 0 && -- cache->bstop_percent < cache->bcull_percent && -+ ASSERT(cache->bstop_percent < cache->bcull_percent && - cache->bcull_percent < cache->brun_percent && - cache->brun_percent < 100); - -diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c -index 0a1467b..6a53245 100644 ---- a/fs/cachefiles/daemon.c -+++ b/fs/cachefiles/daemon.c -@@ -196,7 +196,7 @@ static ssize_t 
cachefiles_daemon_read(struct file *file, char __user *_buffer, - if (n > buflen) - return -EMSGSIZE; - -- if (copy_to_user(_buffer, buffer, n) != 0) -+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0) - return -EFAULT; - - return n; -@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file, - if (test_bit(CACHEFILES_DEAD, &cache->flags)) - return -EIO; - -- if (datalen < 0 || datalen > PAGE_SIZE - 1) -+ if (datalen > PAGE_SIZE - 1) - return -EOPNOTSUPP; - - /* drag the command string into the kernel so we can parse it */ -@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) - if (args[0] != '%' || args[1] != '\0') - return -EINVAL; - -- if (fstop < 0 || fstop >= cache->fcull_percent) -+ if (fstop >= cache->fcull_percent) - return cachefiles_daemon_range_error(cache, args); - - cache->fstop_percent = fstop; -@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) - if (args[0] != '%' || args[1] != '\0') - return -EINVAL; - -- if (bstop < 0 || bstop >= cache->bcull_percent) -+ if (bstop >= cache->bcull_percent) - return cachefiles_daemon_range_error(cache, args); - - cache->bstop_percent = bstop; -diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h -index bd6bc1b..b627b53 100644 ---- a/fs/cachefiles/internal.h -+++ b/fs/cachefiles/internal.h -@@ -57,7 +57,7 @@ struct cachefiles_cache { - wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */ - struct rb_root active_nodes; /* active nodes (can't be culled) */ - rwlock_t active_lock; /* lock for active_nodes */ -- atomic_t gravecounter; /* graveyard uniquifier */ -+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */ - unsigned frun_percent; /* when to stop culling (% files) */ - unsigned fcull_percent; /* when to start culling (% files) */ - unsigned fstop_percent; /* when to stop allocating (% files) */ -@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache, - * proc.c - */ - #ifdef CONFIG_CACHEFILES_HISTOGRAM --extern atomic_t cachefiles_lookup_histogram[HZ]; --extern atomic_t cachefiles_mkdir_histogram[HZ]; --extern atomic_t cachefiles_create_histogram[HZ]; -+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ]; -+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; -+extern atomic_unchecked_t cachefiles_create_histogram[HZ]; - - extern int __init cachefiles_proc_init(void); - extern void cachefiles_proc_cleanup(void); - static inline --void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) -+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif) - { - unsigned long jif = jiffies - start_jif; - if (jif >= HZ) - jif = HZ - 1; -- atomic_inc(&histogram[jif]); -+ atomic_inc_unchecked(&histogram[jif]); - } - - #else -diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c -index a0358c2..d6137f2 100644 ---- a/fs/cachefiles/namei.c -+++ b/fs/cachefiles/namei.c -@@ -318,7 +318,7 @@ try_again: - /* first step is to make up a grave dentry in the graveyard */ - sprintf(nbuffer, "%08x%08x", - (uint32_t) get_seconds(), -- (uint32_t) atomic_inc_return(&cache->gravecounter)); -+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter)); - - /* do the multiway lock magic */ - trap = lock_rename(cache->graveyard, dir); -diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c -index eccd339..4c1d995 100644 ---- a/fs/cachefiles/proc.c -+++ b/fs/cachefiles/proc.c -@@ -14,9 +14,9 @@ - #include <linux/seq_file.h> - 
#include "internal.h" - --atomic_t cachefiles_lookup_histogram[HZ]; --atomic_t cachefiles_mkdir_histogram[HZ]; --atomic_t cachefiles_create_histogram[HZ]; -+atomic_unchecked_t cachefiles_lookup_histogram[HZ]; -+atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; -+atomic_unchecked_t cachefiles_create_histogram[HZ]; - - /* - * display the latency histogram -@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v) - return 0; - default: - index = (unsigned long) v - 3; -- x = atomic_read(&cachefiles_lookup_histogram[index]); -- y = atomic_read(&cachefiles_mkdir_histogram[index]); -- z = atomic_read(&cachefiles_create_histogram[index]); -+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]); -+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]); -+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]); - if (x == 0 && y == 0 && z == 0) - return 0; - -diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c -index 0e3c092..818480e 100644 ---- a/fs/cachefiles/rdwr.c -+++ b/fs/cachefiles/rdwr.c -@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = file->f_op->write( -- file, (const void __user *) data, len, &pos); -+ file, (const void __force_user *) data, len, &pos); - set_fs(old_fs); - kunmap(page); - if (ret != len) -diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c -index 382abc9..bd89646 100644 ---- a/fs/ceph/dir.c -+++ b/fs/ceph/dir.c -@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_mds_client *mdsc = fsc->mdsc; - unsigned frag = fpos_frag(filp->f_pos); -- int off = fpos_off(filp->f_pos); -+ unsigned int off = fpos_off(filp->f_pos); - int err; - u32 ftype; - struct ceph_mds_reply_info_parsed *rinfo; -diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c -index 6d40656..bc1f825 100644 ---- a/fs/cifs/cifs_debug.c -+++ b/fs/cifs/cifs_debug.c -@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, - - if (c == '1' || c == 'y' || c == 'Y' || c == '0') { - #ifdef CONFIG_CIFS_STATS2 -- atomic_set(&totBufAllocCount, 0); -- atomic_set(&totSmBufAllocCount, 0); -+ atomic_set_unchecked(&totBufAllocCount, 0); -+ atomic_set_unchecked(&totSmBufAllocCount, 0); - #endif /* CONFIG_CIFS_STATS2 */ - spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp1, &cifs_tcp_ses_list) { -@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file, - tcon = list_entry(tmp3, - struct cifs_tcon, - tcon_list); -- atomic_set(&tcon->num_smbs_sent, 0); -- atomic_set(&tcon->num_writes, 0); -- atomic_set(&tcon->num_reads, 0); -- atomic_set(&tcon->num_oplock_brks, 0); -- atomic_set(&tcon->num_opens, 0); -- atomic_set(&tcon->num_posixopens, 0); -- atomic_set(&tcon->num_posixmkdirs, 0); -- atomic_set(&tcon->num_closes, 0); -- atomic_set(&tcon->num_deletes, 0); -- atomic_set(&tcon->num_mkdirs, 0); -- atomic_set(&tcon->num_rmdirs, 0); -- atomic_set(&tcon->num_renames, 0); -- atomic_set(&tcon->num_t2renames, 0); -- atomic_set(&tcon->num_ffirst, 0); -- atomic_set(&tcon->num_fnext, 0); -- atomic_set(&tcon->num_fclose, 0); -- atomic_set(&tcon->num_hardlinks, 0); -- atomic_set(&tcon->num_symlinks, 0); -- atomic_set(&tcon->num_locks, 0); -+ atomic_set_unchecked(&tcon->num_smbs_sent, 0); -+ atomic_set_unchecked(&tcon->num_writes, 0); -+ atomic_set_unchecked(&tcon->num_reads, 0); -+ atomic_set_unchecked(&tcon->num_oplock_brks, 0); -+ 
atomic_set_unchecked(&tcon->num_opens, 0); -+ atomic_set_unchecked(&tcon->num_posixopens, 0); -+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0); -+ atomic_set_unchecked(&tcon->num_closes, 0); -+ atomic_set_unchecked(&tcon->num_deletes, 0); -+ atomic_set_unchecked(&tcon->num_mkdirs, 0); -+ atomic_set_unchecked(&tcon->num_rmdirs, 0); -+ atomic_set_unchecked(&tcon->num_renames, 0); -+ atomic_set_unchecked(&tcon->num_t2renames, 0); -+ atomic_set_unchecked(&tcon->num_ffirst, 0); -+ atomic_set_unchecked(&tcon->num_fnext, 0); -+ atomic_set_unchecked(&tcon->num_fclose, 0); -+ atomic_set_unchecked(&tcon->num_hardlinks, 0); -+ atomic_set_unchecked(&tcon->num_symlinks, 0); -+ atomic_set_unchecked(&tcon->num_locks, 0); - } - } - } -@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) - smBufAllocCount.counter, cifs_min_small); - #ifdef CONFIG_CIFS_STATS2 - seq_printf(m, "Total Large %d Small %d Allocations\n", -- atomic_read(&totBufAllocCount), -- atomic_read(&totSmBufAllocCount)); -+ atomic_read_unchecked(&totBufAllocCount), -+ atomic_read_unchecked(&totSmBufAllocCount)); - #endif /* CONFIG_CIFS_STATS2 */ - - seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); -@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) - if (tcon->need_reconnect) - seq_puts(m, "\tDISCONNECTED "); - seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", -- atomic_read(&tcon->num_smbs_sent), -- atomic_read(&tcon->num_oplock_brks)); -+ atomic_read_unchecked(&tcon->num_smbs_sent), -+ atomic_read_unchecked(&tcon->num_oplock_brks)); - seq_printf(m, "\nReads: %d Bytes: %lld", -- atomic_read(&tcon->num_reads), -+ atomic_read_unchecked(&tcon->num_reads), - (long long)(tcon->bytes_read)); - seq_printf(m, "\nWrites: %d Bytes: %lld", -- atomic_read(&tcon->num_writes), -+ atomic_read_unchecked(&tcon->num_writes), - (long long)(tcon->bytes_written)); - seq_printf(m, "\nFlushes: %d", -- atomic_read(&tcon->num_flushes)); -+ atomic_read_unchecked(&tcon->num_flushes)); - seq_printf(m, "\nLocks: %d HardLinks: %d " - "Symlinks: %d", -- atomic_read(&tcon->num_locks), -- atomic_read(&tcon->num_hardlinks), -- atomic_read(&tcon->num_symlinks)); -+ atomic_read_unchecked(&tcon->num_locks), -+ atomic_read_unchecked(&tcon->num_hardlinks), -+ atomic_read_unchecked(&tcon->num_symlinks)); - seq_printf(m, "\nOpens: %d Closes: %d " - "Deletes: %d", -- atomic_read(&tcon->num_opens), -- atomic_read(&tcon->num_closes), -- atomic_read(&tcon->num_deletes)); -+ atomic_read_unchecked(&tcon->num_opens), -+ atomic_read_unchecked(&tcon->num_closes), -+ atomic_read_unchecked(&tcon->num_deletes)); - seq_printf(m, "\nPosix Opens: %d " - "Posix Mkdirs: %d", -- atomic_read(&tcon->num_posixopens), -- atomic_read(&tcon->num_posixmkdirs)); -+ atomic_read_unchecked(&tcon->num_posixopens), -+ atomic_read_unchecked(&tcon->num_posixmkdirs)); - seq_printf(m, "\nMkdirs: %d Rmdirs: %d", -- atomic_read(&tcon->num_mkdirs), -- atomic_read(&tcon->num_rmdirs)); -+ atomic_read_unchecked(&tcon->num_mkdirs), -+ atomic_read_unchecked(&tcon->num_rmdirs)); - seq_printf(m, "\nRenames: %d T2 Renames %d", -- atomic_read(&tcon->num_renames), -- atomic_read(&tcon->num_t2renames)); -+ atomic_read_unchecked(&tcon->num_renames), -+ atomic_read_unchecked(&tcon->num_t2renames)); - seq_printf(m, "\nFindFirst: %d FNext %d " - "FClose %d", -- atomic_read(&tcon->num_ffirst), -- atomic_read(&tcon->num_fnext), -- atomic_read(&tcon->num_fclose)); -+ atomic_read_unchecked(&tcon->num_ffirst), -+ atomic_read_unchecked(&tcon->num_fnext), -+ 
atomic_read_unchecked(&tcon->num_fclose)); - } - } - } -diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c -index 54b8f1e..f6a4c00 100644 ---- a/fs/cifs/cifsfs.c -+++ b/fs/cifs/cifsfs.c -@@ -981,7 +981,7 @@ cifs_init_request_bufs(void) - cifs_req_cachep = kmem_cache_create("cifs_request", - CIFSMaxBufSize + - MAX_CIFS_HDR_SIZE, 0, -- SLAB_HWCACHE_ALIGN, NULL); -+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL); - if (cifs_req_cachep == NULL) - return -ENOMEM; - -@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void) - efficient to alloc 1 per page off the slab compared to 17K (5page) - alloc of large cifs buffers even when page debugging is on */ - cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", -- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, -+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, - NULL); - if (cifs_sm_req_cachep == NULL) { - mempool_destroy(cifs_req_poolp); -@@ -1093,8 +1093,8 @@ init_cifs(void) - atomic_set(&bufAllocCount, 0); - atomic_set(&smBufAllocCount, 0); - #ifdef CONFIG_CIFS_STATS2 -- atomic_set(&totBufAllocCount, 0); -- atomic_set(&totSmBufAllocCount, 0); -+ atomic_set_unchecked(&totBufAllocCount, 0); -+ atomic_set_unchecked(&totSmBufAllocCount, 0); - #endif /* CONFIG_CIFS_STATS2 */ - - atomic_set(&midCount, 0); -diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h -index 95dad9d..fe7af1a 100644 ---- a/fs/cifs/cifsglob.h -+++ b/fs/cifs/cifsglob.h -@@ -381,28 +381,28 @@ struct cifs_tcon { - __u16 Flags; /* optional support bits */ - enum statusEnum tidStatus; - #ifdef CONFIG_CIFS_STATS -- atomic_t num_smbs_sent; -- atomic_t num_writes; -- atomic_t num_reads; -- atomic_t num_flushes; -- atomic_t num_oplock_brks; -- atomic_t num_opens; -- atomic_t num_closes; -- atomic_t num_deletes; -- atomic_t num_mkdirs; -- atomic_t num_posixopens; -- atomic_t num_posixmkdirs; -- atomic_t num_rmdirs; -- atomic_t num_renames; -- atomic_t num_t2renames; -- atomic_t num_ffirst; -- atomic_t num_fnext; -- atomic_t num_fclose; -- atomic_t num_hardlinks; -- atomic_t num_symlinks; -- atomic_t num_locks; -- atomic_t num_acl_get; -- atomic_t num_acl_set; -+ atomic_unchecked_t num_smbs_sent; -+ atomic_unchecked_t num_writes; -+ atomic_unchecked_t num_reads; -+ atomic_unchecked_t num_flushes; -+ atomic_unchecked_t num_oplock_brks; -+ atomic_unchecked_t num_opens; -+ atomic_unchecked_t num_closes; -+ atomic_unchecked_t num_deletes; -+ atomic_unchecked_t num_mkdirs; -+ atomic_unchecked_t num_posixopens; -+ atomic_unchecked_t num_posixmkdirs; -+ atomic_unchecked_t num_rmdirs; -+ atomic_unchecked_t num_renames; -+ atomic_unchecked_t num_t2renames; -+ atomic_unchecked_t num_ffirst; -+ atomic_unchecked_t num_fnext; -+ atomic_unchecked_t num_fclose; -+ atomic_unchecked_t num_hardlinks; -+ atomic_unchecked_t num_symlinks; -+ atomic_unchecked_t num_locks; -+ atomic_unchecked_t num_acl_get; -+ atomic_unchecked_t num_acl_set; - #ifdef CONFIG_CIFS_STATS2 - unsigned long long time_writes; - unsigned long long time_reads; -@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim) - } - - #ifdef CONFIG_CIFS_STATS --#define cifs_stats_inc atomic_inc -+#define cifs_stats_inc atomic_inc_unchecked - - static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon, - unsigned int bytes) -@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount; - /* Various Debug counters */ - GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ - #ifdef CONFIG_CIFS_STATS2 --GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ --GLOBAL_EXTERN atomic_t 
totSmBufAllocCount; -+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */ -+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount; - #endif - GLOBAL_EXTERN atomic_t smBufAllocCount; - GLOBAL_EXTERN atomic_t midCount; -diff --git a/fs/cifs/link.c b/fs/cifs/link.c -index db3f18c..1f5955e 100644 ---- a/fs/cifs/link.c -+++ b/fs/cifs/link.c -@@ -593,7 +593,7 @@ symlink_exit: - - void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie) - { -- char *p = nd_get_link(nd); -+ const char *p = nd_get_link(nd); - if (!IS_ERR(p)) - kfree(p); - } -diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c -index 7c16933..c8212b5 100644 ---- a/fs/cifs/misc.c -+++ b/fs/cifs/misc.c -@@ -156,7 +156,7 @@ cifs_buf_get(void) - memset(ret_buf, 0, sizeof(struct smb_hdr) + 3); - atomic_inc(&bufAllocCount); - #ifdef CONFIG_CIFS_STATS2 -- atomic_inc(&totBufAllocCount); -+ atomic_inc_unchecked(&totBufAllocCount); - #endif /* CONFIG_CIFS_STATS2 */ - } - -@@ -191,7 +191,7 @@ cifs_small_buf_get(void) - /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ - atomic_inc(&smBufAllocCount); - #ifdef CONFIG_CIFS_STATS2 -- atomic_inc(&totSmBufAllocCount); -+ atomic_inc_unchecked(&totSmBufAllocCount); - #endif /* CONFIG_CIFS_STATS2 */ - - } -diff --git a/fs/coda/cache.c b/fs/coda/cache.c -index 6901578..d402eb5 100644 ---- a/fs/coda/cache.c -+++ b/fs/coda/cache.c -@@ -24,7 +24,7 @@ - #include "coda_linux.h" - #include "coda_cache.h" - --static atomic_t permission_epoch = ATOMIC_INIT(0); -+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0); - - /* replace or extend an acl cache hit */ - void coda_cache_enter(struct inode *inode, int mask) -@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask) - struct coda_inode_info *cii = ITOC(inode); - - spin_lock(&cii->c_lock); -- cii->c_cached_epoch = atomic_read(&permission_epoch); -+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch); - if (cii->c_uid != current_fsuid()) { - cii->c_uid = current_fsuid(); - cii->c_cached_perm = mask; -@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode) - { - struct coda_inode_info *cii = ITOC(inode); - spin_lock(&cii->c_lock); -- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; -+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1; - spin_unlock(&cii->c_lock); - } - - /* remove all acl caches */ - void coda_cache_clear_all(struct super_block *sb) - { -- atomic_inc(&permission_epoch); -+ atomic_inc_unchecked(&permission_epoch); - } - - -@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask) - spin_lock(&cii->c_lock); - hit = (mask & cii->c_cached_perm) == mask && - cii->c_uid == current_fsuid() && -- cii->c_cached_epoch == atomic_read(&permission_epoch); -+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch); - spin_unlock(&cii->c_lock); - - return hit; -diff --git a/fs/compat.c b/fs/compat.c -index 58b1da4..afcd9b8 100644 ---- a/fs/compat.c -+++ b/fs/compat.c -@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim - static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) - { - compat_ino_t ino = stat->ino; -- typeof(ubuf->st_uid) uid = 0; -- typeof(ubuf->st_gid) gid = 0; -+ typeof(((struct compat_stat *)0)->st_uid) uid = 0; -+ typeof(((struct compat_stat *)0)->st_gid) gid = 0; - int err; - - SET_UID(uid, stat->uid); -@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) - - set_fs(KERNEL_DS); - /* The __user pointer 
cast is valid because of the set_fs() */ -- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64); -+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64); - set_fs(oldfs); - /* truncating is ok because it's a user address */ - if (!ret) -@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type, - goto out; - - ret = -EINVAL; -- if (nr_segs > UIO_MAXIOV || nr_segs < 0) -+ if (nr_segs > UIO_MAXIOV) - goto out; - if (nr_segs > fast_segs) { - ret = -ENOMEM; -@@ -848,6 +848,7 @@ struct compat_old_linux_dirent { - - struct compat_readdir_callback { - struct compat_old_linux_dirent __user *dirent; -+ struct file * file; - int result; - }; - -@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, - buf->result = -EOVERFLOW; - return -EOVERFLOW; - } -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - buf->result++; - dirent = buf->dirent; - if (!access_ok(VERIFY_WRITE, dirent, -@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, - - buf.result = 0; - buf.dirent = dirent; -+ buf.file = file; - - error = vfs_readdir(file, compat_fillonedir, &buf); - if (buf.result) -@@ -917,6 +923,7 @@ struct compat_linux_dirent { - struct compat_getdents_callback { - struct compat_linux_dirent __user *current_dir; - struct compat_linux_dirent __user *previous; -+ struct file * file; - int count; - int error; - }; -@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, - buf->error = -EOVERFLOW; - return -EOVERFLOW; - } -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - dirent = buf->previous; - if (dirent) { - if (__put_user(offset, &dirent->d_off)) -@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, - buf.previous = NULL; - buf.count = count; - buf.error = 0; -+ buf.file = file; - - error = vfs_readdir(file, compat_filldir, &buf); - if (error >= 0) -@@ -1006,6 +1018,7 @@ out: - struct compat_getdents_callback64 { - struct linux_dirent64 __user *current_dir; - struct linux_dirent64 __user *previous; -+ struct file * file; - int count; - int error; - }; -@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t - buf->error = -EINVAL; /* only used if we fail.. 
*/ - if (reclen > buf->count) - return -EINVAL; -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - dirent = buf->previous; - - if (dirent) { -@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, - buf.previous = NULL; - buf.count = count; - buf.error = 0; -+ buf.file = file; - - error = vfs_readdir(file, compat_filldir64, &buf); - if (error >= 0) - error = buf.error; - lastdirent = buf.previous; - if (lastdirent) { -- typeof(lastdirent->d_off) d_off = file->f_pos; -+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; - if (__put_user_unaligned(d_off, &lastdirent->d_off)) - error = -EFAULT; - else -@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, - struct fdtable *fdt; - long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; - -+ pax_track_stack(); -+ - if (n < 0) - goto out_nofds; - -diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c -index 112e45a..b59845b 100644 ---- a/fs/compat_binfmt_elf.c -+++ b/fs/compat_binfmt_elf.c -@@ -30,11 +30,13 @@ - #undef elf_phdr - #undef elf_shdr - #undef elf_note -+#undef elf_dyn - #undef elf_addr_t - #define elfhdr elf32_hdr - #define elf_phdr elf32_phdr - #define elf_shdr elf32_shdr - #define elf_note elf32_note -+#define elf_dyn Elf32_Dyn - #define elf_addr_t Elf32_Addr - - /* -diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c -index 51352de..93292ff 100644 ---- a/fs/compat_ioctl.c -+++ b/fs/compat_ioctl.c -@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, - - err = get_user(palp, &up->palette); - err |= get_user(length, &up->length); -+ if (err) -+ return -EFAULT; - - up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); - err = put_user(compat_ptr(palp), &up_native->palette); -@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, - return -EFAULT; - if (__get_user(udata, &ss32->iomem_base)) - return -EFAULT; -- ss.iomem_base = compat_ptr(udata); -+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata); - if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || - __get_user(ss.port_high, &ss32->port_high)) - return -EFAULT; -@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file, - copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || - copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || - copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || -- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) -+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32))) - return -EFAULT; - - return ioctl_preallocate(file, p); -@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, - static int __init init_sys32_ioctl_cmp(const void *p, const void *q) - { - unsigned int a, b; -- a = *(unsigned int *)p; -- b = *(unsigned int *)q; -+ a = *(const unsigned int *)p; -+ b = *(const unsigned int *)q; - if (a > b) - return 1; - if (a < b) -diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c -index 9a37a9b..35792b6 100644 ---- a/fs/configfs/dir.c -+++ b/fs/configfs/dir.c -@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir - } - for (p=q->next; p!= &parent_sd->s_children; p=p->next) { - struct configfs_dirent *next; -- const char * name; -+ const unsigned char * name; -+ char d_name[sizeof(next->s_dentry->d_iname)]; - int len; - struct inode *inode = NULL; - -@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir - 
continue; - - name = configfs_get_name(next); -- len = strlen(name); -+ if (next->s_dentry && name == next->s_dentry->d_iname) { -+ len = next->s_dentry->d_name.len; -+ memcpy(d_name, name, len); -+ name = d_name; -+ } else -+ len = strlen(name); - - /* - * We'll have a dentry and an inode for -diff --git a/fs/dcache.c b/fs/dcache.c -index a88948b..1e32160 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned long mempages) - mempages -= reserve; - - names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, -- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); -+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL); - - dcache_init(); - inode_init(); -diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c -index 528da01..bd8c23d 100644 ---- a/fs/ecryptfs/inode.c -+++ b/fs/ecryptfs/inode.c -@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, - old_fs = get_fs(); - set_fs(get_ds()); - rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, -- (char __user *)lower_buf, -+ (char __force_user *)lower_buf, - lower_bufsiz); - set_fs(old_fs); - if (rc < 0) -@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) - } - old_fs = get_fs(); - set_fs(get_ds()); -- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); -+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len); - set_fs(old_fs); - if (rc < 0) { - kfree(buf); -@@ -752,7 +752,7 @@ out: - static void - ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) - { -- char *buf = nd_get_link(nd); -+ const char *buf = nd_get_link(nd); - if (!IS_ERR(buf)) { - /* Free the char* */ - kfree(buf); -diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c -index 940a82e..63af89e 100644 ---- a/fs/ecryptfs/miscdev.c -+++ b/fs/ecryptfs/miscdev.c -@@ -328,7 +328,7 @@ check_list: - goto out_unlock_msg_ctx; - i = 5; - if (msg_ctx->msg) { -- if (copy_to_user(&buf[i], packet_length, packet_length_size)) -+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size)) - goto out_unlock_msg_ctx; - i += packet_length_size; - if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) -diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c -index 3745f7c..89cc7a3 100644 ---- a/fs/ecryptfs/read_write.c -+++ b/fs/ecryptfs/read_write.c -@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, - return -EIO; - fs_save = get_fs(); - set_fs(get_ds()); -- rc = vfs_write(lower_file, data, size, &offset); -+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset); - set_fs(fs_save); - mark_inode_dirty_sync(ecryptfs_inode); - return rc; -@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, - return -EIO; - fs_save = get_fs(); - set_fs(get_ds()); -- rc = vfs_read(lower_file, data, size, &offset); -+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset); - set_fs(fs_save); - return rc; - } -diff --git a/fs/exec.c b/fs/exec.c -index 25dcbe5..4ffaa78 100644 ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -55,12 +55,24 @@ - #include <linux/pipe_fs_i.h> - #include <linux/oom.h> - #include <linux/compat.h> -+#include <linux/random.h> -+#include <linux/seq_file.h> -+ -+#ifdef CONFIG_PAX_REFCOUNT -+#include <linux/kallsyms.h> -+#include <linux/kdebug.h> -+#endif - - #include <asm/uaccess.h> - #include <asm/mmu_context.h> - #include <asm/tlb.h> - #include "internal.h" - 
-+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS -+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); -+EXPORT_SYMBOL(pax_set_initial_flags_func); -+#endif -+ - int core_uses_pid; - char core_pattern[CORENAME_MAX_SIZE] = "core"; - unsigned int core_pipe_limit; -@@ -70,7 +82,7 @@ struct core_name { - char *corename; - int used, size; - }; --static atomic_t call_count = ATOMIC_INIT(1); -+static atomic_unchecked_t call_count = ATOMIC_INIT(1); - - /* The maximal length of core_pattern is also specified in sysctl.c */ - -@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, - int write) - { - struct page *page; -- int ret; - --#ifdef CONFIG_STACK_GROWSUP -- if (write) { -- ret = expand_downwards(bprm->vma, pos); -- if (ret < 0) -- return NULL; -- } --#endif -- ret = get_user_pages(current, bprm->mm, pos, -- 1, write, 1, &page, NULL); -- if (ret <= 0) -+ if (0 > expand_downwards(bprm->vma, pos)) -+ return NULL; -+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL)) - return NULL; - - if (write) { -@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) - vma->vm_end = STACK_TOP_MAX; - vma->vm_start = vma->vm_end - PAGE_SIZE; - vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC); -+#endif -+ - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - INIT_LIST_HEAD(&vma->anon_vma_chain); - -@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) - mm->stack_vm = mm->total_vm = 1; - up_write(&mm->mmap_sem); - bprm->p = vma->vm_end - sizeof(void *); -+ -+#ifdef CONFIG_PAX_RANDUSTACK -+ if (randomize_va_space) -+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK; -+#endif -+ - return 0; - err: - up_write(&mm->mmap_sem); -@@ -396,19 +411,7 @@ err: - return err; - } - --struct user_arg_ptr { --#ifdef CONFIG_COMPAT -- bool is_compat; --#endif -- union { -- const char __user *const __user *native; --#ifdef CONFIG_COMPAT -- compat_uptr_t __user *compat; --#endif -- } ptr; --}; -- --static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) -+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) - { - const char __user *native; - -@@ -417,14 +420,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) - compat_uptr_t compat; - - if (get_user(compat, argv.ptr.compat + nr)) -- return ERR_PTR(-EFAULT); -+ return (const char __force_user *)ERR_PTR(-EFAULT); - - return compat_ptr(compat); - } - #endif - - if (get_user(native, argv.ptr.native + nr)) -- return ERR_PTR(-EFAULT); -+ return (const char __force_user *)ERR_PTR(-EFAULT); - - return native; - } -@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr argv, int max) - if (!p) - break; - -- if (IS_ERR(p)) -+ if (IS_ERR((const char __force_kernel *)p)) - return -EFAULT; - - if (i++ >= max) -@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, - - ret = -EFAULT; - str = get_user_arg_ptr(argv, argc); -- if (IS_ERR(str)) -+ if (IS_ERR((const char __force_kernel *)str)) - goto out; - - len = strnlen_user(str, MAX_ARG_STRLEN); -@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, - int r; - mm_segment_t oldfs = get_fs(); - struct user_arg_ptr argv = { -- .ptr.native = (const char __user *const __user *)__argv, -+ .ptr.native = (const char __force_user *const __force_user *)__argv, - }; - - set_fs(KERNEL_DS); -@@ -594,7 +597,8 @@ static int 
shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) - unsigned long new_end = old_end - shift; - struct mmu_gather tlb; - -- BUG_ON(new_start > new_end); -+ if (new_start >= new_end || new_start < mmap_min_addr) -+ return -ENOMEM; - - /* - * ensure there are no vmas between where we want to go -@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) - if (vma != find_vma(mm, new_start)) - return -EFAULT; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ BUG_ON(pax_find_mirror_vma(vma)); -+#endif -+ - /* - * cover the whole range: [new_start, old_end) - */ -@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm *bprm, - stack_top = arch_align_stack(stack_top); - stack_top = PAGE_ALIGN(stack_top); - -- if (unlikely(stack_top < mmap_min_addr) || -- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) -- return -ENOMEM; -- - stack_shift = vma->vm_end - stack_top; - - bprm->p -= stack_shift; -@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm *bprm, - bprm->exec -= stack_shift; - - down_write(&mm->mmap_sem); -+ -+ /* Move stack pages down in memory. */ -+ if (stack_shift) { -+ ret = shift_arg_pages(vma, stack_shift); -+ if (ret) -+ goto out_unlock; -+ } -+ - vm_flags = VM_STACK_FLAGS; - -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ vm_flags &= ~VM_EXEC; -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (mm->pax_flags & MF_PAX_MPROTECT) -+ vm_flags &= ~VM_MAYEXEC; -+#endif -+ -+ } -+#endif -+ - /* - * Adjust stack execute permissions; explicitly enable for - * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone -@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm *bprm, - goto out_unlock; - BUG_ON(prev != vma); - -- /* Move stack pages down in memory. 
*/ -- if (stack_shift) { -- ret = shift_arg_pages(vma, stack_shift); -- if (ret) -- goto out_unlock; -- } -- - /* mprotect_fixup is overkill to remove the temporary stack flags */ - vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; - -@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_t offset, - old_fs = get_fs(); - set_fs(get_ds()); - /* The cast to a user pointer is valid due to the set_fs() */ -- result = vfs_read(file, (void __user *)addr, count, &pos); -+ result = vfs_read(file, (void __force_user *)addr, count, &pos); - set_fs(old_fs); - return result; - } -@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) - } - rcu_read_unlock(); - -- if (p->fs->users > n_fs) { -+ if (atomic_read(&p->fs->users) > n_fs) { - bprm->unsafe |= LSM_UNSAFE_SHARE; - } else { - res = -EAGAIN; -@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *filename, - struct user_arg_ptr envp, - struct pt_regs *regs) - { -+#ifdef CONFIG_GRKERNSEC -+ struct file *old_exec_file; -+ struct acl_subject_label *old_acl; -+ struct rlimit old_rlim[RLIM_NLIMITS]; -+#endif - struct linux_binprm *bprm; - struct file *file; - struct files_struct *displaced; -@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *filename, - int retval; - const struct cred *cred = current_cred(); - -+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1); -+ - /* - * We move the actual failure in case of RLIMIT_NPROC excess from - * set*uid() to execve() because too many poorly written programs -@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *filename, - bprm->filename = filename; - bprm->interp = filename; - -+ if (gr_process_user_ban()) { -+ retval = -EPERM; -+ goto out_file; -+ } -+ -+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { -+ retval = -EACCES; -+ goto out_file; -+ } -+ - retval = bprm_mm_init(bprm); - if (retval) - goto out_file; -@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *filename, - if (retval < 0) - goto out; - -+ if (!gr_tpe_allow(file)) { -+ retval = -EACCES; -+ goto out; -+ } -+ -+ if (gr_check_crash_exec(file)) { -+ retval = -EACCES; -+ goto out; -+ } -+ -+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); -+ -+ gr_handle_exec_args(bprm, argv); -+ -+#ifdef CONFIG_GRKERNSEC -+ old_acl = current->acl; -+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); -+ old_exec_file = current->exec_file; -+ get_file(file); -+ current->exec_file = file; -+#endif -+ -+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, -+ bprm->unsafe & LSM_UNSAFE_SHARE); -+ if (retval < 0) -+ goto out_fail; -+ - retval = search_binary_handler(bprm,regs); - if (retval < 0) -- goto out; -+ goto out_fail; -+#ifdef CONFIG_GRKERNSEC -+ if (old_exec_file) -+ fput(old_exec_file); -+#endif - - /* execve succeeded */ - current->fs->in_exec = 0; -@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *filename, - put_files_struct(displaced); - return retval; - -+out_fail: -+#ifdef CONFIG_GRKERNSEC -+ current->acl = old_acl; -+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); -+ fput(current->exec_file); -+ current->exec_file = old_exec_file; -+#endif -+ - out: - if (bprm->mm) { - acct_arg_size(bprm, 0); -@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_name *cn) - { - char *old_corename = cn->corename; - -- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); -+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count); - cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); 
- - if (!cn->corename) { -@@ -1719,7 +1792,7 @@ static int format_corename(struct core_name *cn, long signr) - int pid_in_pattern = 0; - int err = 0; - -- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); -+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count); - cn->corename = kmalloc(cn->size, GFP_KERNEL); - cn->used = 0; - -@@ -1816,6 +1889,218 @@ out: - return ispipe; - } - -+int pax_check_flags(unsigned long *flags) -+{ -+ int retval = 0; -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) -+ if (*flags & MF_PAX_SEGMEXEC) -+ { -+ *flags &= ~MF_PAX_SEGMEXEC; -+ retval = -EINVAL; -+ } -+#endif -+ -+ if ((*flags & MF_PAX_PAGEEXEC) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ && (*flags & MF_PAX_SEGMEXEC) -+#endif -+ -+ ) -+ { -+ *flags &= ~MF_PAX_PAGEEXEC; -+ retval = -EINVAL; -+ } -+ -+ if ((*flags & MF_PAX_MPROTECT) -+ -+#ifdef CONFIG_PAX_MPROTECT -+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) -+#endif -+ -+ ) -+ { -+ *flags &= ~MF_PAX_MPROTECT; -+ retval = -EINVAL; -+ } -+ -+ if ((*flags & MF_PAX_EMUTRAMP) -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) -+#endif -+ -+ ) -+ { -+ *flags &= ~MF_PAX_EMUTRAMP; -+ retval = -EINVAL; -+ } -+ -+ return retval; -+} -+ -+EXPORT_SYMBOL(pax_check_flags); -+ -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) -+{ -+ struct task_struct *tsk = current; -+ struct mm_struct *mm = current->mm; -+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL); -+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL); -+ char *path_exec = NULL; -+ char *path_fault = NULL; -+ unsigned long start = 0UL, end = 0UL, offset = 0UL; -+ -+ if (buffer_exec && buffer_fault) { -+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; -+ -+ down_read(&mm->mmap_sem); -+ vma = mm->mmap; -+ while (vma && (!vma_exec || !vma_fault)) { -+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) -+ vma_exec = vma; -+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) -+ vma_fault = vma; -+ vma = vma->vm_next; -+ } -+ if (vma_exec) { -+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE); -+ if (IS_ERR(path_exec)) -+ path_exec = "<path too long>"; -+ else { -+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\"); -+ if (path_exec) { -+ *path_exec = 0; -+ path_exec = buffer_exec; -+ } else -+ path_exec = "<path too long>"; -+ } -+ } -+ if (vma_fault) { -+ start = vma_fault->vm_start; -+ end = vma_fault->vm_end; -+ offset = vma_fault->vm_pgoff << PAGE_SHIFT; -+ if (vma_fault->vm_file) { -+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE); -+ if (IS_ERR(path_fault)) -+ path_fault = "<path too long>"; -+ else { -+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\"); -+ if (path_fault) { -+ *path_fault = 0; -+ path_fault = buffer_fault; -+ } else -+ path_fault = "<path too long>"; -+ } -+ } else -+ path_fault = "<anonymous mapping>"; -+ } -+ up_read(&mm->mmap_sem); -+ } -+ if (tsk->signal->curr_ip) -+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); -+ else -+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); -+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " -+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), -+ task_uid(tsk), task_euid(tsk), pc, sp); -+ free_page((unsigned 
long)buffer_exec); -+ free_page((unsigned long)buffer_fault); -+ pax_report_insns(regs, pc, sp); -+ do_coredump(SIGKILL, SIGKILL, regs); -+} -+#endif -+ -+#ifdef CONFIG_PAX_REFCOUNT -+void pax_report_refcount_overflow(struct pt_regs *regs) -+{ -+ if (current->signal->curr_ip) -+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", -+ ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); -+ else -+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", -+ current->comm, task_pid_nr(current), current_uid(), current_euid()); -+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); -+ show_regs(regs); -+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current); -+} -+#endif -+ -+#ifdef CONFIG_PAX_USERCOPY -+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */ -+int object_is_on_stack(const void *obj, unsigned long len) -+{ -+ const void * const stack = task_stack_page(current); -+ const void * const stackend = stack + THREAD_SIZE; -+ -+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) -+ const void *frame = NULL; -+ const void *oldframe; -+#endif -+ -+ if (obj + len < obj) -+ return -1; -+ -+ if (obj + len <= stack || stackend <= obj) -+ return 0; -+ -+ if (obj < stack || stackend < obj + len) -+ return -1; -+ -+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) -+ oldframe = __builtin_frame_address(1); -+ if (oldframe) -+ frame = __builtin_frame_address(2); -+ /* -+ low ----------------------------------------------> high -+ [saved bp][saved ip][args][local vars][saved bp][saved ip] -+ ^----------------^ -+ allow copies only within here -+ */ -+ while (stack <= frame && frame < stackend) { -+ /* if obj + len extends past the last frame, this -+ check won't pass and the next frame will be 0, -+ causing us to bail out and correctly report -+ the copy as invalid -+ */ -+ if (obj + len <= frame) -+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1; -+ oldframe = frame; -+ frame = *(const void * const *)frame; -+ } -+ return -1; -+#else -+ return 1; -+#endif -+} -+ -+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) -+{ -+ if (current->signal->curr_ip) -+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", -+ ¤t->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len); -+ else -+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", -+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? 
: "unknown", len); -+ dump_stack(); -+ gr_handle_kernel_exploit(); -+ do_group_exit(SIGKILL); -+} -+#endif -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+void pax_track_stack(void) -+{ -+ unsigned long sp = (unsigned long)&sp; -+ if (sp < current_thread_info()->lowest_stack && -+ sp > (unsigned long)task_stack_page(current)) -+ current_thread_info()->lowest_stack = sp; -+} -+EXPORT_SYMBOL(pax_track_stack); -+#endif -+ - static int zap_process(struct task_struct *start, int exit_code) - { - struct task_struct *t; -@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct file *file) - pipe = file->f_path.dentry->d_inode->i_pipe; - - pipe_lock(pipe); -- pipe->readers++; -- pipe->writers--; -+ atomic_inc(&pipe->readers); -+ atomic_dec(&pipe->writers); - -- while ((pipe->readers > 1) && (!signal_pending(current))) { -+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) { - wake_up_interruptible_sync(&pipe->wait); - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - pipe_wait(pipe); - } - -- pipe->readers--; -- pipe->writers++; -+ atomic_dec(&pipe->readers); -+ atomic_inc(&pipe->writers); - pipe_unlock(pipe); - - } -@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) - int retval = 0; - int flag = 0; - int ispipe; -- static atomic_t core_dump_count = ATOMIC_INIT(0); -+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0); - struct coredump_params cprm = { - .signr = signr, - .regs = regs, -@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) - - audit_core_dumps(signr); - -+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) -+ gr_handle_brute_attach(current, cprm.mm_flags); -+ - binfmt = mm->binfmt; - if (!binfmt || !binfmt->core_dump) - goto fail; -@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) - } - cprm.limit = RLIM_INFINITY; - -- dump_count = atomic_inc_return(&core_dump_count); -+ dump_count = atomic_inc_return_unchecked(&core_dump_count); - if (core_pipe_limit && (core_pipe_limit < dump_count)) { - printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", - task_tgid_vnr(current), current->comm); -@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) - } else { - struct inode *inode; - -+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); -+ - if (cprm.limit < binfmt->min_coredump) - goto fail_unlock; - -@@ -2250,7 +2540,7 @@ close_fail: - filp_close(cprm.file, NULL); - fail_dropcount: - if (ispipe) -- atomic_dec(&core_dump_count); -+ atomic_dec_unchecked(&core_dump_count); - fail_unlock: - kfree(cn.corename); - fail_corename: -@@ -2269,7 +2559,7 @@ fail: - */ - int dump_write(struct file *file, const void *addr, int nr) - { -- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; -+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr; - } - EXPORT_SYMBOL(dump_write); - -diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c -index 8f44cef..cb07120 100644 ---- a/fs/ext2/balloc.c -+++ b/fs/ext2/balloc.c -@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi) - - free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); - root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); -- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && -+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && - 
sbi->s_resuid != current_fsuid() && - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { - return 0; -diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c -index 6386d76..0a266b1 100644 ---- a/fs/ext3/balloc.c -+++ b/fs/ext3/balloc.c -@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) - - free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); - root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); -- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && -+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && - sbi->s_resuid != current_fsuid() && - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { - return 0; -diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c -index f8224ad..fbef97c 100644 ---- a/fs/ext4/balloc.c -+++ b/fs/ext4/balloc.c -@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, - /* Hm, nope. Are (enough) root reserved blocks available? */ - if (sbi->s_resuid == current_fsuid() || - ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || -- capable(CAP_SYS_RESOURCE) || -- (flags & EXT4_MB_USE_ROOT_BLOCKS)) { -+ (flags & EXT4_MB_USE_ROOT_BLOCKS) || -+ capable_nolog(CAP_SYS_RESOURCE)) { - - if (free_blocks >= (nblocks + dirty_blocks)) - return 1; -diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index 5c38120..2291d18 100644 ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -1180,19 +1180,19 @@ struct ext4_sb_info { - unsigned long s_mb_last_start; - - /* stats for buddy allocator */ -- atomic_t s_bal_reqs; /* number of reqs with len > 1 */ -- atomic_t s_bal_success; /* we found long enough chunks */ -- atomic_t s_bal_allocated; /* in blocks */ -- atomic_t s_bal_ex_scanned; /* total extents scanned */ -- atomic_t s_bal_goals; /* goal hits */ -- atomic_t s_bal_breaks; /* too long searches */ -- atomic_t s_bal_2orders; /* 2^order hits */ -+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */ -+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */ -+ atomic_unchecked_t s_bal_allocated; /* in blocks */ -+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */ -+ atomic_unchecked_t s_bal_goals; /* goal hits */ -+ atomic_unchecked_t s_bal_breaks; /* too long searches */ -+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */ - spinlock_t s_bal_lock; - unsigned long s_mb_buddies_generated; - unsigned long long s_mb_generation_time; -- atomic_t s_mb_lost_chunks; -- atomic_t s_mb_preallocated; -- atomic_t s_mb_discarded; -+ atomic_unchecked_t s_mb_lost_chunks; -+ atomic_unchecked_t s_mb_preallocated; -+ atomic_unchecked_t s_mb_discarded; - atomic_t s_lock_busy; - - /* locality groups */ -diff --git a/fs/ext4/file.c b/fs/ext4/file.c -index e4095e9..1c006c5 100644 ---- a/fs/ext4/file.c -+++ b/fs/ext4/file.c -@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp) - path.dentry = mnt->mnt_root; - cp = d_path(&path, buf, sizeof(buf)); - if (!IS_ERR(cp)) { -- memcpy(sbi->s_es->s_last_mounted, cp, -- sizeof(sbi->s_es->s_last_mounted)); -+ strlcpy(sbi->s_es->s_last_mounted, cp, -+ sizeof(sbi->s_es->s_last_mounted)); - ext4_mark_super_dirty(sb); - } - } -diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c -index f18bfe3..43759b1 100644 ---- a/fs/ext4/ioctl.c -+++ b/fs/ext4/ioctl.c -@@ -348,7 +348,7 @@ mext_out: - if (!blk_queue_discard(q)) - return -EOPNOTSUPP; - -- if (copy_from_user(&range, (struct fstrim_range *)arg, -+ if (copy_from_user(&range, (struct fstrim_range __user *)arg, - sizeof(range))) - return -EFAULT; - -@@ -358,7 
+358,7 @@ mext_out: - if (ret < 0) - return ret; - -- if (copy_to_user((struct fstrim_range *)arg, &range, -+ if (copy_to_user((struct fstrim_range __user *)arg, &range, - sizeof(range))) - return -EFAULT; - -diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c -index 17a5a57..b6be3c5 100644 ---- a/fs/ext4/mballoc.c -+++ b/fs/ext4/mballoc.c -@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, - BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); - - if (EXT4_SB(sb)->s_mb_stats) -- atomic_inc(&EXT4_SB(sb)->s_bal_2orders); -+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders); - - break; - } -@@ -2089,7 +2089,7 @@ repeat: - ac->ac_status = AC_STATUS_CONTINUE; - ac->ac_flags |= EXT4_MB_HINT_FIRST; - cr = 3; -- atomic_inc(&sbi->s_mb_lost_chunks); -+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks); - goto repeat; - } - } -@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) - ext4_grpblk_t counters[16]; - } sg; - -+ pax_track_stack(); -+ - group--; - if (group == 0) - seq_printf(seq, "#%-5s: %-5s %-5s %-5s " -@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb) - if (sbi->s_mb_stats) { - ext4_msg(sb, KERN_INFO, - "mballoc: %u blocks %u reqs (%u success)", -- atomic_read(&sbi->s_bal_allocated), -- atomic_read(&sbi->s_bal_reqs), -- atomic_read(&sbi->s_bal_success)); -+ atomic_read_unchecked(&sbi->s_bal_allocated), -+ atomic_read_unchecked(&sbi->s_bal_reqs), -+ atomic_read_unchecked(&sbi->s_bal_success)); - ext4_msg(sb, KERN_INFO, - "mballoc: %u extents scanned, %u goal hits, " - "%u 2^N hits, %u breaks, %u lost", -- atomic_read(&sbi->s_bal_ex_scanned), -- atomic_read(&sbi->s_bal_goals), -- atomic_read(&sbi->s_bal_2orders), -- atomic_read(&sbi->s_bal_breaks), -- atomic_read(&sbi->s_mb_lost_chunks)); -+ atomic_read_unchecked(&sbi->s_bal_ex_scanned), -+ atomic_read_unchecked(&sbi->s_bal_goals), -+ atomic_read_unchecked(&sbi->s_bal_2orders), -+ atomic_read_unchecked(&sbi->s_bal_breaks), -+ atomic_read_unchecked(&sbi->s_mb_lost_chunks)); - ext4_msg(sb, KERN_INFO, - "mballoc: %lu generated and it took %Lu", - sbi->s_mb_buddies_generated, - sbi->s_mb_generation_time); - ext4_msg(sb, KERN_INFO, - "mballoc: %u preallocated, %u discarded", -- atomic_read(&sbi->s_mb_preallocated), -- atomic_read(&sbi->s_mb_discarded)); -+ atomic_read_unchecked(&sbi->s_mb_preallocated), -+ atomic_read_unchecked(&sbi->s_mb_discarded)); - } - - free_percpu(sbi->s_locality_groups); -@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) - struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); - - if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { -- atomic_inc(&sbi->s_bal_reqs); -- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); -+ atomic_inc_unchecked(&sbi->s_bal_reqs); -+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); - if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) -- atomic_inc(&sbi->s_bal_success); -- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); -+ atomic_inc_unchecked(&sbi->s_bal_success); -+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned); - if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && - ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) -- atomic_inc(&sbi->s_bal_goals); -+ atomic_inc_unchecked(&sbi->s_bal_goals); - if (ac->ac_found > sbi->s_mb_max_to_scan) -- atomic_inc(&sbi->s_bal_breaks); -+ atomic_inc_unchecked(&sbi->s_bal_breaks); - } - - if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) -@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) - 
trace_ext4_mb_new_inode_pa(ac, pa); - - ext4_mb_use_inode_pa(ac, pa); -- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); -+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); - - ei = EXT4_I(ac->ac_inode); - grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); -@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) - trace_ext4_mb_new_group_pa(ac, pa); - - ext4_mb_use_group_pa(ac, pa); -- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); -+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); - - grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); - lg = ac->ac_lg; -@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, - * from the bitmap and continue. - */ - } -- atomic_add(free, &sbi->s_mb_discarded); -+ atomic_add_unchecked(free, &sbi->s_mb_discarded); - - return err; - } -@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, - ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); - BUG_ON(group != e4b->bd_group && pa->pa_len != 0); - mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); -- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); -+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); - trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); - - return 0; -diff --git a/fs/fcntl.c b/fs/fcntl.c -index 22764c7..86372c9 100644 ---- a/fs/fcntl.c -+++ b/fs/fcntl.c -@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, - if (err) - return err; - -+ if (gr_handle_chroot_fowner(pid, type)) -+ return -ENOENT; -+ if (gr_check_protected_task_fowner(pid, type)) -+ return -EACCES; -+ - f_modown(filp, pid, type, force); - return 0; - } -@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp) - - static int f_setown_ex(struct file *filp, unsigned long arg) - { -- struct f_owner_ex * __user owner_p = (void * __user)arg; -+ struct f_owner_ex __user *owner_p = (void __user *)arg; - struct f_owner_ex owner; - struct pid *pid; - int type; -@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg) - - static int f_getown_ex(struct file *filp, unsigned long arg) - { -- struct f_owner_ex * __user owner_p = (void * __user)arg; -+ struct f_owner_ex __user *owner_p = (void __user *)arg; - struct f_owner_ex owner; - int ret = 0; - -@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, - switch (cmd) { - case F_DUPFD: - case F_DUPFD_CLOEXEC: -+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0); - if (arg >= rlimit(RLIMIT_NOFILE)) - break; - err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0); -diff --git a/fs/fifo.c b/fs/fifo.c -index b1a524d..4ee270e 100644 ---- a/fs/fifo.c -+++ b/fs/fifo.c -@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp) - */ - filp->f_op = &read_pipefifo_fops; - pipe->r_counter++; -- if (pipe->readers++ == 0) -+ if (atomic_inc_return(&pipe->readers) == 1) - wake_up_partner(inode); - -- if (!pipe->writers) { -+ if (!atomic_read(&pipe->writers)) { - if ((filp->f_flags & O_NONBLOCK)) { - /* suppress POLLHUP until we have - * seen a writer */ -@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp) - * errno=ENXIO when there is no process reading the FIFO. 
- */ - ret = -ENXIO; -- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) -+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers)) - goto err; - - filp->f_op = &write_pipefifo_fops; - pipe->w_counter++; -- if (!pipe->writers++) -+ if (atomic_inc_return(&pipe->writers) == 1) - wake_up_partner(inode); - -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - wait_for_partner(inode, &pipe->r_counter); - if (signal_pending(current)) - goto err_wr; -@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp) - */ - filp->f_op = &rdwr_pipefifo_fops; - -- pipe->readers++; -- pipe->writers++; -+ atomic_inc(&pipe->readers); -+ atomic_inc(&pipe->writers); - pipe->r_counter++; - pipe->w_counter++; -- if (pipe->readers == 1 || pipe->writers == 1) -+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1) - wake_up_partner(inode); - break; - -@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp) - return 0; - - err_rd: -- if (!--pipe->readers) -+ if (atomic_dec_and_test(&pipe->readers)) - wake_up_interruptible(&pipe->wait); - ret = -ERESTARTSYS; - goto err; - - err_wr: -- if (!--pipe->writers) -+ if (atomic_dec_and_test(&pipe->writers)) - wake_up_interruptible(&pipe->wait); - ret = -ERESTARTSYS; - goto err; - - err: -- if (!pipe->readers && !pipe->writers) -+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) - free_pipe_info(inode); - - err_nocleanup: -diff --git a/fs/file.c b/fs/file.c -index 4c6992d..104cdea 100644 ---- a/fs/file.c -+++ b/fs/file.c -@@ -15,6 +15,7 @@ - #include <linux/slab.h> - #include <linux/vmalloc.h> - #include <linux/file.h> -+#include <linux/security.h> - #include <linux/fdtable.h> - #include <linux/bitops.h> - #include <linux/interrupt.h> -@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr) - * N.B. For clone tasks sharing a files structure, this test - * will limit the total number of files that can be opened. - */ -+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0); - if (nr >= rlimit(RLIMIT_NOFILE)) - return -EMFILE; - -diff --git a/fs/filesystems.c b/fs/filesystems.c -index 0845f84..7b4ebef 100644 ---- a/fs/filesystems.c -+++ b/fs/filesystems.c -@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name) - int len = dot ? 
dot - name : strlen(name); - - fs = __get_fs_type(name, len); -+ -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0)) -+#else - if (!fs && (request_module("%.*s", len, name) == 0)) -+#endif - fs = __get_fs_type(name, len); - - if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { -diff --git a/fs/fs_struct.c b/fs/fs_struct.c -index 78b519c..212c0d0 100644 ---- a/fs/fs_struct.c -+++ b/fs/fs_struct.c -@@ -4,6 +4,7 @@ - #include <linux/path.h> - #include <linux/slab.h> - #include <linux/fs_struct.h> -+#include <linux/grsecurity.h> - #include "internal.h" - - static inline void path_get_longterm(struct path *path) -@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path) - old_root = fs->root; - fs->root = *path; - path_get_longterm(path); -+ gr_set_chroot_entries(current, path); - write_seqcount_end(&fs->seq); - spin_unlock(&fs->lock); - if (old_root.dentry) -@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) - && fs->root.mnt == old_root->mnt) { - path_get_longterm(new_root); - fs->root = *new_root; -+ gr_set_chroot_entries(p, new_root); - count++; - } - if (fs->pwd.dentry == old_root->dentry -@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk) - spin_lock(&fs->lock); - write_seqcount_begin(&fs->seq); - tsk->fs = NULL; -- kill = !--fs->users; -+ gr_clear_chroot_entries(tsk); -+ kill = !atomic_dec_return(&fs->users); - write_seqcount_end(&fs->seq); - spin_unlock(&fs->lock); - task_unlock(tsk); -@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) - struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); - /* We don't need to lock fs - think why ;-) */ - if (fs) { -- fs->users = 1; -+ atomic_set(&fs->users, 1); - fs->in_exec = 0; - spin_lock_init(&fs->lock); - seqcount_init(&fs->seq); -@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) - spin_lock(&old->lock); - fs->root = old->root; - path_get_longterm(&fs->root); -+ /* instead of calling gr_set_chroot_entries here, -+ we call it from every caller of this function -+ */ - fs->pwd = old->pwd; - path_get_longterm(&fs->pwd); - spin_unlock(&old->lock); -@@ -150,8 +157,9 @@ int unshare_fs_struct(void) - - task_lock(current); - spin_lock(&fs->lock); -- kill = !--fs->users; -+ kill = !atomic_dec_return(&fs->users); - current->fs = new_fs; -+ gr_set_chroot_entries(current, &new_fs->root); - spin_unlock(&fs->lock); - task_unlock(current); - -@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask); - - /* to be mentioned only in INIT_TASK */ - struct fs_struct init_fs = { -- .users = 1, -+ .users = ATOMIC_INIT(1), - .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), - .seq = SEQCNT_ZERO, - .umask = 0022, -@@ -186,12 +194,13 @@ void daemonize_fs_struct(void) - task_lock(current); - - spin_lock(&init_fs.lock); -- init_fs.users++; -+ atomic_inc(&init_fs.users); - spin_unlock(&init_fs.lock); - - spin_lock(&fs->lock); - current->fs = &init_fs; -- kill = !--fs->users; -+ gr_set_chroot_entries(current, ¤t->fs->root); -+ kill = !atomic_dec_return(&fs->users); - spin_unlock(&fs->lock); - - task_unlock(current); -diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c -index 9905350..02eaec4 100644 ---- a/fs/fscache/cookie.c -+++ b/fs/fscache/cookie.c -@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie( - parent ? 
(char *) parent->def->name : "<no-parent>", - def->name, netfs_data); - -- fscache_stat(&fscache_n_acquires); -+ fscache_stat_unchecked(&fscache_n_acquires); - - /* if there's no parent cookie, then we don't create one here either */ - if (!parent) { -- fscache_stat(&fscache_n_acquires_null); -+ fscache_stat_unchecked(&fscache_n_acquires_null); - _leave(" [no parent]"); - return NULL; - } -@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie( - /* allocate and initialise a cookie */ - cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); - if (!cookie) { -- fscache_stat(&fscache_n_acquires_oom); -+ fscache_stat_unchecked(&fscache_n_acquires_oom); - _leave(" [ENOMEM]"); - return NULL; - } -@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie( - - switch (cookie->def->type) { - case FSCACHE_COOKIE_TYPE_INDEX: -- fscache_stat(&fscache_n_cookie_index); -+ fscache_stat_unchecked(&fscache_n_cookie_index); - break; - case FSCACHE_COOKIE_TYPE_DATAFILE: -- fscache_stat(&fscache_n_cookie_data); -+ fscache_stat_unchecked(&fscache_n_cookie_data); - break; - default: -- fscache_stat(&fscache_n_cookie_special); -+ fscache_stat_unchecked(&fscache_n_cookie_special); - break; - } - -@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie( - if (fscache_acquire_non_index_cookie(cookie) < 0) { - atomic_dec(&parent->n_children); - __fscache_cookie_put(cookie); -- fscache_stat(&fscache_n_acquires_nobufs); -+ fscache_stat_unchecked(&fscache_n_acquires_nobufs); - _leave(" = NULL"); - return NULL; - } - } - -- fscache_stat(&fscache_n_acquires_ok); -+ fscache_stat_unchecked(&fscache_n_acquires_ok); - _leave(" = %p", cookie); - return cookie; - } -@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) - cache = fscache_select_cache_for_object(cookie->parent); - if (!cache) { - up_read(&fscache_addremove_sem); -- fscache_stat(&fscache_n_acquires_no_cache); -+ fscache_stat_unchecked(&fscache_n_acquires_no_cache); - _leave(" = -ENOMEDIUM [no cache]"); - return -ENOMEDIUM; - } -@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache, - object = cache->ops->alloc_object(cache, cookie); - fscache_stat_d(&fscache_n_cop_alloc_object); - if (IS_ERR(object)) { -- fscache_stat(&fscache_n_object_no_alloc); -+ fscache_stat_unchecked(&fscache_n_object_no_alloc); - ret = PTR_ERR(object); - goto error; - } - -- fscache_stat(&fscache_n_object_alloc); -+ fscache_stat_unchecked(&fscache_n_object_alloc); - - object->debug_id = atomic_inc_return(&fscache_object_debug_id); - -@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie) - struct fscache_object *object; - struct hlist_node *_p; - -- fscache_stat(&fscache_n_updates); -+ fscache_stat_unchecked(&fscache_n_updates); - - if (!cookie) { -- fscache_stat(&fscache_n_updates_null); -+ fscache_stat_unchecked(&fscache_n_updates_null); - _leave(" [no cookie]"); - return; - } -@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) - struct fscache_object *object; - unsigned long event; - -- fscache_stat(&fscache_n_relinquishes); -+ fscache_stat_unchecked(&fscache_n_relinquishes); - if (retire) -- fscache_stat(&fscache_n_relinquishes_retire); -+ fscache_stat_unchecked(&fscache_n_relinquishes_retire); - - if (!cookie) { -- fscache_stat(&fscache_n_relinquishes_null); -+ fscache_stat_unchecked(&fscache_n_relinquishes_null); - _leave(" [no cookie]"); - return; - } -@@ -435,7 +435,7 @@ void 
__fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) - - /* wait for the cookie to finish being instantiated (or to fail) */ - if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { -- fscache_stat(&fscache_n_relinquishes_waitcrt); -+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt); - wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } -diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h -index f6aad48..88dcf26 100644 ---- a/fs/fscache/internal.h -+++ b/fs/fscache/internal.h -@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void); - extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; - extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; - --extern atomic_t fscache_n_op_pend; --extern atomic_t fscache_n_op_run; --extern atomic_t fscache_n_op_enqueue; --extern atomic_t fscache_n_op_deferred_release; --extern atomic_t fscache_n_op_release; --extern atomic_t fscache_n_op_gc; --extern atomic_t fscache_n_op_cancelled; --extern atomic_t fscache_n_op_rejected; -+extern atomic_unchecked_t fscache_n_op_pend; -+extern atomic_unchecked_t fscache_n_op_run; -+extern atomic_unchecked_t fscache_n_op_enqueue; -+extern atomic_unchecked_t fscache_n_op_deferred_release; -+extern atomic_unchecked_t fscache_n_op_release; -+extern atomic_unchecked_t fscache_n_op_gc; -+extern atomic_unchecked_t fscache_n_op_cancelled; -+extern atomic_unchecked_t fscache_n_op_rejected; - --extern atomic_t fscache_n_attr_changed; --extern atomic_t fscache_n_attr_changed_ok; --extern atomic_t fscache_n_attr_changed_nobufs; --extern atomic_t fscache_n_attr_changed_nomem; --extern atomic_t fscache_n_attr_changed_calls; -+extern atomic_unchecked_t fscache_n_attr_changed; -+extern atomic_unchecked_t fscache_n_attr_changed_ok; -+extern atomic_unchecked_t fscache_n_attr_changed_nobufs; -+extern atomic_unchecked_t fscache_n_attr_changed_nomem; -+extern atomic_unchecked_t fscache_n_attr_changed_calls; - --extern atomic_t fscache_n_allocs; --extern atomic_t fscache_n_allocs_ok; --extern atomic_t fscache_n_allocs_wait; --extern atomic_t fscache_n_allocs_nobufs; --extern atomic_t fscache_n_allocs_intr; --extern atomic_t fscache_n_allocs_object_dead; --extern atomic_t fscache_n_alloc_ops; --extern atomic_t fscache_n_alloc_op_waits; -+extern atomic_unchecked_t fscache_n_allocs; -+extern atomic_unchecked_t fscache_n_allocs_ok; -+extern atomic_unchecked_t fscache_n_allocs_wait; -+extern atomic_unchecked_t fscache_n_allocs_nobufs; -+extern atomic_unchecked_t fscache_n_allocs_intr; -+extern atomic_unchecked_t fscache_n_allocs_object_dead; -+extern atomic_unchecked_t fscache_n_alloc_ops; -+extern atomic_unchecked_t fscache_n_alloc_op_waits; - --extern atomic_t fscache_n_retrievals; --extern atomic_t fscache_n_retrievals_ok; --extern atomic_t fscache_n_retrievals_wait; --extern atomic_t fscache_n_retrievals_nodata; --extern atomic_t fscache_n_retrievals_nobufs; --extern atomic_t fscache_n_retrievals_intr; --extern atomic_t fscache_n_retrievals_nomem; --extern atomic_t fscache_n_retrievals_object_dead; --extern atomic_t fscache_n_retrieval_ops; --extern atomic_t fscache_n_retrieval_op_waits; -+extern atomic_unchecked_t fscache_n_retrievals; -+extern atomic_unchecked_t fscache_n_retrievals_ok; -+extern atomic_unchecked_t fscache_n_retrievals_wait; -+extern atomic_unchecked_t fscache_n_retrievals_nodata; -+extern atomic_unchecked_t fscache_n_retrievals_nobufs; -+extern atomic_unchecked_t fscache_n_retrievals_intr; -+extern atomic_unchecked_t 
fscache_n_retrievals_nomem; -+extern atomic_unchecked_t fscache_n_retrievals_object_dead; -+extern atomic_unchecked_t fscache_n_retrieval_ops; -+extern atomic_unchecked_t fscache_n_retrieval_op_waits; - --extern atomic_t fscache_n_stores; --extern atomic_t fscache_n_stores_ok; --extern atomic_t fscache_n_stores_again; --extern atomic_t fscache_n_stores_nobufs; --extern atomic_t fscache_n_stores_oom; --extern atomic_t fscache_n_store_ops; --extern atomic_t fscache_n_store_calls; --extern atomic_t fscache_n_store_pages; --extern atomic_t fscache_n_store_radix_deletes; --extern atomic_t fscache_n_store_pages_over_limit; -+extern atomic_unchecked_t fscache_n_stores; -+extern atomic_unchecked_t fscache_n_stores_ok; -+extern atomic_unchecked_t fscache_n_stores_again; -+extern atomic_unchecked_t fscache_n_stores_nobufs; -+extern atomic_unchecked_t fscache_n_stores_oom; -+extern atomic_unchecked_t fscache_n_store_ops; -+extern atomic_unchecked_t fscache_n_store_calls; -+extern atomic_unchecked_t fscache_n_store_pages; -+extern atomic_unchecked_t fscache_n_store_radix_deletes; -+extern atomic_unchecked_t fscache_n_store_pages_over_limit; - --extern atomic_t fscache_n_store_vmscan_not_storing; --extern atomic_t fscache_n_store_vmscan_gone; --extern atomic_t fscache_n_store_vmscan_busy; --extern atomic_t fscache_n_store_vmscan_cancelled; -+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing; -+extern atomic_unchecked_t fscache_n_store_vmscan_gone; -+extern atomic_unchecked_t fscache_n_store_vmscan_busy; -+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled; - --extern atomic_t fscache_n_marks; --extern atomic_t fscache_n_uncaches; -+extern atomic_unchecked_t fscache_n_marks; -+extern atomic_unchecked_t fscache_n_uncaches; - --extern atomic_t fscache_n_acquires; --extern atomic_t fscache_n_acquires_null; --extern atomic_t fscache_n_acquires_no_cache; --extern atomic_t fscache_n_acquires_ok; --extern atomic_t fscache_n_acquires_nobufs; --extern atomic_t fscache_n_acquires_oom; -+extern atomic_unchecked_t fscache_n_acquires; -+extern atomic_unchecked_t fscache_n_acquires_null; -+extern atomic_unchecked_t fscache_n_acquires_no_cache; -+extern atomic_unchecked_t fscache_n_acquires_ok; -+extern atomic_unchecked_t fscache_n_acquires_nobufs; -+extern atomic_unchecked_t fscache_n_acquires_oom; - --extern atomic_t fscache_n_updates; --extern atomic_t fscache_n_updates_null; --extern atomic_t fscache_n_updates_run; -+extern atomic_unchecked_t fscache_n_updates; -+extern atomic_unchecked_t fscache_n_updates_null; -+extern atomic_unchecked_t fscache_n_updates_run; - --extern atomic_t fscache_n_relinquishes; --extern atomic_t fscache_n_relinquishes_null; --extern atomic_t fscache_n_relinquishes_waitcrt; --extern atomic_t fscache_n_relinquishes_retire; -+extern atomic_unchecked_t fscache_n_relinquishes; -+extern atomic_unchecked_t fscache_n_relinquishes_null; -+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt; -+extern atomic_unchecked_t fscache_n_relinquishes_retire; - --extern atomic_t fscache_n_cookie_index; --extern atomic_t fscache_n_cookie_data; --extern atomic_t fscache_n_cookie_special; -+extern atomic_unchecked_t fscache_n_cookie_index; -+extern atomic_unchecked_t fscache_n_cookie_data; -+extern atomic_unchecked_t fscache_n_cookie_special; - --extern atomic_t fscache_n_object_alloc; --extern atomic_t fscache_n_object_no_alloc; --extern atomic_t fscache_n_object_lookups; --extern atomic_t fscache_n_object_lookups_negative; --extern atomic_t fscache_n_object_lookups_positive; --extern 
atomic_t fscache_n_object_lookups_timed_out; --extern atomic_t fscache_n_object_created; --extern atomic_t fscache_n_object_avail; --extern atomic_t fscache_n_object_dead; -+extern atomic_unchecked_t fscache_n_object_alloc; -+extern atomic_unchecked_t fscache_n_object_no_alloc; -+extern atomic_unchecked_t fscache_n_object_lookups; -+extern atomic_unchecked_t fscache_n_object_lookups_negative; -+extern atomic_unchecked_t fscache_n_object_lookups_positive; -+extern atomic_unchecked_t fscache_n_object_lookups_timed_out; -+extern atomic_unchecked_t fscache_n_object_created; -+extern atomic_unchecked_t fscache_n_object_avail; -+extern atomic_unchecked_t fscache_n_object_dead; - --extern atomic_t fscache_n_checkaux_none; --extern atomic_t fscache_n_checkaux_okay; --extern atomic_t fscache_n_checkaux_update; --extern atomic_t fscache_n_checkaux_obsolete; -+extern atomic_unchecked_t fscache_n_checkaux_none; -+extern atomic_unchecked_t fscache_n_checkaux_okay; -+extern atomic_unchecked_t fscache_n_checkaux_update; -+extern atomic_unchecked_t fscache_n_checkaux_obsolete; - - extern atomic_t fscache_n_cop_alloc_object; - extern atomic_t fscache_n_cop_lookup_object; -@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat) - atomic_inc(stat); - } - -+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat) -+{ -+ atomic_inc_unchecked(stat); -+} -+ - static inline void fscache_stat_d(atomic_t *stat) - { - atomic_dec(stat); -@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops; - - #define __fscache_stat(stat) (NULL) - #define fscache_stat(stat) do {} while (0) -+#define fscache_stat_unchecked(stat) do {} while (0) - #define fscache_stat_d(stat) do {} while (0) - #endif - -diff --git a/fs/fscache/object.c b/fs/fscache/object.c -index b6b897c..0ffff9c 100644 ---- a/fs/fscache/object.c -+++ b/fs/fscache/object.c -@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object) - /* update the object metadata on disk */ - case FSCACHE_OBJECT_UPDATING: - clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); -- fscache_stat(&fscache_n_updates_run); -+ fscache_stat_unchecked(&fscache_n_updates_run); - fscache_stat(&fscache_n_cop_update_object); - object->cache->ops->update_object(object); - fscache_stat_d(&fscache_n_cop_update_object); -@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object) - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_DEAD; - spin_unlock(&object->lock); -- fscache_stat(&fscache_n_object_dead); -+ fscache_stat_unchecked(&fscache_n_object_dead); - goto terminal_transit; - - /* handle the parent cache of this object being withdrawn from -@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object) - spin_lock(&object->lock); - object->state = FSCACHE_OBJECT_DEAD; - spin_unlock(&object->lock); -- fscache_stat(&fscache_n_object_dead); -+ fscache_stat_unchecked(&fscache_n_object_dead); - goto terminal_transit; - - /* complain about the object being woken up once it is -@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object) - parent->cookie->def->name, cookie->def->name, - object->cache->tag->name); - -- fscache_stat(&fscache_n_object_lookups); -+ fscache_stat_unchecked(&fscache_n_object_lookups); - fscache_stat(&fscache_n_cop_lookup_object); - ret = object->cache->ops->lookup_object(object); - fscache_stat_d(&fscache_n_cop_lookup_object); -@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object 
*object) - if (ret == -ETIMEDOUT) { - /* probably stuck behind another object, so move this one to - * the back of the queue */ -- fscache_stat(&fscache_n_object_lookups_timed_out); -+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out); - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); - } - -@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object) - - spin_lock(&object->lock); - if (object->state == FSCACHE_OBJECT_LOOKING_UP) { -- fscache_stat(&fscache_n_object_lookups_negative); -+ fscache_stat_unchecked(&fscache_n_object_lookups_negative); - - /* transit here to allow write requests to begin stacking up - * and read requests to begin returning ENODATA */ -@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object) - * result, in which case there may be data available */ - spin_lock(&object->lock); - if (object->state == FSCACHE_OBJECT_LOOKING_UP) { -- fscache_stat(&fscache_n_object_lookups_positive); -+ fscache_stat_unchecked(&fscache_n_object_lookups_positive); - - clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); - -@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object) - set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); - } else { - ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); -- fscache_stat(&fscache_n_object_created); -+ fscache_stat_unchecked(&fscache_n_object_created); - - object->state = FSCACHE_OBJECT_AVAILABLE; - spin_unlock(&object->lock); -@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object) - fscache_enqueue_dependents(object); - - fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); -- fscache_stat(&fscache_n_object_avail); -+ fscache_stat_unchecked(&fscache_n_object_avail); - - _leave(""); - } -@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, - enum fscache_checkaux result; - - if (!object->cookie->def->check_aux) { -- fscache_stat(&fscache_n_checkaux_none); -+ fscache_stat_unchecked(&fscache_n_checkaux_none); - return FSCACHE_CHECKAUX_OKAY; - } - -@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, - switch (result) { - /* entry okay as is */ - case FSCACHE_CHECKAUX_OKAY: -- fscache_stat(&fscache_n_checkaux_okay); -+ fscache_stat_unchecked(&fscache_n_checkaux_okay); - break; - - /* entry requires update */ - case FSCACHE_CHECKAUX_NEEDS_UPDATE: -- fscache_stat(&fscache_n_checkaux_update); -+ fscache_stat_unchecked(&fscache_n_checkaux_update); - break; - - /* entry requires deletion */ - case FSCACHE_CHECKAUX_OBSOLETE: -- fscache_stat(&fscache_n_checkaux_obsolete); -+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete); - break; - - default: -diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c -index 30afdfa..2256596 100644 ---- a/fs/fscache/operation.c -+++ b/fs/fscache/operation.c -@@ -17,7 +17,7 @@ - #include <linux/slab.h> - #include "internal.h" - --atomic_t fscache_op_debug_id; -+atomic_unchecked_t fscache_op_debug_id; - EXPORT_SYMBOL(fscache_op_debug_id); - - /** -@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) - ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); - ASSERTCMP(atomic_read(&op->usage), >, 0); - -- fscache_stat(&fscache_n_op_enqueue); -+ fscache_stat_unchecked(&fscache_n_op_enqueue); - switch (op->flags & FSCACHE_OP_TYPE) { - case FSCACHE_OP_ASYNC: - _debug("queue async"); -@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object, - 
wake_up_bit(&op->flags, FSCACHE_OP_WAITING); - if (op->processor) - fscache_enqueue_operation(op); -- fscache_stat(&fscache_n_op_run); -+ fscache_stat_unchecked(&fscache_n_op_run); - } - - /* -@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object, - if (object->n_ops > 1) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - } else if (!list_empty(&object->pending_ops)) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - fscache_start_operations(object); - } else { - ASSERTCMP(object->n_in_progress, ==, 0); -@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, - object->n_exclusive++; /* reads and writes must wait */ - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - ret = 0; - } else { - /* not allowed to submit ops in any other state */ -@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object, - if (object->n_exclusive > 0) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - } else if (!list_empty(&object->pending_ops)) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - fscache_start_operations(object); - } else { - ASSERTCMP(object->n_exclusive, ==, 0); -@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object, - object->n_ops++; - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); -- fscache_stat(&fscache_n_op_pend); -+ fscache_stat_unchecked(&fscache_n_op_pend); - ret = 0; - } else if (object->state == FSCACHE_OBJECT_DYING || - object->state == FSCACHE_OBJECT_LC_DYING || - object->state == FSCACHE_OBJECT_WITHDRAWING) { -- fscache_stat(&fscache_n_op_rejected); -+ fscache_stat_unchecked(&fscache_n_op_rejected); - ret = -ENOBUFS; - } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { - fscache_report_unexpected_submission(object, op, ostate); -@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op) - - ret = -EBUSY; - if (!list_empty(&op->pend_link)) { -- fscache_stat(&fscache_n_op_cancelled); -+ fscache_stat_unchecked(&fscache_n_op_cancelled); - list_del_init(&op->pend_link); - object->n_ops--; - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) -@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op) - if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) - BUG(); - -- fscache_stat(&fscache_n_op_release); -+ fscache_stat_unchecked(&fscache_n_op_release); - - if (op->release) { - op->release(op); -@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op) - * lock, and defer it otherwise */ - if (!spin_trylock(&object->lock)) { - _debug("defer put"); -- fscache_stat(&fscache_n_op_deferred_release); -+ fscache_stat_unchecked(&fscache_n_op_deferred_release); - - cache = object->cache; - spin_lock(&cache->op_gc_list_lock); -@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work) - - _debug("GC DEFERRED REL OBJ%x OP%x", - object->debug_id, op->debug_id); -- fscache_stat(&fscache_n_op_gc); -+ 
fscache_stat_unchecked(&fscache_n_op_gc); - - ASSERTCMP(atomic_read(&op->usage), ==, 0); - -diff --git a/fs/fscache/page.c b/fs/fscache/page.c -index 3f7a59b..cf196cc 100644 ---- a/fs/fscache/page.c -+++ b/fs/fscache/page.c -@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, - val = radix_tree_lookup(&cookie->stores, page->index); - if (!val) { - rcu_read_unlock(); -- fscache_stat(&fscache_n_store_vmscan_not_storing); -+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing); - __fscache_uncache_page(cookie, page); - return true; - } -@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, - spin_unlock(&cookie->stores_lock); - - if (xpage) { -- fscache_stat(&fscache_n_store_vmscan_cancelled); -- fscache_stat(&fscache_n_store_radix_deletes); -+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled); -+ fscache_stat_unchecked(&fscache_n_store_radix_deletes); - ASSERTCMP(xpage, ==, page); - } else { -- fscache_stat(&fscache_n_store_vmscan_gone); -+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone); - } - - wake_up_bit(&cookie->flags, 0); -@@ -107,7 +107,7 @@ page_busy: - /* we might want to wait here, but that could deadlock the allocator as - * the work threads writing to the cache may all end up sleeping - * on memory allocation */ -- fscache_stat(&fscache_n_store_vmscan_busy); -+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy); - return false; - } - EXPORT_SYMBOL(__fscache_maybe_release_page); -@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object, - FSCACHE_COOKIE_STORING_TAG); - if (!radix_tree_tag_get(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG)) { -- fscache_stat(&fscache_n_store_radix_deletes); -+ fscache_stat_unchecked(&fscache_n_store_radix_deletes); - xpage = radix_tree_delete(&cookie->stores, page->index); - } - spin_unlock(&cookie->stores_lock); -@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) - - _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); - -- fscache_stat(&fscache_n_attr_changed_calls); -+ fscache_stat_unchecked(&fscache_n_attr_changed_calls); - - if (fscache_object_is_active(object)) { - fscache_stat(&fscache_n_cop_attr_changed); -@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - -- fscache_stat(&fscache_n_attr_changed); -+ fscache_stat_unchecked(&fscache_n_attr_changed); - - op = kzalloc(sizeof(*op), GFP_KERNEL); - if (!op) { -- fscache_stat(&fscache_n_attr_changed_nomem); -+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem); - _leave(" = -ENOMEM"); - return -ENOMEM; - } -@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) - if (fscache_submit_exclusive_op(object, op) < 0) - goto nobufs; - spin_unlock(&cookie->lock); -- fscache_stat(&fscache_n_attr_changed_ok); -+ fscache_stat_unchecked(&fscache_n_attr_changed_ok); - fscache_put_operation(op); - _leave(" = 0"); - return 0; -@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) - nobufs: - spin_unlock(&cookie->lock); - kfree(op); -- fscache_stat(&fscache_n_attr_changed_nobufs); -+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs); - _leave(" = %d", -ENOBUFS); - return -ENOBUFS; - } -@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( - /* allocate a retrieval operation and attempt to submit it */ - op = kzalloc(sizeof(*op), GFP_NOIO); - if (!op) { -- 
fscache_stat(&fscache_n_retrievals_nomem); -+ fscache_stat_unchecked(&fscache_n_retrievals_nomem); - return NULL; - } - -@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) - return 0; - } - -- fscache_stat(&fscache_n_retrievals_wait); -+ fscache_stat_unchecked(&fscache_n_retrievals_wait); - - jif = jiffies; - if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) != 0) { -- fscache_stat(&fscache_n_retrievals_intr); -+ fscache_stat_unchecked(&fscache_n_retrievals_intr); - _leave(" = -ERESTARTSYS"); - return -ERESTARTSYS; - } -@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) - */ - static int fscache_wait_for_retrieval_activation(struct fscache_object *object, - struct fscache_retrieval *op, -- atomic_t *stat_op_waits, -- atomic_t *stat_object_dead) -+ atomic_unchecked_t *stat_op_waits, -+ atomic_unchecked_t *stat_object_dead) - { - int ret; - -@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, - goto check_if_dead; - - _debug(">>> WT"); -- fscache_stat(stat_op_waits); -+ fscache_stat_unchecked(stat_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { -@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, - - check_if_dead: - if (unlikely(fscache_object_is_dead(object))) { -- fscache_stat(stat_object_dead); -+ fscache_stat_unchecked(stat_object_dead); - return -ENOBUFS; - } - return 0; -@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, - - _enter("%p,%p,,,", cookie, page); - -- fscache_stat(&fscache_n_retrievals); -+ fscache_stat_unchecked(&fscache_n_retrievals); - - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; -@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, - goto nobufs_unlock; - spin_unlock(&cookie->lock); - -- fscache_stat(&fscache_n_retrieval_ops); -+ fscache_stat_unchecked(&fscache_n_retrieval_ops); - - /* pin the netfs read context in case we need to do the actual netfs - * read because we've encountered a cache read failure */ -@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, - - error: - if (ret == -ENOMEM) -- fscache_stat(&fscache_n_retrievals_nomem); -+ fscache_stat_unchecked(&fscache_n_retrievals_nomem); - else if (ret == -ERESTARTSYS) -- fscache_stat(&fscache_n_retrievals_intr); -+ fscache_stat_unchecked(&fscache_n_retrievals_intr); - else if (ret == -ENODATA) -- fscache_stat(&fscache_n_retrievals_nodata); -+ fscache_stat_unchecked(&fscache_n_retrievals_nodata); - else if (ret < 0) -- fscache_stat(&fscache_n_retrievals_nobufs); -+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); - else -- fscache_stat(&fscache_n_retrievals_ok); -+ fscache_stat_unchecked(&fscache_n_retrievals_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); -@@ -429,7 +429,7 @@ nobufs_unlock: - spin_unlock(&cookie->lock); - kfree(op); - nobufs: -- fscache_stat(&fscache_n_retrievals_nobufs); -+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; - } -@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, - - _enter("%p,,%d,,,", cookie, *nr_pages); - -- fscache_stat(&fscache_n_retrievals); -+ fscache_stat_unchecked(&fscache_n_retrievals); - - if (hlist_empty(&cookie->backing_objects)) - goto 
nobufs; -@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, - goto nobufs_unlock; - spin_unlock(&cookie->lock); - -- fscache_stat(&fscache_n_retrieval_ops); -+ fscache_stat_unchecked(&fscache_n_retrieval_ops); - - /* pin the netfs read context in case we need to do the actual netfs - * read because we've encountered a cache read failure */ -@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, - - error: - if (ret == -ENOMEM) -- fscache_stat(&fscache_n_retrievals_nomem); -+ fscache_stat_unchecked(&fscache_n_retrievals_nomem); - else if (ret == -ERESTARTSYS) -- fscache_stat(&fscache_n_retrievals_intr); -+ fscache_stat_unchecked(&fscache_n_retrievals_intr); - else if (ret == -ENODATA) -- fscache_stat(&fscache_n_retrievals_nodata); -+ fscache_stat_unchecked(&fscache_n_retrievals_nodata); - else if (ret < 0) -- fscache_stat(&fscache_n_retrievals_nobufs); -+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); - else -- fscache_stat(&fscache_n_retrievals_ok); -+ fscache_stat_unchecked(&fscache_n_retrievals_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); -@@ -545,7 +545,7 @@ nobufs_unlock: - spin_unlock(&cookie->lock); - kfree(op); - nobufs: -- fscache_stat(&fscache_n_retrievals_nobufs); -+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; - } -@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, - - _enter("%p,%p,,,", cookie, page); - -- fscache_stat(&fscache_n_allocs); -+ fscache_stat_unchecked(&fscache_n_allocs); - - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; -@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, - goto nobufs_unlock; - spin_unlock(&cookie->lock); - -- fscache_stat(&fscache_n_alloc_ops); -+ fscache_stat_unchecked(&fscache_n_alloc_ops); - - ret = fscache_wait_for_retrieval_activation( - object, op, -@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, - - error: - if (ret == -ERESTARTSYS) -- fscache_stat(&fscache_n_allocs_intr); -+ fscache_stat_unchecked(&fscache_n_allocs_intr); - else if (ret < 0) -- fscache_stat(&fscache_n_allocs_nobufs); -+ fscache_stat_unchecked(&fscache_n_allocs_nobufs); - else -- fscache_stat(&fscache_n_allocs_ok); -+ fscache_stat_unchecked(&fscache_n_allocs_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); -@@ -625,7 +625,7 @@ nobufs_unlock: - spin_unlock(&cookie->lock); - kfree(op); - nobufs: -- fscache_stat(&fscache_n_allocs_nobufs); -+ fscache_stat_unchecked(&fscache_n_allocs_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; - } -@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op) - - spin_lock(&cookie->stores_lock); - -- fscache_stat(&fscache_n_store_calls); -+ fscache_stat_unchecked(&fscache_n_store_calls); - - /* find a page to store */ - page = NULL; -@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op) - page = results[0]; - _debug("gang %d [%lx]", n, page->index); - if (page->index > op->store_limit) { -- fscache_stat(&fscache_n_store_pages_over_limit); -+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit); - goto superseded; - } - -@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op) - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - -- fscache_stat(&fscache_n_store_pages); -+ fscache_stat_unchecked(&fscache_n_store_pages); - fscache_stat(&fscache_n_cop_write_page); - ret = object->cache->ops->write_page(op, page); - 
fscache_stat_d(&fscache_n_cop_write_page); -@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERT(PageFsCache(page)); - -- fscache_stat(&fscache_n_stores); -+ fscache_stat_unchecked(&fscache_n_stores); - - op = kzalloc(sizeof(*op), GFP_NOIO); - if (!op) -@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - -- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); -+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); - op->store_limit = object->store_limit; - - if (fscache_submit_op(object, &op->op) < 0) -@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie, - - spin_unlock(&cookie->lock); - radix_tree_preload_end(); -- fscache_stat(&fscache_n_store_ops); -- fscache_stat(&fscache_n_stores_ok); -+ fscache_stat_unchecked(&fscache_n_store_ops); -+ fscache_stat_unchecked(&fscache_n_stores_ok); - - /* the work queue now carries its own ref on the object */ - fscache_put_operation(&op->op); -@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie, - return 0; - - already_queued: -- fscache_stat(&fscache_n_stores_again); -+ fscache_stat_unchecked(&fscache_n_stores_again); - already_pending: - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - spin_unlock(&cookie->lock); - radix_tree_preload_end(); - kfree(op); -- fscache_stat(&fscache_n_stores_ok); -+ fscache_stat_unchecked(&fscache_n_stores_ok); - _leave(" = 0"); - return 0; - -@@ -851,14 +851,14 @@ nobufs: - spin_unlock(&cookie->lock); - radix_tree_preload_end(); - kfree(op); -- fscache_stat(&fscache_n_stores_nobufs); -+ fscache_stat_unchecked(&fscache_n_stores_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; - - nomem_free: - kfree(op); - nomem: -- fscache_stat(&fscache_n_stores_oom); -+ fscache_stat_unchecked(&fscache_n_stores_oom); - _leave(" = -ENOMEM"); - return -ENOMEM; - } -@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERTCMP(page, !=, NULL); - -- fscache_stat(&fscache_n_uncaches); -+ fscache_stat_unchecked(&fscache_n_uncaches); - - /* cache withdrawal may beat us to it */ - if (!PageFsCache(page)) -@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op, - unsigned long loop; - - #ifdef CONFIG_FSCACHE_STATS -- atomic_add(pagevec->nr, &fscache_n_marks); -+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks); - #endif - - for (loop = 0; loop < pagevec->nr; loop++) { -diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c -index 4765190..2a067f2 100644 ---- a/fs/fscache/stats.c -+++ b/fs/fscache/stats.c -@@ -18,95 +18,95 @@ - /* - * operation counters - */ --atomic_t fscache_n_op_pend; --atomic_t fscache_n_op_run; --atomic_t fscache_n_op_enqueue; --atomic_t fscache_n_op_requeue; --atomic_t fscache_n_op_deferred_release; --atomic_t fscache_n_op_release; --atomic_t fscache_n_op_gc; --atomic_t fscache_n_op_cancelled; --atomic_t fscache_n_op_rejected; -+atomic_unchecked_t fscache_n_op_pend; -+atomic_unchecked_t fscache_n_op_run; -+atomic_unchecked_t fscache_n_op_enqueue; -+atomic_unchecked_t fscache_n_op_requeue; -+atomic_unchecked_t fscache_n_op_deferred_release; -+atomic_unchecked_t fscache_n_op_release; -+atomic_unchecked_t fscache_n_op_gc; -+atomic_unchecked_t fscache_n_op_cancelled; -+atomic_unchecked_t fscache_n_op_rejected; - 
--atomic_t fscache_n_attr_changed; --atomic_t fscache_n_attr_changed_ok; --atomic_t fscache_n_attr_changed_nobufs; --atomic_t fscache_n_attr_changed_nomem; --atomic_t fscache_n_attr_changed_calls; -+atomic_unchecked_t fscache_n_attr_changed; -+atomic_unchecked_t fscache_n_attr_changed_ok; -+atomic_unchecked_t fscache_n_attr_changed_nobufs; -+atomic_unchecked_t fscache_n_attr_changed_nomem; -+atomic_unchecked_t fscache_n_attr_changed_calls; - --atomic_t fscache_n_allocs; --atomic_t fscache_n_allocs_ok; --atomic_t fscache_n_allocs_wait; --atomic_t fscache_n_allocs_nobufs; --atomic_t fscache_n_allocs_intr; --atomic_t fscache_n_allocs_object_dead; --atomic_t fscache_n_alloc_ops; --atomic_t fscache_n_alloc_op_waits; -+atomic_unchecked_t fscache_n_allocs; -+atomic_unchecked_t fscache_n_allocs_ok; -+atomic_unchecked_t fscache_n_allocs_wait; -+atomic_unchecked_t fscache_n_allocs_nobufs; -+atomic_unchecked_t fscache_n_allocs_intr; -+atomic_unchecked_t fscache_n_allocs_object_dead; -+atomic_unchecked_t fscache_n_alloc_ops; -+atomic_unchecked_t fscache_n_alloc_op_waits; - --atomic_t fscache_n_retrievals; --atomic_t fscache_n_retrievals_ok; --atomic_t fscache_n_retrievals_wait; --atomic_t fscache_n_retrievals_nodata; --atomic_t fscache_n_retrievals_nobufs; --atomic_t fscache_n_retrievals_intr; --atomic_t fscache_n_retrievals_nomem; --atomic_t fscache_n_retrievals_object_dead; --atomic_t fscache_n_retrieval_ops; --atomic_t fscache_n_retrieval_op_waits; -+atomic_unchecked_t fscache_n_retrievals; -+atomic_unchecked_t fscache_n_retrievals_ok; -+atomic_unchecked_t fscache_n_retrievals_wait; -+atomic_unchecked_t fscache_n_retrievals_nodata; -+atomic_unchecked_t fscache_n_retrievals_nobufs; -+atomic_unchecked_t fscache_n_retrievals_intr; -+atomic_unchecked_t fscache_n_retrievals_nomem; -+atomic_unchecked_t fscache_n_retrievals_object_dead; -+atomic_unchecked_t fscache_n_retrieval_ops; -+atomic_unchecked_t fscache_n_retrieval_op_waits; - --atomic_t fscache_n_stores; --atomic_t fscache_n_stores_ok; --atomic_t fscache_n_stores_again; --atomic_t fscache_n_stores_nobufs; --atomic_t fscache_n_stores_oom; --atomic_t fscache_n_store_ops; --atomic_t fscache_n_store_calls; --atomic_t fscache_n_store_pages; --atomic_t fscache_n_store_radix_deletes; --atomic_t fscache_n_store_pages_over_limit; -+atomic_unchecked_t fscache_n_stores; -+atomic_unchecked_t fscache_n_stores_ok; -+atomic_unchecked_t fscache_n_stores_again; -+atomic_unchecked_t fscache_n_stores_nobufs; -+atomic_unchecked_t fscache_n_stores_oom; -+atomic_unchecked_t fscache_n_store_ops; -+atomic_unchecked_t fscache_n_store_calls; -+atomic_unchecked_t fscache_n_store_pages; -+atomic_unchecked_t fscache_n_store_radix_deletes; -+atomic_unchecked_t fscache_n_store_pages_over_limit; - --atomic_t fscache_n_store_vmscan_not_storing; --atomic_t fscache_n_store_vmscan_gone; --atomic_t fscache_n_store_vmscan_busy; --atomic_t fscache_n_store_vmscan_cancelled; -+atomic_unchecked_t fscache_n_store_vmscan_not_storing; -+atomic_unchecked_t fscache_n_store_vmscan_gone; -+atomic_unchecked_t fscache_n_store_vmscan_busy; -+atomic_unchecked_t fscache_n_store_vmscan_cancelled; - --atomic_t fscache_n_marks; --atomic_t fscache_n_uncaches; -+atomic_unchecked_t fscache_n_marks; -+atomic_unchecked_t fscache_n_uncaches; - --atomic_t fscache_n_acquires; --atomic_t fscache_n_acquires_null; --atomic_t fscache_n_acquires_no_cache; --atomic_t fscache_n_acquires_ok; --atomic_t fscache_n_acquires_nobufs; --atomic_t fscache_n_acquires_oom; -+atomic_unchecked_t fscache_n_acquires; 
-+atomic_unchecked_t fscache_n_acquires_null; -+atomic_unchecked_t fscache_n_acquires_no_cache; -+atomic_unchecked_t fscache_n_acquires_ok; -+atomic_unchecked_t fscache_n_acquires_nobufs; -+atomic_unchecked_t fscache_n_acquires_oom; - --atomic_t fscache_n_updates; --atomic_t fscache_n_updates_null; --atomic_t fscache_n_updates_run; -+atomic_unchecked_t fscache_n_updates; -+atomic_unchecked_t fscache_n_updates_null; -+atomic_unchecked_t fscache_n_updates_run; - --atomic_t fscache_n_relinquishes; --atomic_t fscache_n_relinquishes_null; --atomic_t fscache_n_relinquishes_waitcrt; --atomic_t fscache_n_relinquishes_retire; -+atomic_unchecked_t fscache_n_relinquishes; -+atomic_unchecked_t fscache_n_relinquishes_null; -+atomic_unchecked_t fscache_n_relinquishes_waitcrt; -+atomic_unchecked_t fscache_n_relinquishes_retire; - --atomic_t fscache_n_cookie_index; --atomic_t fscache_n_cookie_data; --atomic_t fscache_n_cookie_special; -+atomic_unchecked_t fscache_n_cookie_index; -+atomic_unchecked_t fscache_n_cookie_data; -+atomic_unchecked_t fscache_n_cookie_special; - --atomic_t fscache_n_object_alloc; --atomic_t fscache_n_object_no_alloc; --atomic_t fscache_n_object_lookups; --atomic_t fscache_n_object_lookups_negative; --atomic_t fscache_n_object_lookups_positive; --atomic_t fscache_n_object_lookups_timed_out; --atomic_t fscache_n_object_created; --atomic_t fscache_n_object_avail; --atomic_t fscache_n_object_dead; -+atomic_unchecked_t fscache_n_object_alloc; -+atomic_unchecked_t fscache_n_object_no_alloc; -+atomic_unchecked_t fscache_n_object_lookups; -+atomic_unchecked_t fscache_n_object_lookups_negative; -+atomic_unchecked_t fscache_n_object_lookups_positive; -+atomic_unchecked_t fscache_n_object_lookups_timed_out; -+atomic_unchecked_t fscache_n_object_created; -+atomic_unchecked_t fscache_n_object_avail; -+atomic_unchecked_t fscache_n_object_dead; - --atomic_t fscache_n_checkaux_none; --atomic_t fscache_n_checkaux_okay; --atomic_t fscache_n_checkaux_update; --atomic_t fscache_n_checkaux_obsolete; -+atomic_unchecked_t fscache_n_checkaux_none; -+atomic_unchecked_t fscache_n_checkaux_okay; -+atomic_unchecked_t fscache_n_checkaux_update; -+atomic_unchecked_t fscache_n_checkaux_obsolete; - - atomic_t fscache_n_cop_alloc_object; - atomic_t fscache_n_cop_lookup_object; -@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v) - seq_puts(m, "FS-Cache statistics\n"); - - seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", -- atomic_read(&fscache_n_cookie_index), -- atomic_read(&fscache_n_cookie_data), -- atomic_read(&fscache_n_cookie_special)); -+ atomic_read_unchecked(&fscache_n_cookie_index), -+ atomic_read_unchecked(&fscache_n_cookie_data), -+ atomic_read_unchecked(&fscache_n_cookie_special)); - - seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", -- atomic_read(&fscache_n_object_alloc), -- atomic_read(&fscache_n_object_no_alloc), -- atomic_read(&fscache_n_object_avail), -- atomic_read(&fscache_n_object_dead)); -+ atomic_read_unchecked(&fscache_n_object_alloc), -+ atomic_read_unchecked(&fscache_n_object_no_alloc), -+ atomic_read_unchecked(&fscache_n_object_avail), -+ atomic_read_unchecked(&fscache_n_object_dead)); - seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", -- atomic_read(&fscache_n_checkaux_none), -- atomic_read(&fscache_n_checkaux_okay), -- atomic_read(&fscache_n_checkaux_update), -- atomic_read(&fscache_n_checkaux_obsolete)); -+ atomic_read_unchecked(&fscache_n_checkaux_none), -+ atomic_read_unchecked(&fscache_n_checkaux_okay), -+ 
atomic_read_unchecked(&fscache_n_checkaux_update), -+ atomic_read_unchecked(&fscache_n_checkaux_obsolete)); - - seq_printf(m, "Pages : mrk=%u unc=%u\n", -- atomic_read(&fscache_n_marks), -- atomic_read(&fscache_n_uncaches)); -+ atomic_read_unchecked(&fscache_n_marks), -+ atomic_read_unchecked(&fscache_n_uncaches)); - - seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" - " oom=%u\n", -- atomic_read(&fscache_n_acquires), -- atomic_read(&fscache_n_acquires_null), -- atomic_read(&fscache_n_acquires_no_cache), -- atomic_read(&fscache_n_acquires_ok), -- atomic_read(&fscache_n_acquires_nobufs), -- atomic_read(&fscache_n_acquires_oom)); -+ atomic_read_unchecked(&fscache_n_acquires), -+ atomic_read_unchecked(&fscache_n_acquires_null), -+ atomic_read_unchecked(&fscache_n_acquires_no_cache), -+ atomic_read_unchecked(&fscache_n_acquires_ok), -+ atomic_read_unchecked(&fscache_n_acquires_nobufs), -+ atomic_read_unchecked(&fscache_n_acquires_oom)); - - seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", -- atomic_read(&fscache_n_object_lookups), -- atomic_read(&fscache_n_object_lookups_negative), -- atomic_read(&fscache_n_object_lookups_positive), -- atomic_read(&fscache_n_object_created), -- atomic_read(&fscache_n_object_lookups_timed_out)); -+ atomic_read_unchecked(&fscache_n_object_lookups), -+ atomic_read_unchecked(&fscache_n_object_lookups_negative), -+ atomic_read_unchecked(&fscache_n_object_lookups_positive), -+ atomic_read_unchecked(&fscache_n_object_created), -+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out)); - - seq_printf(m, "Updates: n=%u nul=%u run=%u\n", -- atomic_read(&fscache_n_updates), -- atomic_read(&fscache_n_updates_null), -- atomic_read(&fscache_n_updates_run)); -+ atomic_read_unchecked(&fscache_n_updates), -+ atomic_read_unchecked(&fscache_n_updates_null), -+ atomic_read_unchecked(&fscache_n_updates_run)); - - seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", -- atomic_read(&fscache_n_relinquishes), -- atomic_read(&fscache_n_relinquishes_null), -- atomic_read(&fscache_n_relinquishes_waitcrt), -- atomic_read(&fscache_n_relinquishes_retire)); -+ atomic_read_unchecked(&fscache_n_relinquishes), -+ atomic_read_unchecked(&fscache_n_relinquishes_null), -+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt), -+ atomic_read_unchecked(&fscache_n_relinquishes_retire)); - - seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", -- atomic_read(&fscache_n_attr_changed), -- atomic_read(&fscache_n_attr_changed_ok), -- atomic_read(&fscache_n_attr_changed_nobufs), -- atomic_read(&fscache_n_attr_changed_nomem), -- atomic_read(&fscache_n_attr_changed_calls)); -+ atomic_read_unchecked(&fscache_n_attr_changed), -+ atomic_read_unchecked(&fscache_n_attr_changed_ok), -+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs), -+ atomic_read_unchecked(&fscache_n_attr_changed_nomem), -+ atomic_read_unchecked(&fscache_n_attr_changed_calls)); - - seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", -- atomic_read(&fscache_n_allocs), -- atomic_read(&fscache_n_allocs_ok), -- atomic_read(&fscache_n_allocs_wait), -- atomic_read(&fscache_n_allocs_nobufs), -- atomic_read(&fscache_n_allocs_intr)); -+ atomic_read_unchecked(&fscache_n_allocs), -+ atomic_read_unchecked(&fscache_n_allocs_ok), -+ atomic_read_unchecked(&fscache_n_allocs_wait), -+ atomic_read_unchecked(&fscache_n_allocs_nobufs), -+ atomic_read_unchecked(&fscache_n_allocs_intr)); - seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", -- atomic_read(&fscache_n_alloc_ops), -- atomic_read(&fscache_n_alloc_op_waits), -- 
atomic_read(&fscache_n_allocs_object_dead)); -+ atomic_read_unchecked(&fscache_n_alloc_ops), -+ atomic_read_unchecked(&fscache_n_alloc_op_waits), -+ atomic_read_unchecked(&fscache_n_allocs_object_dead)); - - seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" - " int=%u oom=%u\n", -- atomic_read(&fscache_n_retrievals), -- atomic_read(&fscache_n_retrievals_ok), -- atomic_read(&fscache_n_retrievals_wait), -- atomic_read(&fscache_n_retrievals_nodata), -- atomic_read(&fscache_n_retrievals_nobufs), -- atomic_read(&fscache_n_retrievals_intr), -- atomic_read(&fscache_n_retrievals_nomem)); -+ atomic_read_unchecked(&fscache_n_retrievals), -+ atomic_read_unchecked(&fscache_n_retrievals_ok), -+ atomic_read_unchecked(&fscache_n_retrievals_wait), -+ atomic_read_unchecked(&fscache_n_retrievals_nodata), -+ atomic_read_unchecked(&fscache_n_retrievals_nobufs), -+ atomic_read_unchecked(&fscache_n_retrievals_intr), -+ atomic_read_unchecked(&fscache_n_retrievals_nomem)); - seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", -- atomic_read(&fscache_n_retrieval_ops), -- atomic_read(&fscache_n_retrieval_op_waits), -- atomic_read(&fscache_n_retrievals_object_dead)); -+ atomic_read_unchecked(&fscache_n_retrieval_ops), -+ atomic_read_unchecked(&fscache_n_retrieval_op_waits), -+ atomic_read_unchecked(&fscache_n_retrievals_object_dead)); - - seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", -- atomic_read(&fscache_n_stores), -- atomic_read(&fscache_n_stores_ok), -- atomic_read(&fscache_n_stores_again), -- atomic_read(&fscache_n_stores_nobufs), -- atomic_read(&fscache_n_stores_oom)); -+ atomic_read_unchecked(&fscache_n_stores), -+ atomic_read_unchecked(&fscache_n_stores_ok), -+ atomic_read_unchecked(&fscache_n_stores_again), -+ atomic_read_unchecked(&fscache_n_stores_nobufs), -+ atomic_read_unchecked(&fscache_n_stores_oom)); - seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", -- atomic_read(&fscache_n_store_ops), -- atomic_read(&fscache_n_store_calls), -- atomic_read(&fscache_n_store_pages), -- atomic_read(&fscache_n_store_radix_deletes), -- atomic_read(&fscache_n_store_pages_over_limit)); -+ atomic_read_unchecked(&fscache_n_store_ops), -+ atomic_read_unchecked(&fscache_n_store_calls), -+ atomic_read_unchecked(&fscache_n_store_pages), -+ atomic_read_unchecked(&fscache_n_store_radix_deletes), -+ atomic_read_unchecked(&fscache_n_store_pages_over_limit)); - - seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n", -- atomic_read(&fscache_n_store_vmscan_not_storing), -- atomic_read(&fscache_n_store_vmscan_gone), -- atomic_read(&fscache_n_store_vmscan_busy), -- atomic_read(&fscache_n_store_vmscan_cancelled)); -+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing), -+ atomic_read_unchecked(&fscache_n_store_vmscan_gone), -+ atomic_read_unchecked(&fscache_n_store_vmscan_busy), -+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled)); - - seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", -- atomic_read(&fscache_n_op_pend), -- atomic_read(&fscache_n_op_run), -- atomic_read(&fscache_n_op_enqueue), -- atomic_read(&fscache_n_op_cancelled), -- atomic_read(&fscache_n_op_rejected)); -+ atomic_read_unchecked(&fscache_n_op_pend), -+ atomic_read_unchecked(&fscache_n_op_run), -+ atomic_read_unchecked(&fscache_n_op_enqueue), -+ atomic_read_unchecked(&fscache_n_op_cancelled), -+ atomic_read_unchecked(&fscache_n_op_rejected)); - seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", -- atomic_read(&fscache_n_op_deferred_release), -- atomic_read(&fscache_n_op_release), -- atomic_read(&fscache_n_op_gc)); -+ 
atomic_read_unchecked(&fscache_n_op_deferred_release), -+ atomic_read_unchecked(&fscache_n_op_release), -+ atomic_read_unchecked(&fscache_n_op_gc)); - - seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", - atomic_read(&fscache_n_cop_alloc_object), -diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c -index b6cca47..ec782c3 100644 ---- a/fs/fuse/cuse.c -+++ b/fs/fuse/cuse.c -@@ -586,10 +586,12 @@ static int __init cuse_init(void) - INIT_LIST_HEAD(&cuse_conntbl[i]); - - /* inherit and extend fuse_dev_operations */ -- cuse_channel_fops = fuse_dev_operations; -- cuse_channel_fops.owner = THIS_MODULE; -- cuse_channel_fops.open = cuse_channel_open; -- cuse_channel_fops.release = cuse_channel_release; -+ pax_open_kernel(); -+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations)); -+ *(void **)&cuse_channel_fops.owner = THIS_MODULE; -+ *(void **)&cuse_channel_fops.open = cuse_channel_open; -+ *(void **)&cuse_channel_fops.release = cuse_channel_release; -+ pax_close_kernel(); - - cuse_class = class_create(THIS_MODULE, "cuse"); - if (IS_ERR(cuse_class)) -diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c -index 5cb8614..6865b11 100644 ---- a/fs/fuse/dev.c -+++ b/fs/fuse/dev.c -@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, - ret = 0; - pipe_lock(pipe); - -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; -diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c -index 9f63e49..d8a64c0 100644 ---- a/fs/fuse/dir.c -+++ b/fs/fuse/dir.c -@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry) - return link; - } - --static void free_link(char *link) -+static void free_link(const char *link) - { - if (!IS_ERR(link)) - free_page((unsigned long) link); -diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c -index 900cf98..3896726 100644 ---- a/fs/gfs2/inode.c -+++ b/fs/gfs2/inode.c -@@ -1517,7 +1517,7 @@ out: - - static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p) - { -- char *s = nd_get_link(nd); -+ const char *s = nd_get_link(nd); - if (!IS_ERR(s)) - kfree(s); - } -diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c -index 3ebc437..eb23952 100644 ---- a/fs/hfs/btree.c -+++ b/fs/hfs/btree.c -@@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke - case HFS_EXT_CNID: - hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize, - mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz)); -+ -+ if (HFS_I(tree->inode)->alloc_blocks > -+ HFS_I(tree->inode)->first_blocks) { -+ printk(KERN_ERR "hfs: invalid btree extent records\n"); -+ unlock_new_inode(tree->inode); -+ goto free_inode; -+ } -+ - tree->inode->i_mapping->a_ops = &hfs_btree_aops; - break; - case HFS_CAT_CNID: - hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize, - mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz)); -+ -+ if (!HFS_I(tree->inode)->first_blocks) { -+ printk(KERN_ERR "hfs: invalid btree extent records " -+ "(0 size).\n"); -+ unlock_new_inode(tree->inode); -+ goto free_inode; -+ } -+ - tree->inode->i_mapping->a_ops = &hfs_btree_aops; - break; - default: -@@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke - } - unlock_new_inode(tree->inode); - -- if (!HFS_I(tree->inode)->first_blocks) { -- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n"); -- goto free_inode; -- } -- - mapping = tree->inode->i_mapping; - page = read_mapping_page(mapping, 0, NULL); - if (IS_ERR(page)) -diff 
--git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c -index 4dfbfec..947c9c2 100644 ---- a/fs/hfsplus/catalog.c -+++ b/fs/hfsplus/catalog.c -@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid, - int err; - u16 type; - -+ pax_track_stack(); -+ - hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL); - err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry)); - if (err) -@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, - int entry_size; - int err; - -+ pax_track_stack(); -+ - dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", - str->name, cnid, inode->i_nlink); - err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); -@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid, - int entry_size, type; - int err; - -+ pax_track_stack(); -+ - dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", - cnid, src_dir->i_ino, src_name->name, - dst_dir->i_ino, dst_name->name); -diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c -index 25b2443..09a3341 100644 ---- a/fs/hfsplus/dir.c -+++ b/fs/hfsplus/dir.c -@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) - struct hfsplus_readdir_data *rd; - u16 type; - -+ pax_track_stack(); -+ - if (filp->f_pos >= inode->i_size) - return 0; - -diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c -index 4cc1e3a..ad0f70b 100644 ---- a/fs/hfsplus/inode.c -+++ b/fs/hfsplus/inode.c -@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) - int res = 0; - u16 type; - -+ pax_track_stack(); -+ - type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset); - - HFSPLUS_I(inode)->linkid = 0; -@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode) - struct hfs_find_data fd; - hfsplus_cat_entry entry; - -+ pax_track_stack(); -+ - if (HFSPLUS_IS_RSRC(inode)) - main_inode = HFSPLUS_I(inode)->rsrc_inode; - -diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c -index fbaa669..c548cd0 100644 ---- a/fs/hfsplus/ioctl.c -+++ b/fs/hfsplus/ioctl.c -@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name, - struct hfsplus_cat_file *file; - int res; - -+ pax_track_stack(); -+ - if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode)) - return -EOPNOTSUPP; - -@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, - struct hfsplus_cat_file *file; - ssize_t res = 0; - -+ pax_track_stack(); -+ - if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode)) - return -EOPNOTSUPP; - -diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c -index d24a9b6..dd9b3dd 100644 ---- a/fs/hfsplus/super.c -+++ b/fs/hfsplus/super.c -@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) - u64 last_fs_block, last_fs_page; - int err; - -+ pax_track_stack(); -+ - err = -EINVAL; - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) -diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c -index ec88953..cb5e98e 100644 ---- a/fs/hugetlbfs/inode.c -+++ b/fs/hugetlbfs/inode.c -@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = { - .kill_sb = kill_litter_super, - }; - --static struct vfsmount *hugetlbfs_vfsmount; -+struct vfsmount *hugetlbfs_vfsmount; - - static int can_do_hugetlb_shm(void) - { -diff --git a/fs/inode.c b/fs/inode.c -index ec79246..054c36a 100644 ---- a/fs/inode.c -+++ b/fs/inode.c -@@ -787,8 +787,8 @@ unsigned int get_next_ino(void) - - #ifdef CONFIG_SMP - if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { -- static atomic_t shared_last_ino; -- int next = 
atomic_add_return(LAST_INO_BATCH, &shared_last_ino); -+ static atomic_unchecked_t shared_last_ino; -+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino); - - res = next - LAST_INO_BATCH; - } -diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c -index f94fc48..3bb8d30 100644 ---- a/fs/jbd/checkpoint.c -+++ b/fs/jbd/checkpoint.c -@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal) - tid_t this_tid; - int result; - -+ pax_track_stack(); -+ - jbd_debug(1, "Start checkpoint\n"); - - /* -diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c -index 16a5047..88ff6ca 100644 ---- a/fs/jffs2/compr_rtime.c -+++ b/fs/jffs2/compr_rtime.c -@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in, - int outpos = 0; - int pos=0; - -+ pax_track_stack(); -+ - memset(positions,0,sizeof(positions)); - - while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { -@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in, - int outpos = 0; - int pos=0; - -+ pax_track_stack(); -+ - memset(positions,0,sizeof(positions)); - - while (outpos<destlen) { -diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c -index 9e7cec8..4713089 100644 ---- a/fs/jffs2/compr_rubin.c -+++ b/fs/jffs2/compr_rubin.c -@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, - int ret; - uint32_t mysrclen, mydstlen; - -+ pax_track_stack(); -+ - mysrclen = *sourcelen; - mydstlen = *dstlen - 8; - -diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c -index e513f19..2ab1351 100644 ---- a/fs/jffs2/erase.c -+++ b/fs/jffs2/erase.c -@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb - struct jffs2_unknown_node marker = { - .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), - .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), -- .totlen = cpu_to_je32(c->cleanmarker_size) -+ .totlen = cpu_to_je32(c->cleanmarker_size), -+ .hdr_crc = cpu_to_je32(0) - }; - - jffs2_prealloc_raw_node_refs(c, jeb, 1); -diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c -index 4515bea..178f2d6 100644 ---- a/fs/jffs2/wbuf.c -+++ b/fs/jffs2/wbuf.c -@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker = - { - .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), - .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), -- .totlen = constant_cpu_to_je32(8) -+ .totlen = constant_cpu_to_je32(8), -+ .hdr_crc = constant_cpu_to_je32(0) - }; - - /* -diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c -index 3e93cdd..c8a80e1 100644 ---- a/fs/jffs2/xattr.c -+++ b/fs/jffs2/xattr.c -@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) - - BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); - -+ pax_track_stack(); -+ - /* Phase.1 : Merge same xref */ - for (i=0; i < XREF_TMPHASH_SIZE; i++) - xref_tmphash[i] = NULL; -diff --git a/fs/jfs/super.c b/fs/jfs/super.c -index 06c8a67..589dbbd 100644 ---- a/fs/jfs/super.c -+++ b/fs/jfs/super.c -@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void) - - jfs_inode_cachep = - kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, -- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, -+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY, - init_once); - if (jfs_inode_cachep == NULL) - return -ENOMEM; -diff --git a/fs/libfs.c b/fs/libfs.c -index c18e9a1..0b04e2c 100644 ---- a/fs/libfs.c -+++ b/fs/libfs.c -@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) - - for (p=q->next; p != &dentry->d_subdirs; p=p->next) { - struct dentry *next; -+ 
char d_name[sizeof(next->d_iname)]; -+ const unsigned char *name; -+ - next = list_entry(p, struct dentry, d_u.d_child); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - if (!simple_positive(next)) { -@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) - - spin_unlock(&next->d_lock); - spin_unlock(&dentry->d_lock); -- if (filldir(dirent, next->d_name.name, -+ name = next->d_name.name; -+ if (name == next->d_iname) { -+ memcpy(d_name, name, next->d_name.len); -+ name = d_name; -+ } -+ if (filldir(dirent, name, - next->d_name.len, filp->f_pos, - next->d_inode->i_ino, - dt_type(next->d_inode)) < 0) -diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c -index 8392cb8..ae8ed40 100644 ---- a/fs/lockd/clntproc.c -+++ b/fs/lockd/clntproc.c -@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops; - /* - * Cookie counter for NLM requests - */ --static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); -+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234); - - void nlmclnt_next_cookie(struct nlm_cookie *c) - { -- u32 cookie = atomic_inc_return(&nlm_cookie); -+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie); - - memcpy(c->data, &cookie, 4); - c->len=4; -@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl) - struct nlm_rqst reqst, *req; - int status; - -+ pax_track_stack(); -+ - req = &reqst; - memset(req, 0, sizeof(*req)); - locks_init_lock(&req->a_args.lock.fl); -diff --git a/fs/locks.c b/fs/locks.c -index 703f545..150a552 100644 ---- a/fs/locks.c -+++ b/fs/locks.c -@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp) - return; - - if (filp->f_op && filp->f_op->flock) { -- struct file_lock fl = { -+ struct file_lock flock = { - .fl_pid = current->tgid, - .fl_file = filp, - .fl_flags = FL_FLOCK, - .fl_type = F_UNLCK, - .fl_end = OFFSET_MAX, - }; -- filp->f_op->flock(filp, F_SETLKW, &fl); -- if (fl.fl_ops && fl.fl_ops->fl_release_private) -- fl.fl_ops->fl_release_private(&fl); -+ filp->f_op->flock(filp, F_SETLKW, &flock); -+ if (flock.fl_ops && flock.fl_ops->fl_release_private) -+ flock.fl_ops->fl_release_private(&flock); - } - - lock_flocks(); -diff --git a/fs/logfs/super.c b/fs/logfs/super.c -index ce03a18..ac8c14f 100644 ---- a/fs/logfs/super.c -+++ b/fs/logfs/super.c -@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb) - struct logfs_disk_super _ds1, *ds1 = &_ds1; - int err, valid0, valid1; - -+ pax_track_stack(); -+ - /* read first superblock */ - err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0); - if (err) -diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c -index 3f32bcb..7c82c29 100644 ---- a/fs/minix/bitmap.c -+++ b/fs/minix/bitmap.c -@@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 }; - - static DEFINE_SPINLOCK(bitmap_lock); - --static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) -+static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits) - { - unsigned i, j, sum = 0; - struct buffer_head *bh; -+ unsigned numblocks = minix_blocks_needed(numbits, blocksize); - - for (i=0; i<numblocks-1; i++) { - if (!(bh=map[i])) -@@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode) - return 0; - } - --unsigned long minix_count_free_blocks(struct minix_sb_info *sbi) -+unsigned long minix_count_free_blocks(struct super_block *sb) - { -- return (count_free(sbi->s_zmap, sbi->s_zmap_blocks, -- sbi->s_nzones - sbi->s_firstdatazone + 1) -+ struct 
minix_sb_info *sbi = minix_sb(sb); -+ u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1); -+ -+ return (count_free(sbi->s_zmap, sb->s_blocksize, bits) - << sbi->s_log_zone_size); - } - -@@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error) - return inode; - } - --unsigned long minix_count_free_inodes(struct minix_sb_info *sbi) -+unsigned long minix_count_free_inodes(struct super_block *sb) - { -- return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1); -+ struct minix_sb_info *sbi = minix_sb(sb); -+ u32 bits = sbi->s_ninodes + 1; -+ -+ return count_free(sbi->s_imap, sb->s_blocksize, bits); - } -diff --git a/fs/minix/inode.c b/fs/minix/inode.c -index e7d23e2..1ed1351 100644 ---- a/fs/minix/inode.c -+++ b/fs/minix/inode.c -@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) - else if (sbi->s_mount_state & MINIX_ERROR_FS) - printk("MINIX-fs: mounting file system with errors, " - "running fsck is recommended\n"); -+ -+ /* Apparently minix can create filesystems that allocate more blocks for -+ * the bitmaps than needed. We simply ignore that, but verify it didn't -+ * create one with not enough blocks and bail out if so. -+ */ -+ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); -+ if (sbi->s_imap_blocks < block) { -+ printk("MINIX-fs: file system does not have enough " -+ "imap blocks allocated. Refusing to mount\n"); -+ goto out_iput; -+ } -+ -+ block = minix_blocks_needed( -+ (sbi->s_nzones - (sbi->s_firstdatazone + 1)), -+ s->s_blocksize); -+ if (sbi->s_zmap_blocks < block) { -+ printk("MINIX-fs: file system does not have enough " -+ "zmap blocks allocated. Refusing to mount.\n"); -+ goto out_iput; -+ } -+ - return 0; - - out_iput: -@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) - buf->f_type = sb->s_magic; - buf->f_bsize = sb->s_blocksize; - buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; -- buf->f_bfree = minix_count_free_blocks(sbi); -+ buf->f_bfree = minix_count_free_blocks(sb); - buf->f_bavail = buf->f_bfree; - buf->f_files = sbi->s_ninodes; -- buf->f_ffree = minix_count_free_inodes(sbi); -+ buf->f_ffree = minix_count_free_inodes(sb); - buf->f_namelen = sbi->s_namelen; - buf->f_fsid.val[0] = (u32)id; - buf->f_fsid.val[1] = (u32)(id >> 32); -diff --git a/fs/minix/minix.h b/fs/minix/minix.h -index 341e212..6415fe0 100644 ---- a/fs/minix/minix.h -+++ b/fs/minix/minix.h -@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru - extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); - extern struct inode * minix_new_inode(const struct inode *, int, int *); - extern void minix_free_inode(struct inode * inode); --extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); -+extern unsigned long minix_count_free_inodes(struct super_block *sb); - extern int minix_new_block(struct inode * inode); - extern void minix_free_block(struct inode *inode, unsigned long block); --extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); -+extern unsigned long minix_count_free_blocks(struct super_block *sb); - extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); - extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); - -@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode) - return list_entry(inode, struct 
minix_inode_info, vfs_inode); - } - -+static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize) -+{ -+ return DIV_ROUND_UP(bits, blocksize * 8); -+} -+ - #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ - defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) - -diff --git a/fs/namei.c b/fs/namei.c -index 3d15072..c1ddf9c 100644 ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask) - if (ret != -EACCES) - return ret; - -+#ifdef CONFIG_GRKERNSEC -+ /* we'll block if we have to log due to a denied capability use */ -+ if (mask & MAY_NOT_BLOCK) -+ return -ECHILD; -+#endif -+ - if (S_ISDIR(inode->i_mode)) { - /* DACs are overridable for directories */ -- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) -- return 0; - if (!(mask & MAY_WRITE)) -- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) -+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) || -+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) - return 0; -+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) -+ return 0; - return -EACCES; - } - /* -+ * Searching includes executable on directories, else just read. -+ */ -+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; -+ if (mask == MAY_READ) -+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) || -+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) -+ return 0; -+ -+ /* - * Read/write DACs are always overridable. - * Executable DACs are overridable when there is - * at least one exec bit set. -@@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask) - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) - return 0; - -- /* -- * Searching includes executable on directories, else just read. -- */ -- mask &= MAY_READ | MAY_WRITE | MAY_EXEC; -- if (mask == MAY_READ) -- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) -- return 0; -- - return -EACCES; - } - -@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p) - return error; - } - -+ if (gr_handle_follow_link(dentry->d_parent->d_inode, -+ dentry->d_inode, dentry, nd->path.mnt)) { -+ error = -EACCES; -+ *p = ERR_PTR(error); /* no ->put_link(), please */ -+ path_put(&nd->path); -+ return error; -+ } -+ - nd->last_type = LAST_BIND; - *p = dentry->d_inode->i_op->follow_link(dentry, nd); - error = PTR_ERR(*p); - if (!IS_ERR(*p)) { -- char *s = nd_get_link(nd); -+ const char *s = nd_get_link(nd); - error = 0; - if (s) - error = __vfs_follow_link(nd, s); -@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name, - if (!err) - err = complete_walk(nd); - -+ if (!(nd->flags & LOOKUP_PARENT)) { -+#ifdef CONFIG_GRKERNSEC -+ if (flags & LOOKUP_RCU) { -+ if (!err) -+ path_put(&nd->path); -+ err = -ECHILD; -+ } else -+#endif -+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { -+ if (!err) -+ path_put(&nd->path); -+ err = -ENOENT; -+ } -+ } -+ - if (!err && nd->flags & LOOKUP_DIRECTORY) { - if (!nd->inode->i_op->lookup) { - path_put(&nd->path); -@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name, - retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd); - - if (likely(!retval)) { -+ if (*name != '/' && nd->path.dentry && nd->inode) { -+#ifdef CONFIG_GRKERNSEC -+ if (flags & LOOKUP_RCU) -+ return -ECHILD; -+#endif -+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) -+ return -ENOENT; -+ } -+ - if (unlikely(!audit_dummy_context())) { - if (nd->path.dentry && nd->inode) - audit_inode(name, nd->path.dentry); -@@ -2049,7 
+2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag) - /* - * Ensure there are no outstanding leases on the file. - */ -- return break_lease(inode, flag); -+ error = break_lease(inode, flag); -+ -+ if (error) -+ return error; -+ -+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) { -+ error = -EPERM; -+ goto exit; -+ } -+ -+ if (gr_handle_rawio(inode)) { -+ error = -EPERM; -+ goto exit; -+ } -+ -+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) { -+ error = -EACCES; -+ goto exit; -+ } -+exit: -+ return error; - } - - static int handle_truncate(struct file *filp) -@@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - error = complete_walk(nd); - if (error) - return ERR_PTR(error); -+#ifdef CONFIG_GRKERNSEC -+ if (nd->flags & LOOKUP_RCU) { -+ error = -ECHILD; -+ goto exit; -+ } -+#endif -+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { -+ error = -ENOENT; -+ goto exit; -+ } - audit_inode(pathname, nd->path.dentry); - if (open_flag & O_CREAT) { - error = -EISDIR; -@@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - error = complete_walk(nd); - if (error) - return ERR_PTR(error); -+#ifdef CONFIG_GRKERNSEC -+ if (nd->flags & LOOKUP_RCU) { -+ error = -ECHILD; -+ goto exit; -+ } -+#endif -+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) { -+ error = -ENOENT; -+ goto exit; -+ } - audit_inode(pathname, dir); - goto ok; - } -@@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - error = complete_walk(nd); - if (error) - return ERR_PTR(-ECHILD); -+#ifdef CONFIG_GRKERNSEC -+ if (nd->flags & LOOKUP_RCU) { -+ error = -ECHILD; -+ goto exit; -+ } -+#endif -+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { -+ error = -ENOENT; -+ goto exit; -+ } - - error = -ENOTDIR; - if (nd->flags & LOOKUP_DIRECTORY) { -@@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - /* Negative dentry, just create the file */ - if (!dentry->d_inode) { - int mode = op->mode; -+ -+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) { -+ error = -EACCES; -+ goto exit_mutex_unlock; -+ } -+ - if (!IS_POSIXACL(dir->d_inode)) - mode &= ~current_umask(); - /* -@@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - error = vfs_create(dir->d_inode, dentry, mode, nd); - if (error) - goto exit_mutex_unlock; -+ else -+ gr_handle_create(path->dentry, path->mnt); - mutex_unlock(&dir->d_inode->i_mutex); - dput(nd->path.dentry); - nd->path.dentry = dentry; -@@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path, - /* - * It already exists. 
- */ -+ -+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) { -+ error = -ENOENT; -+ goto exit_mutex_unlock; -+ } -+ -+ /* only check if O_CREAT is specified, all other checks need to go -+ into may_open */ -+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) { -+ error = -EACCES; -+ goto exit_mutex_unlock; -+ } -+ - mutex_unlock(&dir->d_inode->i_mutex); - audit_inode(pathname, path->dentry); - -@@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path - *path = nd.path; - return dentry; - eexist: -+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ENOENT); -+ goto fail; -+ } - dput(dentry); - dentry = ERR_PTR(-EEXIST); - fail: -@@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat - } - EXPORT_SYMBOL(user_path_create); - -+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir) -+{ -+ char *tmp = getname(pathname); -+ struct dentry *res; -+ if (IS_ERR(tmp)) -+ return ERR_CAST(tmp); -+ res = kern_path_create(dfd, tmp, path, is_dir); -+ if (IS_ERR(res)) -+ putname(tmp); -+ else -+ *to = tmp; -+ return res; -+} -+ - int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) - { - int error = may_create(dir, dentry); -@@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; -+ -+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) { -+ error = -EPERM; -+ goto out_drop_write; -+ } -+ -+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { -+ error = -EACCES; -+ goto out_drop_write; -+ } -+ - error = security_path_mknod(&path, dentry, mode, dev); - if (error) - goto out_drop_write; -@@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, - } - out_drop_write: - mnt_drop_write(path.mnt); -+ -+ if (!error) -+ gr_handle_create(dentry, path.mnt); - out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); -@@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode) - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; -+ -+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) { -+ error = -EACCES; -+ goto out_drop_write; -+ } -+ - error = security_path_mkdir(&path, dentry, mode); - if (error) - goto out_drop_write; - error = vfs_mkdir(path.dentry->d_inode, dentry, mode); - out_drop_write: - mnt_drop_write(path.mnt); -+ -+ if (!error) -+ gr_handle_create(dentry, path.mnt); - out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); -@@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname) - char * name; - struct dentry *dentry; - struct nameidata nd; -+ ino_t saved_ino = 0; -+ dev_t saved_dev = 0; - - error = user_path_parent(dfd, pathname, &nd, &name); - if (error) -@@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname) - error = -ENOENT; - goto exit3; - } -+ -+ saved_ino = dentry->d_inode->i_ino; -+ saved_dev = gr_get_dev_from_dentry(dentry); -+ -+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) { -+ error = -EACCES; -+ goto exit3; -+ } -+ - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit3; -@@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname) - if (error) - goto exit4; - error = 
vfs_rmdir(nd.path.dentry->d_inode, dentry); -+ if (!error && (saved_dev || saved_ino)) -+ gr_handle_delete(saved_ino, saved_dev); - exit4: - mnt_drop_write(nd.path.mnt); - exit3: -@@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname) - struct dentry *dentry; - struct nameidata nd; - struct inode *inode = NULL; -+ ino_t saved_ino = 0; -+ dev_t saved_dev = 0; - - error = user_path_parent(dfd, pathname, &nd, &name); - if (error) -@@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname) - if (!inode) - goto slashes; - ihold(inode); -+ -+ if (inode->i_nlink <= 1) { -+ saved_ino = inode->i_ino; -+ saved_dev = gr_get_dev_from_dentry(dentry); -+ } -+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) { -+ error = -EACCES; -+ goto exit2; -+ } -+ - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit2; -@@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname) - if (error) - goto exit3; - error = vfs_unlink(nd.path.dentry->d_inode, dentry); -+ if (!error && (saved_ino || saved_dev)) -+ gr_handle_delete(saved_ino, saved_dev); - exit3: - mnt_drop_write(nd.path.mnt); - exit2: -@@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; -+ -+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) { -+ error = -EACCES; -+ goto out_drop_write; -+ } -+ - error = security_path_symlink(&path, dentry, from); - if (error) - goto out_drop_write; - error = vfs_symlink(path.dentry->d_inode, dentry, from); -+ if (!error) -+ gr_handle_create(dentry, path.mnt); - out_drop_write: - mnt_drop_write(path.mnt); - out_dput: -@@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, - { - struct dentry *new_dentry; - struct path old_path, new_path; -+ char *to = NULL; - int how = 0; - int error; - -@@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, - if (error) - return error; - -- new_dentry = user_path_create(newdfd, newname, &new_path, 0); -+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0); - error = PTR_ERR(new_dentry); - if (IS_ERR(new_dentry)) - goto out; -@@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, - error = mnt_want_write(new_path.mnt); - if (error) - goto out_dput; -+ -+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt, -+ old_path.dentry->d_inode, -+ old_path.dentry->d_inode->i_mode, to)) { -+ error = -EACCES; -+ goto out_drop_write; -+ } -+ -+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt, -+ old_path.dentry, old_path.mnt, to)) { -+ error = -EACCES; -+ goto out_drop_write; -+ } -+ - error = security_path_link(old_path.dentry, &new_path, new_dentry); - if (error) - goto out_drop_write; - error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); -+ if (!error) -+ gr_handle_create(new_dentry, new_path.mnt); - out_drop_write: - mnt_drop_write(new_path.mnt); - out_dput: -+ putname(to); - dput(new_dentry); - mutex_unlock(&new_path.dentry->d_inode->i_mutex); - path_put(&new_path); -@@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, - char *to; - int error; - -+ pax_track_stack(); -+ - error = user_path_parent(olddfd, oldname, &oldnd, &from); - if (error) - goto exit; -@@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, - if (new_dentry == trap) - goto exit5; - -+ error = 
gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt, -+ old_dentry, old_dir->d_inode, oldnd.path.mnt, -+ to); -+ if (error) -+ goto exit5; -+ - error = mnt_want_write(oldnd.path.mnt); - if (error) - goto exit5; -@@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, - goto exit6; - error = vfs_rename(old_dir->d_inode, old_dentry, - new_dir->d_inode, new_dentry); -+ if (!error) -+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry, -+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0); - exit6: - mnt_drop_write(oldnd.path.mnt); - exit5: -@@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna - - int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) - { -+ char tmpbuf[64]; -+ const char *newlink; - int len; - - len = PTR_ERR(link); -@@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c - len = strlen(link); - if (len > (unsigned) buflen) - len = buflen; -- if (copy_to_user(buffer, link, len)) -+ -+ if (len < sizeof(tmpbuf)) { -+ memcpy(tmpbuf, link, len); -+ newlink = tmpbuf; -+ } else -+ newlink = link; -+ -+ if (copy_to_user(buffer, newlink, len)) - len = -EFAULT; - out: - return len; -diff --git a/fs/namespace.c b/fs/namespace.c -index e5e1c7d..019609e 100644 ---- a/fs/namespace.c -+++ b/fs/namespace.c -@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mnt, int flags) - if (!(sb->s_flags & MS_RDONLY)) - retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); - up_write(&sb->s_umount); -+ -+ gr_log_remount(mnt->mnt_devname, retval); -+ - return retval; - } - -@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mnt, int flags) - br_write_unlock(vfsmount_lock); - up_write(&namespace_sem); - release_mounts(&umount_list); -+ -+ gr_log_unmount(mnt->mnt_devname, retval); -+ - return retval; - } - -@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, - MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | - MS_STRICTATIME); - -+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) { -+ retval = -EPERM; -+ goto dput_out; -+ } -+ -+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) { -+ retval = -EPERM; -+ goto dput_out; -+ } -+ - if (flags & MS_REMOUNT) - retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, - data_page); -@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, - dev_name, data_page); - dput_out: - path_put(&path); -+ -+ gr_log_mount(dev_name, dir_name, retval); -+ - return retval; - } - -@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, - if (error) - goto out2; - -+ if (gr_handle_chroot_pivot()) { -+ error = -EPERM; -+ goto out2; -+ } -+ - get_fs_root(current->fs, &root); - error = lock_mount(&old); - if (error) -diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c -index 9c51f62..503b252 100644 ---- a/fs/ncpfs/dir.c -+++ b/fs/ncpfs/dir.c -@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd) - int res, val = 0, len; - __u8 __name[NCP_MAXPATHLEN + 1]; - -+ pax_track_stack(); -+ - if (dentry == dentry->d_sb->s_root) - return 1; - -@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc - int error, res, len; - __u8 __name[NCP_MAXPATHLEN + 1]; - -+ pax_track_stack(); -+ - error = -EIO; - if (!ncp_conn_valid(server)) - goto finished; -@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, 
struct dentry *dentry, int mode, - PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n", - dentry->d_parent->d_name.name, dentry->d_name.name, mode); - -+ pax_track_stack(); -+ - ncp_age_dentry(server, dentry); - len = sizeof(__name); - error = ncp_io2vol(server, __name, &len, dentry->d_name.name, -@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode) - int error, len; - __u8 __name[NCP_MAXPATHLEN + 1]; - -+ pax_track_stack(); -+ - DPRINTK("ncp_mkdir: making %s/%s\n", - dentry->d_parent->d_name.name, dentry->d_name.name); - -@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry, - int old_len, new_len; - __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1]; - -+ pax_track_stack(); -+ - DPRINTK("ncp_rename: %s/%s to %s/%s\n", - old_dentry->d_parent->d_name.name, old_dentry->d_name.name, - new_dentry->d_parent->d_name.name, new_dentry->d_name.name); -diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c -index 202f370..9d4565e 100644 ---- a/fs/ncpfs/inode.c -+++ b/fs/ncpfs/inode.c -@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) - #endif - struct ncp_entry_info finfo; - -+ pax_track_stack(); -+ - memset(&data, 0, sizeof(data)); - server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); - if (!server) -diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c -index 281ae95..dd895b9 100644 ---- a/fs/nfs/blocklayout/blocklayout.c -+++ b/fs/nfs/blocklayout/blocklayout.c -@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect) - */ - struct parallel_io { - struct kref refcnt; -- struct rpc_call_ops call_ops; -+ rpc_call_ops_no_const call_ops; - void (*pnfs_callback) (void *data); - void *data; - }; -diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c -index 679d2f5..ef1ffec 100644 ---- a/fs/nfs/inode.c -+++ b/fs/nfs/inode.c -@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode) - nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); - nfsi->attrtimeo_timestamp = jiffies; - -- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); -+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf)); - if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) - nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; - else -@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt - return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); - } - --static atomic_long_t nfs_attr_generation_counter; -+static atomic_long_unchecked_t nfs_attr_generation_counter; - - static unsigned long nfs_read_attr_generation_counter(void) - { -- return atomic_long_read(&nfs_attr_generation_counter); -+ return atomic_long_read_unchecked(&nfs_attr_generation_counter); - } - - unsigned long nfs_inc_attr_generation_counter(void) - { -- return atomic_long_inc_return(&nfs_attr_generation_counter); -+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter); - } - - void nfs_fattr_init(struct nfs_fattr *fattr) -diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c -index 6f8bcc7..8f823c5 100644 ---- a/fs/nfsd/nfs4state.c -+++ b/fs/nfsd/nfs4state.c -@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - unsigned int strhashval; - int err; - -+ pax_track_stack(); -+ - dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", - (long long) lock->lk_offset, 
- (long long) lock->lk_length); -diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c -index f810996..cec8977 100644 ---- a/fs/nfsd/nfs4xdr.c -+++ b/fs/nfsd/nfs4xdr.c -@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, - .dentry = dentry, - }; - -+ pax_track_stack(); -+ - BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1); - BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion)); - BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion)); -diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c -index acf88ae..4fd6245 100644 ---- a/fs/nfsd/vfs.c -+++ b/fs/nfsd/vfs.c -@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, - } else { - oldfs = get_fs(); - set_fs(KERNEL_DS); -- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset); -+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset); - set_fs(oldfs); - } - -@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, - - /* Write the data. */ - oldfs = get_fs(); set_fs(KERNEL_DS); -- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); -+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset); - set_fs(oldfs); - if (host_err < 0) - goto out_nfserr; -@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) - */ - - oldfs = get_fs(); set_fs(KERNEL_DS); -- host_err = inode->i_op->readlink(dentry, buf, *lenp); -+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp); - set_fs(oldfs); - - if (host_err < 0) -diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c -index 9fde1c0..14e8827 100644 ---- a/fs/notify/fanotify/fanotify_user.c -+++ b/fs/notify/fanotify/fanotify_user.c -@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, - goto out_close_fd; - - ret = -EFAULT; -- if (copy_to_user(buf, &fanotify_event_metadata, -+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata || -+ copy_to_user(buf, &fanotify_event_metadata, - fanotify_event_metadata.event_len)) - goto out_kill_access_response; - -diff --git a/fs/notify/notification.c b/fs/notify/notification.c -index ee18815..7aa5d01 100644 ---- a/fs/notify/notification.c -+++ b/fs/notify/notification.c -@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep; - * get set to 0 so it will never get 'freed' - */ - static struct fsnotify_event *q_overflow_event; --static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); -+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0); - - /** - * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. -@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); - */ - u32 fsnotify_get_cookie(void) - { -- return atomic_inc_return(&fsnotify_sync_cookie); -+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie); - } - EXPORT_SYMBOL_GPL(fsnotify_get_cookie); - -diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c -index 99e3610..02c1068 100644 ---- a/fs/ntfs/dir.c -+++ b/fs/ntfs/dir.c -@@ -1329,7 +1329,7 @@ find_next_index_buffer: - ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & - ~(s64)(ndir->itype.index.block_size - 1))); - /* Bounds checks. */ -- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { -+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { - ntfs_error(sb, "Out of bounds check failed. 
Corrupt directory " - "inode 0x%lx or driver bug.", vdir->i_ino); - goto err_out; -diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c -index c587e2d..3641eaa 100644 ---- a/fs/ntfs/file.c -+++ b/fs/ntfs/file.c -@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = { - #endif /* NTFS_RW */ - }; - --const struct file_operations ntfs_empty_file_ops = {}; -+const struct file_operations ntfs_empty_file_ops __read_only; - --const struct inode_operations ntfs_empty_inode_ops = {}; -+const struct inode_operations ntfs_empty_inode_ops __read_only; -diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c -index 210c352..a174f83 100644 ---- a/fs/ocfs2/localalloc.c -+++ b/fs/ocfs2/localalloc.c -@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, - goto bail; - } - -- atomic_inc(&osb->alloc_stats.moves); -+ atomic_inc_unchecked(&osb->alloc_stats.moves); - - bail: - if (handle) -diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c -index 53aa41e..d7df9f1 100644 ---- a/fs/ocfs2/namei.c -+++ b/fs/ocfs2/namei.c -@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir, - struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; - struct ocfs2_dir_lookup_result target_insert = { NULL, }; - -+ pax_track_stack(); -+ - /* At some point it might be nice to break this function up a - * bit. */ - -diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h -index 4092858..51c70ff 100644 ---- a/fs/ocfs2/ocfs2.h -+++ b/fs/ocfs2/ocfs2.h -@@ -235,11 +235,11 @@ enum ocfs2_vol_state - - struct ocfs2_alloc_stats - { -- atomic_t moves; -- atomic_t local_data; -- atomic_t bitmap_data; -- atomic_t bg_allocs; -- atomic_t bg_extends; -+ atomic_unchecked_t moves; -+ atomic_unchecked_t local_data; -+ atomic_unchecked_t bitmap_data; -+ atomic_unchecked_t bg_allocs; -+ atomic_unchecked_t bg_extends; - }; - - enum ocfs2_local_alloc_state -diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c -index ba5d97e..c77db25 100644 ---- a/fs/ocfs2/suballoc.c -+++ b/fs/ocfs2/suballoc.c -@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, - mlog_errno(status); - goto bail; - } -- atomic_inc(&osb->alloc_stats.bg_extends); -+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends); - - /* You should never ask for this much metadata */ - BUG_ON(bits_wanted > -@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle, - mlog_errno(status); - goto bail; - } -- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); -+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); - - *suballoc_loc = res.sr_bg_blkno; - *suballoc_bit_start = res.sr_bit_offset; -@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle, - trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno, - res->sr_bits); - -- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); -+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); - - BUG_ON(res->sr_bits != 1); - -@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle, - mlog_errno(status); - goto bail; - } -- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); -+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); - - BUG_ON(res.sr_bits != 1); - -@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle, - cluster_start, - num_clusters); - if (!status) -- atomic_inc(&osb->alloc_stats.local_data); -+ atomic_inc_unchecked(&osb->alloc_stats.local_data); - } else { - if (min_clusters > (osb->bitmap_cpg - 1)) { 
- /* The only paths asking for contiguousness -@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle, - ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, - res.sr_bg_blkno, - res.sr_bit_offset); -- atomic_inc(&osb->alloc_stats.bitmap_data); -+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); - *num_clusters = res.sr_bits; - } - } -diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c -index 56f6102..1433c29 100644 ---- a/fs/ocfs2/super.c -+++ b/fs/ocfs2/super.c -@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) - "%10s => GlobalAllocs: %d LocalAllocs: %d " - "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", - "Stats", -- atomic_read(&osb->alloc_stats.bitmap_data), -- atomic_read(&osb->alloc_stats.local_data), -- atomic_read(&osb->alloc_stats.bg_allocs), -- atomic_read(&osb->alloc_stats.moves), -- atomic_read(&osb->alloc_stats.bg_extends)); -+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data), -+ atomic_read_unchecked(&osb->alloc_stats.local_data), -+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs), -+ atomic_read_unchecked(&osb->alloc_stats.moves), -+ atomic_read_unchecked(&osb->alloc_stats.bg_extends)); - - out += snprintf(buf + out, len - out, - "%10s => State: %u Descriptor: %llu Size: %u bits " -@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb, - spin_lock_init(&osb->osb_xattr_lock); - ocfs2_init_steal_slots(osb); - -- atomic_set(&osb->alloc_stats.moves, 0); -- atomic_set(&osb->alloc_stats.local_data, 0); -- atomic_set(&osb->alloc_stats.bitmap_data, 0); -- atomic_set(&osb->alloc_stats.bg_allocs, 0); -- atomic_set(&osb->alloc_stats.bg_extends, 0); -+ atomic_set_unchecked(&osb->alloc_stats.moves, 0); -+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0); -+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); -+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); -+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); - - /* Copy the blockcheck stats from the superblock probe */ - osb->osb_ecc_stats = *stats; -diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c -index 5d22872..523db20 100644 ---- a/fs/ocfs2/symlink.c -+++ b/fs/ocfs2/symlink.c -@@ -142,7 +142,7 @@ bail: - - static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) - { -- char *link = nd_get_link(nd); -+ const char *link = nd_get_link(nd); - if (!IS_ERR(link)) - kfree(link); - } -diff --git a/fs/open.c b/fs/open.c -index f711921..28d5958 100644 ---- a/fs/open.c -+++ b/fs/open.c -@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) - error = locks_verify_truncate(inode, NULL, length); - if (!error) - error = security_path_truncate(&path); -+ -+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt)) -+ error = -EACCES; -+ - if (!error) - error = do_truncate(path.dentry, length, 0, NULL); - -@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) - if (__mnt_is_readonly(path.mnt)) - res = -EROFS; - -+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode)) -+ res = -EACCES; -+ - out_path_release: - path_put(&path); - out: -@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename) - if (error) - goto dput_and_out; - -+ gr_log_chdir(path.dentry, path.mnt); -+ - set_fs_pwd(current->fs, &path); - - dput_and_out: -@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd) - goto out_putf; - - error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); -+ -+ if (!error && 
!gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt)) -+ error = -EPERM; -+ -+ if (!error) -+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt); -+ - if (!error) - set_fs_pwd(current->fs, &file->f_path); - out_putf: -@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename) - if (error) - goto dput_and_out; - -+ if (gr_handle_chroot_chroot(path.dentry, path.mnt)) -+ goto dput_and_out; -+ - set_fs_root(current->fs, &path); -+ -+ gr_handle_chroot_chdir(&path); -+ - error = 0; - dput_and_out: - path_put(&path); -@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode) - if (error) - return error; - mutex_lock(&inode->i_mutex); -+ -+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) { -+ error = -EACCES; -+ goto out_unlock; -+ } -+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) { -+ error = -EACCES; -+ goto out_unlock; -+ } -+ - error = security_path_chmod(path->dentry, path->mnt, mode); - if (error) - goto out_unlock; -@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group) - int error; - struct iattr newattrs; - -+ if (!gr_acl_handle_chown(path->dentry, path->mnt)) -+ return -EACCES; -+ - newattrs.ia_valid = ATTR_CTIME; - if (user != (uid_t) -1) { - newattrs.ia_valid |= ATTR_UID; -diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c -index 6296b40..417c00f 100644 ---- a/fs/partitions/efi.c -+++ b/fs/partitions/efi.c -@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, - if (!gpt) - return NULL; - -+ if (!le32_to_cpu(gpt->num_partition_entries)) -+ return NULL; -+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL); -+ if (!pte) -+ return NULL; -+ - count = le32_to_cpu(gpt->num_partition_entries) * - le32_to_cpu(gpt->sizeof_partition_entry); -- if (!count) -- return NULL; -- pte = kzalloc(count, GFP_KERNEL); -- if (!pte) -- return NULL; -- - if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), - (u8 *) pte, - count) < count) { -diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c -index af9fdf0..75b15c3 100644 ---- a/fs/partitions/ldm.c -+++ b/fs/partitions/ldm.c -@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) - goto found; - } - -- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL); -+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL); - if (!f) { - ldm_crit ("Out of memory."); - return false; -diff --git a/fs/pipe.c b/fs/pipe.c -index 0e0be1d..f62a72d 100644 ---- a/fs/pipe.c -+++ b/fs/pipe.c -@@ -420,9 +420,9 @@ redo: - } - if (bufs) /* More to do? */ - continue; -- if (!pipe->writers) -+ if (!atomic_read(&pipe->writers)) - break; -- if (!pipe->waiting_writers) { -+ if (!atomic_read(&pipe->waiting_writers)) { - /* syscall merging: Usually we must not sleep - * if O_NONBLOCK is set, or if we got some data. 
- * But if a writer sleeps in kernel space, then -@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, - mutex_lock(&inode->i_mutex); - pipe = inode->i_pipe; - -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - send_sig(SIGPIPE, current, 0); - ret = -EPIPE; - goto out; -@@ -530,7 +530,7 @@ redo1: - for (;;) { - int bufs; - -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; -@@ -616,9 +616,9 @@ redo2: - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - do_wakeup = 0; - } -- pipe->waiting_writers++; -+ atomic_inc(&pipe->waiting_writers); - pipe_wait(pipe); -- pipe->waiting_writers--; -+ atomic_dec(&pipe->waiting_writers); - } - out: - mutex_unlock(&inode->i_mutex); -@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait) - mask = 0; - if (filp->f_mode & FMODE_READ) { - mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; -- if (!pipe->writers && filp->f_version != pipe->w_counter) -+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter) - mask |= POLLHUP; - } - -@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait) - * Most Unices do not set POLLERR for FIFOs but on Linux they - * behave exactly like pipes for poll(). - */ -- if (!pipe->readers) -+ if (!atomic_read(&pipe->readers)) - mask |= POLLERR; - } - -@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw) - - mutex_lock(&inode->i_mutex); - pipe = inode->i_pipe; -- pipe->readers -= decr; -- pipe->writers -= decw; -+ atomic_sub(decr, &pipe->readers); -+ atomic_sub(decw, &pipe->writers); - -- if (!pipe->readers && !pipe->writers) { -+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) { - free_pipe_info(inode); - } else { - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); -@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp) - - if (inode->i_pipe) { - ret = 0; -- inode->i_pipe->readers++; -+ atomic_inc(&inode->i_pipe->readers); - } - - mutex_unlock(&inode->i_mutex); -@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp) - - if (inode->i_pipe) { - ret = 0; -- inode->i_pipe->writers++; -+ atomic_inc(&inode->i_pipe->writers); - } - - mutex_unlock(&inode->i_mutex); -@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) - if (inode->i_pipe) { - ret = 0; - if (filp->f_mode & FMODE_READ) -- inode->i_pipe->readers++; -+ atomic_inc(&inode->i_pipe->readers); - if (filp->f_mode & FMODE_WRITE) -- inode->i_pipe->writers++; -+ atomic_inc(&inode->i_pipe->writers); - } - - mutex_unlock(&inode->i_mutex); -@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode) - inode->i_pipe = NULL; - } - --static struct vfsmount *pipe_mnt __read_mostly; -+struct vfsmount *pipe_mnt __read_mostly; - - /* - * pipefs_dname() is called from d_path(). 
-@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void) - goto fail_iput; - inode->i_pipe = pipe; - -- pipe->readers = pipe->writers = 1; -+ atomic_set(&pipe->readers, 1); -+ atomic_set(&pipe->writers, 1); - inode->i_fop = &rdwr_pipefifo_fops; - - /* -diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig -index 15af622..0e9f4467 100644 ---- a/fs/proc/Kconfig -+++ b/fs/proc/Kconfig -@@ -30,12 +30,12 @@ config PROC_FS - - config PROC_KCORE - bool "/proc/kcore support" if !ARM -- depends on PROC_FS && MMU -+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD - - config PROC_VMCORE - bool "/proc/vmcore support" -- depends on PROC_FS && CRASH_DUMP -- default y -+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC -+ default n - help - Exports the dump image of crashed kernel in ELF format. - -@@ -59,8 +59,8 @@ config PROC_SYSCTL - limited in memory. - - config PROC_PAGE_MONITOR -- default y -- depends on PROC_FS && MMU -+ default n -+ depends on PROC_FS && MMU && !GRKERNSEC - bool "Enable /proc page monitoring" if EXPERT - help - Various /proc files exist to monitor process memory utilization: -diff --git a/fs/proc/array.c b/fs/proc/array.c -index 3a1dafd..c7fed72 100644 ---- a/fs/proc/array.c -+++ b/fs/proc/array.c -@@ -60,6 +60,7 @@ - #include <linux/tty.h> - #include <linux/string.h> - #include <linux/mman.h> -+#include <linux/grsecurity.h> - #include <linux/proc_fs.h> - #include <linux/ioport.h> - #include <linux/uaccess.h> -@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) - seq_putc(m, '\n'); - } - -+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+static inline void task_pax(struct seq_file *m, struct task_struct *p) -+{ -+ if (p->mm) -+ seq_printf(m, "PaX:\t%c%c%c%c%c\n", -+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', -+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', -+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', -+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', -+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 
'S' : 's'); -+ else -+ seq_printf(m, "PaX:\t-----\n"); -+} -+#endif -+ - int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task) - { -@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, - task_cpus_allowed(m, task); - cpuset_task_status_allowed(m, task); - task_context_switch_counts(m, task); -+ -+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+ task_pax(m, task); -+#endif -+ -+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) -+ task_grsec_rbac(m, task); -+#endif -+ - return 0; - } - -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ -+ (_mm->pax_flags & MF_PAX_RANDMMAP || \ -+ _mm->pax_flags & MF_PAX_SEGMEXEC)) -+#endif -+ - static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task, int whole) - { -@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, - char tcomm[sizeof(task->comm)]; - unsigned long flags; - -+ pax_track_stack(); -+ - state = *get_task_state(task); - vsize = eip = esp = 0; - permitted = ptrace_may_access(task, PTRACE_MODE_READ); -@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, - gtime = task->gtime; - } - -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (PAX_RAND_FLAGS(mm)) { -+ eip = 0; -+ esp = 0; -+ wchan = 0; -+ } -+#endif -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ wchan = 0; -+ eip =0; -+ esp =0; -+#endif -+ - /* scale priority and nice values from timeslices to -20..20 */ - /* to make it look like a "normal" Unix priority/nice value */ - priority = task_prio(task); -@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, - vsize, - mm ? get_mm_rss(mm) : 0, - rsslim, -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0), -+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0), -+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0), -+#else - mm ? (permitted ? mm->start_code : 1) : 0, - mm ? (permitted ? mm->end_code : 1) : 0, - (permitted && mm) ? mm->start_stack : 0, -+#endif - esp, - eip, - /* The signal information here is obsolete. 
-@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, - - return 0; - } -+ -+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR -+int proc_pid_ipaddr(struct task_struct *task, char *buffer) -+{ -+ u32 curr_ip = 0; -+ unsigned long flags; -+ -+ if (lock_task_sighand(task, &flags)) { -+ curr_ip = task->signal->curr_ip; -+ unlock_task_sighand(task, &flags); -+ } -+ -+ return sprintf(buffer, "%pI4\n", &curr_ip); -+} -+#endif -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 5eb0206..fe01db4 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -107,6 +107,22 @@ struct pid_entry { - union proc_op op; - }; - -+struct getdents_callback { -+ struct linux_dirent __user * current_dir; -+ struct linux_dirent __user * previous; -+ struct file * file; -+ int count; -+ int error; -+}; -+ -+static int gr_fake_filldir(void * __buf, const char *name, int namlen, -+ loff_t offset, u64 ino, unsigned int d_type) -+{ -+ struct getdents_callback * buf = (struct getdents_callback *) __buf; -+ buf->error = -EINVAL; -+ return 0; -+} -+ - #define NOD(NAME, MODE, IOP, FOP, OP) { \ - .name = (NAME), \ - .len = sizeof(NAME) - 1, \ -@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task) - if (task == current) - return mm; - -+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)) -+ return ERR_PTR(-EPERM); -+ - /* - * If current is actively ptrace'ing, and would also be - * permitted to freshly attach with ptrace now, permit it. -@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer) - if (!mm->arg_end) - goto out_mm; /* Shh! No looking before we're done */ - -+ if (gr_acl_handle_procpidmem(task)) -+ goto out_mm; -+ - len = mm->arg_end - mm->arg_start; - - if (len > PAGE_SIZE) -@@ -309,12 +331,28 @@ out: - return res; - } - -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ -+ (_mm->pax_flags & MF_PAX_RANDMMAP || \ -+ _mm->pax_flags & MF_PAX_SEGMEXEC)) -+#endif -+ - static int proc_pid_auxv(struct task_struct *task, char *buffer) - { - struct mm_struct *mm = mm_for_maps(task); - int res = PTR_ERR(mm); - if (mm && !IS_ERR(mm)) { - unsigned int nwords = 0; -+ -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ /* allow if we're currently ptracing this task */ -+ if (PAX_RAND_FLAGS(mm) && -+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) { -+ mmput(mm); -+ return 0; -+ } -+#endif -+ - do { - nwords += 2; - } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ -@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer) - } - - --#ifdef CONFIG_KALLSYMS -+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) - /* - * Provides a wchan file via kallsyms in a proper one-value-per-file format. - * Returns the resolved symbol. If that fails, simply return the address. 
-@@ -367,7 +405,7 @@ static void unlock_trace(struct task_struct *task) - mutex_unlock(&task->signal->cred_guard_mutex); - } - --#ifdef CONFIG_STACKTRACE -+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) - - #define MAX_STACK_TRACE_DEPTH 64 - -@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer) - return count; - } - --#ifdef CONFIG_HAVE_ARCH_TRACEHOOK -+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) - static int proc_pid_syscall(struct task_struct *task, char *buffer) - { - long nr; -@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer) - /************************************************************************/ - - /* permission checks */ --static int proc_fd_access_allowed(struct inode *inode) -+static int proc_fd_access_allowed(struct inode *inode, unsigned int log) - { - struct task_struct *task; - int allowed = 0; -@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct inode *inode) - */ - task = get_proc_task(inode); - if (task) { -- allowed = ptrace_may_access(task, PTRACE_MODE_READ); -+ if (log) -+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ); -+ else -+ allowed = ptrace_may_access(task, PTRACE_MODE_READ); - put_task_struct(task); - } - return allowed; -@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file *file, char __user *buf, - if (!task) - goto out_no_task; - -+ if (gr_acl_handle_procpidmem(task)) -+ goto out; -+ - ret = -ENOMEM; - page = (char *)__get_free_page(GFP_TEMPORARY); - if (!page) -@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) - path_put(&nd->path); - - /* Are we allowed to snoop on the tasks file descriptors? */ -- if (!proc_fd_access_allowed(inode)) -+ if (!proc_fd_access_allowed(inode,0)) - goto out; - - error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); -@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b - struct path path; - - /* Are we allowed to snoop on the tasks file descriptors? 
*/ -- if (!proc_fd_access_allowed(inode)) -- goto out; -+ /* logging this is needed for learning on chromium to work properly, -+ but we don't want to flood the logs from 'ps' which does a readlink -+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn -+ CAP_SYS_PTRACE as it's not necessary for its basic functionality -+ */ -+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') { -+ if (!proc_fd_access_allowed(inode,0)) -+ goto out; -+ } else { -+ if (!proc_fd_access_allowed(inode,1)) -+ goto out; -+ } - - error = PROC_I(inode)->op.proc_get_link(inode, &path); - if (error) -@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t - rcu_read_lock(); - cred = __task_cred(task); - inode->i_uid = cred->euid; -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; -+#else - inode->i_gid = cred->egid; -+#endif - rcu_read_unlock(); - } - security_task_to_inode(task, inode); -@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) - struct inode *inode = dentry->d_inode; - struct task_struct *task; - const struct cred *cred; -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ const struct cred *tmpcred = current_cred(); -+#endif - - generic_fillattr(inode, stat); - -@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) - stat->uid = 0; - stat->gid = 0; - task = pid_task(proc_pid(inode), PIDTYPE_PID); -+ -+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { -+ rcu_read_unlock(); -+ return -ENOENT; -+ } -+ - if (task) { -+ cred = __task_cred(task); -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ if (!tmpcred->uid || (tmpcred->uid == cred->uid) -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID) -+#endif -+ ) { -+#endif - if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || -+#endif - task_dumpable(task)) { -- cred = __task_cred(task); - stat->uid = cred->euid; -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ stat->gid = CONFIG_GRKERNSEC_PROC_GID; -+#else - stat->gid = cred->egid; -+#endif - } -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ } else { -+ rcu_read_unlock(); -+ return -ENOENT; -+ } -+#endif - } - rcu_read_unlock(); - return 0; -@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd) - - if (task) { - if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || -+#endif - task_dumpable(task)) { - rcu_read_lock(); - cred = __task_cred(task); - inode->i_uid = cred->euid; -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; -+#else - inode->i_gid = cred->egid; -+#endif - rcu_read_unlock(); - } else { - inode->i_uid = 0; -@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info) - int fd = proc_fd(inode); - - if (task) { -- files = get_files_struct(task); -+ if (!gr_acl_handle_procpidmem(task)) -+ files = get_files_struct(task); - 
put_task_struct(task); - } - if (files) { -@@ -2176,11 +2275,21 @@ static const struct file_operations proc_fd_operations = { - */ - static int proc_fd_permission(struct inode *inode, int mask) - { -+ struct task_struct *task; - int rv = generic_permission(inode, mask); -- if (rv == 0) -- return 0; -+ - if (task_pid(current) == proc_pid(inode)) - rv = 0; -+ -+ task = get_proc_task(inode); -+ if (task == NULL) -+ return rv; -+ -+ if (gr_acl_handle_procpidmem(task)) -+ rv = -EACCES; -+ -+ put_task_struct(task); -+ - return rv; - } - -@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir, - if (!task) - goto out_no_task; - -+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) -+ goto out; -+ - /* - * Yes, it does not scale. And it should not. Don't add - * new entries into /proc/<tgid>/ without very good reasons. -@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct file *filp, - if (!task) - goto out_no_task; - -+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) -+ goto out; -+ - ret = 0; - i = filp->f_pos; - switch (i) { -@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) - static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, - void *cookie) - { -- char *s = nd_get_link(nd); -+ const char *s = nd_get_link(nd); - if (!IS_ERR(s)) - __putname(s); - } -@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_stuff[] = { - REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), - #endif - REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), --#ifdef CONFIG_HAVE_ARCH_TRACEHOOK -+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) - INF("syscall", S_IRUGO, proc_pid_syscall), - #endif - INF("cmdline", S_IRUGO, proc_pid_cmdline), -@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_stuff[] = { - #ifdef CONFIG_SECURITY - DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), - #endif --#ifdef CONFIG_KALLSYMS -+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) - INF("wchan", S_IRUGO, proc_pid_wchan), - #endif --#ifdef CONFIG_STACKTRACE -+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) - ONE("stack", S_IRUGO, proc_pid_stack), - #endif - #ifdef CONFIG_SCHEDSTATS -@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_stuff[] = { - #ifdef CONFIG_HARDWALL - INF("hardwall", S_IRUGO, proc_pid_hardwall), - #endif -+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR -+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr), -+#endif - }; - - static int proc_tgid_base_readdir(struct file * filp, -@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir, - if (!inode) - goto out; - -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; -+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; -+#else - inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; -+#endif - inode->i_op = &proc_tgid_base_inode_operations; - inode->i_fop = &proc_tgid_base_operations; - inode->i_flags|=S_IMMUTABLE; -@@ -3031,7 +3156,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct - if (!task) - goto out; - -+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) -+ goto out_put_task; -+ - result = proc_pid_instantiate(dir, dentry, task, NULL); -+out_put_task: - put_task_struct(task); - out: - return result; -@@ -3096,6 
+3225,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) - { - unsigned int nr; - struct task_struct *reaper; -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ const struct cred *tmpcred = current_cred(); -+ const struct cred *itercred; -+#endif -+ filldir_t __filldir = filldir; - struct tgid_iter iter; - struct pid_namespace *ns; - -@@ -3119,8 +3253,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) - for (iter = next_tgid(ns, iter); - iter.task; - iter.tgid += 1, iter = next_tgid(ns, iter)) { -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ rcu_read_lock(); -+ itercred = __task_cred(iter.task); -+#endif -+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task) -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ || (tmpcred->uid && (itercred->uid != tmpcred->uid) -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) -+#endif -+ ) -+#endif -+ ) -+ __filldir = &gr_fake_filldir; -+ else -+ __filldir = filldir; -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ rcu_read_unlock(); -+#endif - filp->f_pos = iter.tgid + TGID_OFFSET; -- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) { -+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { - put_task_struct(iter.task); - goto out; - } -@@ -3148,7 +3301,7 @@ static const struct pid_entry tid_base_stuff[] = { - REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), - #endif - REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), --#ifdef CONFIG_HAVE_ARCH_TRACEHOOK -+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) - INF("syscall", S_IRUGO, proc_pid_syscall), - #endif - INF("cmdline", S_IRUGO, proc_pid_cmdline), -@@ -3172,10 +3325,10 @@ static const struct pid_entry tid_base_stuff[] = { - #ifdef CONFIG_SECURITY - DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), - #endif --#ifdef CONFIG_KALLSYMS -+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) - INF("wchan", S_IRUGO, proc_pid_wchan), - #endif --#ifdef CONFIG_STACKTRACE -+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) - ONE("stack", S_IRUGO, proc_pid_stack), - #endif - #ifdef CONFIG_SCHEDSTATS -diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c -index 82676e3..5f8518a 100644 ---- a/fs/proc/cmdline.c -+++ b/fs/proc/cmdline.c -@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = { - - static int __init proc_cmdline_init(void) - { -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops); -+#else - proc_create("cmdline", 0, NULL, &cmdline_proc_fops); -+#endif - return 0; - } - module_init(proc_cmdline_init); -diff --git a/fs/proc/devices.c b/fs/proc/devices.c -index b143471..bb105e5 100644 ---- a/fs/proc/devices.c -+++ b/fs/proc/devices.c -@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = { - - static int __init proc_devices_init(void) - { -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations); -+#else - proc_create("devices", 0, NULL, &proc_devinfo_operations); -+#endif - return 0; - } - module_init(proc_devices_init); -diff --git a/fs/proc/inode.c b/fs/proc/inode.c -index 7ed72d6..d5f061a 100644 ---- a/fs/proc/inode.c -+++ b/fs/proc/inode.c -@@ -18,12 +18,18 @@ - #include 
<linux/module.h> - #include <linux/sysctl.h> - #include <linux/slab.h> -+#include <linux/grsecurity.h> - - #include <asm/system.h> - #include <asm/uaccess.h> - - #include "internal.h" - -+#ifdef CONFIG_PROC_SYSCTL -+extern const struct inode_operations proc_sys_inode_operations; -+extern const struct inode_operations proc_sys_dir_operations; -+#endif -+ - static void proc_evict_inode(struct inode *inode) - { - struct proc_dir_entry *de; -@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode) - ns_ops = PROC_I(inode)->ns_ops; - if (ns_ops && ns_ops->put) - ns_ops->put(PROC_I(inode)->ns); -+ -+#ifdef CONFIG_PROC_SYSCTL -+ if (inode->i_op == &proc_sys_inode_operations || -+ inode->i_op == &proc_sys_dir_operations) -+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev); -+#endif -+ - } - - static struct kmem_cache * proc_inode_cachep; -@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) - if (de->mode) { - inode->i_mode = de->mode; - inode->i_uid = de->uid; -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; -+#else - inode->i_gid = de->gid; -+#endif - } - if (de->size) - inode->i_size = de->size; -diff --git a/fs/proc/internal.h b/fs/proc/internal.h -index 7838e5c..ff92cbc 100644 ---- a/fs/proc/internal.h -+++ b/fs/proc/internal.h -@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task); - extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, - struct pid *pid, struct task_struct *task); -+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR -+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer); -+#endif - extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); - - extern const struct file_operations proc_maps_operations; -diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c -index d245cb2..7e645bd 100644 ---- a/fs/proc/kcore.c -+++ b/fs/proc/kcore.c -@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) - off_t offset = 0; - struct kcore_list *m; - -+ pax_track_stack(); -+ - /* setup ELF header */ - elf = (struct elfhdr *) bufp; - bufp += sizeof(struct elfhdr); -@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) - * the addresses in the elf_phdr on our list. - */ - start = kc_offset_to_vaddr(*fpos - elf_buflen); -- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) -+ tsz = PAGE_SIZE - (start & ~PAGE_MASK); -+ if (tsz > buflen) - tsz = buflen; -- -+ - while (buflen) { - struct kcore_list *m; - -@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) - kfree(elf_buf); - } else { - if (kern_addr_valid(start)) { -- unsigned long n; -+ char *elf_buf; -+ mm_segment_t oldfs; - -- n = copy_to_user(buffer, (char *)start, tsz); -- /* -- * We cannot distingush between fault on source -- * and fault on destination. When this happens -- * we clear too and hope it will trigger the -- * EFAULT again. 
-- */ -- if (n) { -- if (clear_user(buffer + tsz - n, -- n)) -+ elf_buf = kmalloc(tsz, GFP_KERNEL); -+ if (!elf_buf) -+ return -ENOMEM; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) { -+ set_fs(oldfs); -+ if (copy_to_user(buffer, elf_buf, tsz)) { -+ kfree(elf_buf); - return -EFAULT; -+ } - } -+ set_fs(oldfs); -+ kfree(elf_buf); - } else { - if (clear_user(buffer, tsz)) - return -EFAULT; -@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) - - static int open_kcore(struct inode *inode, struct file *filp) - { -+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) -+ return -EPERM; -+#endif - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - if (kcore_need_update) -diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c -index 5861741..32c53bc 100644 ---- a/fs/proc/meminfo.c -+++ b/fs/proc/meminfo.c -@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) - unsigned long pages[NR_LRU_LISTS]; - int lru; - -+ pax_track_stack(); -+ - /* - * display in kilobytes. - */ -@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) - vmi.used >> 10, - vmi.largest_chunk >> 10 - #ifdef CONFIG_MEMORY_FAILURE -- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) -+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10) - #endif - #ifdef CONFIG_TRANSPARENT_HUGEPAGE - ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * -diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c -index b1822dd..df622cb 100644 ---- a/fs/proc/nommu.c -+++ b/fs/proc/nommu.c -@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) - if (len < 1) - len = 1; - seq_printf(m, "%*c", len, ' '); -- seq_path(m, &file->f_path, ""); -+ seq_path(m, &file->f_path, "\n\"); - } - - seq_putc(m, '\n'); -diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c -index f738024..876984a 100644 ---- a/fs/proc/proc_net.c -+++ b/fs/proc/proc_net.c -@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir) - struct task_struct *task; - struct nsproxy *ns; - struct net *net = NULL; -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ const struct cred *cred = current_cred(); -+#endif -+ -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ if (cred->fsuid) -+ return net; -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)) -+ return net; -+#endif - - rcu_read_lock(); - task = pid_task(proc_pid(dir), PIDTYPE_PID); -diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index 1a77dbe..56ec911 100644 ---- a/fs/proc/proc_sysctl.c -+++ b/fs/proc/proc_sysctl.c -@@ -8,11 +8,13 @@ - #include <linux/namei.h> - #include "internal.h" - -+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op); -+ - static const struct dentry_operations proc_sys_dentry_operations; - static const struct file_operations proc_sys_file_operations; --static const struct inode_operations proc_sys_inode_operations; -+const struct inode_operations proc_sys_inode_operations; - static const struct file_operations proc_sys_dir_file_operations; --static const struct inode_operations proc_sys_dir_operations; -+const struct inode_operations proc_sys_dir_operations; - - static struct inode *proc_sys_make_inode(struct super_block *sb, - struct ctl_table_header *head, struct ctl_table *table) -@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct 
dentry *dentry, - - err = NULL; - d_set_d_op(dentry, &proc_sys_dentry_operations); -+ -+ gr_handle_proc_create(dentry, inode); -+ - d_add(dentry, inode); - -+ if (gr_handle_sysctl(p, MAY_EXEC)) -+ err = ERR_PTR(-ENOENT); -+ - out: - sysctl_head_finish(head); - return err; -@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent, - return -ENOMEM; - } else { - d_set_d_op(child, &proc_sys_dentry_operations); -+ -+ gr_handle_proc_create(child, inode); -+ - d_add(child, inode); - } - } else { -@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table, - if (*pos < file->f_pos) - continue; - -+ if (gr_handle_sysctl(table, 0)) -+ continue; -+ - res = proc_sys_fill_cache(file, dirent, filldir, head, table); - if (res) - return res; -@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct - if (IS_ERR(head)) - return PTR_ERR(head); - -+ if (table && gr_handle_sysctl(table, MAY_EXEC)) -+ return -ENOENT; -+ - generic_fillattr(inode, stat); - if (table) - stat->mode = (stat->mode & S_IFMT) | table->mode; -@@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = { - }; - - static const struct file_operations proc_sys_dir_file_operations = { -+ .read = generic_read_dir, - .readdir = proc_sys_readdir, - .llseek = generic_file_llseek, - }; - --static const struct inode_operations proc_sys_inode_operations = { -+const struct inode_operations proc_sys_inode_operations = { - .permission = proc_sys_permission, - .setattr = proc_sys_setattr, - .getattr = proc_sys_getattr, - }; - --static const struct inode_operations proc_sys_dir_operations = { -+const struct inode_operations proc_sys_dir_operations = { - .lookup = proc_sys_lookup, - .permission = proc_sys_permission, - .setattr = proc_sys_setattr, -diff --git a/fs/proc/root.c b/fs/proc/root.c -index 9a8a2b7..3018df6 100644 ---- a/fs/proc/root.c -+++ b/fs/proc/root.c -@@ -123,7 +123,15 @@ void __init proc_root_init(void) - #ifdef CONFIG_PROC_DEVICETREE - proc_device_tree_init(); - #endif -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); -+#endif -+#else - proc_mkdir("bus", NULL); -+#endif - proc_sys_init(); - } - -diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c -index c7d4ee6..41c5564 100644 ---- a/fs/proc/task_mmu.c -+++ b/fs/proc/task_mmu.c -@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) - "VmExe:\t%8lu kB\n" - "VmLib:\t%8lu kB\n" - "VmPTE:\t%8lu kB\n" -- "VmSwap:\t%8lu kB\n", -- hiwater_vm << (PAGE_SHIFT-10), -+ "VmSwap:\t%8lu kB\n" -+ -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+ "CsBase:\t%8lx\nCsLim:\t%8lx\n" -+#endif -+ -+ ,hiwater_vm << (PAGE_SHIFT-10), - (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), - mm->locked_vm << (PAGE_SHIFT-10), - hiwater_rss << (PAGE_SHIFT-10), -@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) - data << (PAGE_SHIFT-10), - mm->stack_vm << (PAGE_SHIFT-10), text, lib, - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10, -- swap << (PAGE_SHIFT-10)); -+ swap << (PAGE_SHIFT-10) -+ -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+ , mm->context.user_cs_base, mm->context.user_cs_limit -+#endif -+ -+ ); - } - - unsigned long task_vsize(struct mm_struct *mm) -@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file, - return ret; - } - -+#ifdef 
CONFIG_GRKERNSEC_PROC_MEMMAP -+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ -+ (_mm->pax_flags & MF_PAX_RANDMMAP || \ -+ _mm->pax_flags & MF_PAX_SEGMEXEC)) -+#endif -+ - static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - { - struct mm_struct *mm = vma->vm_mm; -@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; - } - -- /* We don't show the stack guard page in /proc/maps */ -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start; -+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end; -+#else - start = vma->vm_start; -- if (stack_guard_page_start(vma, start)) -- start += PAGE_SIZE; - end = vma->vm_end; -- if (stack_guard_page_end(vma, end)) -- end -= PAGE_SIZE; -+#endif - - seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", - start, -@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - flags & VM_WRITE ? 'w' : '-', - flags & VM_EXEC ? 'x' : '-', - flags & VM_MAYSHARE ? 's' : 'p', -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff, -+#else - pgoff, -+#endif - MAJOR(dev), MINOR(dev), ino, &len); - - /* -@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - */ - if (file) { - pad_len_spaces(m, len); -- seq_path(m, &file->f_path, "\n"); -+ seq_path(m, &file->f_path, "\n\"); - } else { - const char *name = arch_vma_name(vma); - if (!name) { -@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) - if (vma->vm_start <= mm->brk && - vma->vm_end >= mm->start_brk) { - name = "[heap]"; -- } else if (vma->vm_start <= mm->start_stack && -- vma->vm_end >= mm->start_stack) { -+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || -+ (vma->vm_start <= mm->start_stack && -+ vma->vm_end >= mm->start_stack)) { - name = "[stack]"; - } - } else { -@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v) - }; - - memset(&mss, 0, sizeof mss); -- mss.vma = vma; -- /* mmap_sem is held in m_start */ -- if (vma->vm_mm && !is_vm_hugetlb_page(vma)) -- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); -- -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (!PAX_RAND_FLAGS(vma->vm_mm)) { -+#endif -+ mss.vma = vma; -+ /* mmap_sem is held in m_start */ -+ if (vma->vm_mm && !is_vm_hugetlb_page(vma)) -+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ } -+#endif - show_map_vma(m, vma); - - seq_printf(m, -@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v) - "KernelPageSize: %8lu kB\n" - "MMUPageSize: %8lu kB\n" - "Locked: %8lu kB\n", -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ PAX_RAND_FLAGS(vma->vm_mm) ? 
0UL : (vma->vm_end - vma->vm_start) >> 10, -+#else - (vma->vm_end - vma->vm_start) >> 10, -+#endif - mss.resident >> 10, - (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), - mss.shared_clean >> 10, -@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v) - - if (file) { - seq_printf(m, " file="); -- seq_path(m, &file->f_path, "\n\t= "); -+ seq_path(m, &file->f_path, "\n\t\= "); - } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { - seq_printf(m, " heap"); - } else if (vma->vm_start <= mm->start_stack && -diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c -index 980de54..2a4db5f 100644 ---- a/fs/proc/task_nommu.c -+++ b/fs/proc/task_nommu.c -@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) - else - bytes += kobjsize(mm); - -- if (current->fs && current->fs->users > 1) -+ if (current->fs && atomic_read(¤t->fs->users) > 1) - sbytes += kobjsize(current->fs); - else - bytes += kobjsize(current->fs); -@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) - - if (file) { - pad_len_spaces(m, len); -- seq_path(m, &file->f_path, ""); -+ seq_path(m, &file->f_path, "\n\"); - } else if (mm) { - if (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack) { -diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c -index d67908b..d13f6a6 100644 ---- a/fs/quota/netlink.c -+++ b/fs/quota/netlink.c -@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = { - void quota_send_warning(short type, unsigned int id, dev_t dev, - const char warntype) - { -- static atomic_t seq; -+ static atomic_unchecked_t seq; - struct sk_buff *skb; - void *msg_head; - int ret; -@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev, - "VFS: Not enough memory to send quota warning.\n"); - return; - } -- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), -+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq), - "a_genl_family, 0, QUOTA_NL_C_WARNING); - if (!msg_head) { - printk(KERN_ERR -diff --git a/fs/readdir.c b/fs/readdir.c -index 356f715..c918d38 100644 ---- a/fs/readdir.c -+++ b/fs/readdir.c -@@ -17,6 +17,7 @@ - #include <linux/security.h> - #include <linux/syscalls.h> - #include <linux/unistd.h> -+#include <linux/namei.h> - - #include <asm/uaccess.h> - -@@ -67,6 +68,7 @@ struct old_linux_dirent { - - struct readdir_callback { - struct old_linux_dirent __user * dirent; -+ struct file * file; - int result; - }; - -@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset - buf->result = -EOVERFLOW; - return -EOVERFLOW; - } -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - buf->result++; - dirent = buf->dirent; - if (!access_ok(VERIFY_WRITE, dirent, -@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, - - buf.result = 0; - buf.dirent = dirent; -+ buf.file = file; - - error = vfs_readdir(file, fillonedir, &buf); - if (buf.result) -@@ -142,6 +149,7 @@ struct linux_dirent { - struct getdents_callback { - struct linux_dirent __user * current_dir; - struct linux_dirent __user * previous; -+ struct file * file; - int count; - int error; - }; -@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset, - buf->error = -EOVERFLOW; - return -EOVERFLOW; - } -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - dirent = buf->previous; - if (dirent) { - if (__put_user(offset, &dirent->d_off)) -@@ -210,6 +222,7 @@ 
SYSCALL_DEFINE3(getdents, unsigned int, fd, - buf.previous = NULL; - buf.count = count; - buf.error = 0; -+ buf.file = file; - - error = vfs_readdir(file, filldir, &buf); - if (error >= 0) -@@ -229,6 +242,7 @@ out: - struct getdents_callback64 { - struct linux_dirent64 __user * current_dir; - struct linux_dirent64 __user * previous; -+ struct file *file; - int count; - int error; - }; -@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, - buf->error = -EINVAL; /* only used if we fail.. */ - if (reclen > buf->count) - return -EINVAL; -+ -+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) -+ return 0; -+ - dirent = buf->previous; - if (dirent) { - if (__put_user(offset, &dirent->d_off)) -@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, - - buf.current_dir = dirent; - buf.previous = NULL; -+ buf.file = file; - buf.count = count; - buf.error = 0; - -@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, - error = buf.error; - lastdirent = buf.previous; - if (lastdirent) { -- typeof(lastdirent->d_off) d_off = file->f_pos; -+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; - if (__put_user(d_off, &lastdirent->d_off)) - error = -EFAULT; - else -diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c -index 133e935..349ef18 100644 ---- a/fs/reiserfs/dir.c -+++ b/fs/reiserfs/dir.c -@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, - struct reiserfs_dir_entry de; - int ret = 0; - -+ pax_track_stack(); -+ - reiserfs_write_lock(inode->i_sb); - - reiserfs_check_lock_depth(inode->i_sb, "readdir"); -diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c -index 60c0804..d814f98 100644 ---- a/fs/reiserfs/do_balan.c -+++ b/fs/reiserfs/do_balan.c -@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */ - return; - } - -- atomic_inc(&(fs_generation(tb->tb_sb))); -+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); - do_balance_starts(tb); - - /* balance leaf returns 0 except if combining L R and S into -diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c -index a159ba5..0396a76 100644 ---- a/fs/reiserfs/journal.c -+++ b/fs/reiserfs/journal.c -@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev, - struct buffer_head *bh; - int i, j; - -+ pax_track_stack(); -+ - bh = __getblk(dev, block, bufsize); - if (buffer_uptodate(bh)) - return (bh); -diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c -index ef39232..0fa91ba 100644 ---- a/fs/reiserfs/namei.c -+++ b/fs/reiserfs/namei.c -@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, - unsigned long savelink = 1; - struct timespec ctime; - -+ pax_track_stack(); -+ - /* three balancings: (1) old name removal, (2) new name insertion - and (3) maybe "save" link insertion - stat data updates: (1) old directory, -diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c -index 7a99811..2c9286f 100644 ---- a/fs/reiserfs/procfs.c -+++ b/fs/reiserfs/procfs.c -@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb) - "SMALL_TAILS " : "NO_TAILS ", - replay_only(sb) ? "REPLAY_ONLY " : "", - convert_reiserfs(sb) ? 
"CONV " : "", -- atomic_read(&r->s_generation_counter), -+ atomic_read_unchecked(&r->s_generation_counter), - SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), - SF(s_do_balance), SF(s_unneeded_left_neighbor), - SF(s_good_search_by_key_reada), SF(s_bmaps), -@@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb) - struct journal_params *jp = &rs->s_v1.s_journal; - char b[BDEVNAME_SIZE]; - -+ pax_track_stack(); -+ - seq_printf(m, /* on-disk fields */ - "jp_journal_1st_block: \t%i\n" - "jp_journal_dev: \t%s[%x]\n" -diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c -index 313d39d..3a5811b 100644 ---- a/fs/reiserfs/stree.c -+++ b/fs/reiserfs/stree.c -@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, - int iter = 0; - #endif - -+ pax_track_stack(); -+ - BUG_ON(!th->t_trans_id); - - init_tb_struct(th, &s_del_balance, sb, path, -@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, - int retval; - int quota_cut_bytes = 0; - -+ pax_track_stack(); -+ - BUG_ON(!th->t_trans_id); - - le_key2cpu_key(&cpu_key, key); -@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, - int quota_cut_bytes; - loff_t tail_pos = 0; - -+ pax_track_stack(); -+ - BUG_ON(!th->t_trans_id); - - init_tb_struct(th, &s_cut_balance, inode->i_sb, path, -@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree - int retval; - int fs_gen; - -+ pax_track_stack(); -+ - BUG_ON(!th->t_trans_id); - - fs_gen = get_generation(inode->i_sb); -@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, - int fs_gen = 0; - int quota_bytes = 0; - -+ pax_track_stack(); -+ - BUG_ON(!th->t_trans_id); - - if (inode) { /* Do we count quotas for item? 
*/ -diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c -index 14363b9..dd95a04 100644 ---- a/fs/reiserfs/super.c -+++ b/fs/reiserfs/super.c -@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin - {.option_name = NULL} - }; - -+ pax_track_stack(); -+ - *blocks = 0; - if (!options || !*options) - /* use default configuration: create tails, journaling on, no -diff --git a/fs/select.c b/fs/select.c -index d33418f..f8e06bc 100644 ---- a/fs/select.c -+++ b/fs/select.c -@@ -20,6 +20,7 @@ - #include <linux/module.h> - #include <linux/slab.h> - #include <linux/poll.h> -+#include <linux/security.h> - #include <linux/personality.h> /* for STICKY_TIMEOUTS */ - #include <linux/file.h> - #include <linux/fdtable.h> -@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) - int retval, i, timed_out = 0; - unsigned long slack = 0; - -+ pax_track_stack(); -+ - rcu_read_lock(); - retval = max_select_fd(n, fds); - rcu_read_unlock(); -@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - /* Allocate small arguments on the stack to save memory and be faster */ - long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; - -+ pax_track_stack(); -+ - ret = -EINVAL; - if (n < 0) - goto out_nofds; -@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, - struct poll_list *walk = head; - unsigned long todo = nfds; - -+ pax_track_stack(); -+ -+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1); - if (nfds > rlimit(RLIMIT_NOFILE)) - return -EINVAL; - -diff --git a/fs/seq_file.c b/fs/seq_file.c -index 05d6b0e..ee96362 100644 ---- a/fs/seq_file.c -+++ b/fs/seq_file.c -@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset) - return 0; - } - if (!m->buf) { -- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); -+ m->size = PAGE_SIZE; -+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!m->buf) - return -ENOMEM; - } -@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset) - Eoverflow: - m->op->stop(m, p); - kfree(m->buf); -- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); -+ m->size <<= 1; -+ m->buf = kmalloc(m->size, GFP_KERNEL); - return !m->buf ? 
-ENOMEM : -EAGAIN; - } - -@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) - m->version = file->f_version; - /* grab buffer if we didn't have one */ - if (!m->buf) { -- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); -+ m->size = PAGE_SIZE; -+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!m->buf) - goto Enomem; - } -@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) - goto Fill; - m->op->stop(m, p); - kfree(m->buf); -- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); -+ m->size <<= 1; -+ m->buf = kmalloc(m->size, GFP_KERNEL); - if (!m->buf) - goto Enomem; - m->count = 0; -@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v) - int single_open(struct file *file, int (*show)(struct seq_file *, void *), - void *data) - { -- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); -+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL); - int res = -ENOMEM; - - if (op) { -diff --git a/fs/splice.c b/fs/splice.c -index fa2defa..9a697a5 100644 ---- a/fs/splice.c -+++ b/fs/splice.c -@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, - pipe_lock(pipe); - - for (;;) { -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; -@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, - do_wakeup = 0; - } - -- pipe->waiting_writers++; -+ atomic_inc(&pipe->waiting_writers); - pipe_wait(pipe); -- pipe->waiting_writers--; -+ atomic_dec(&pipe->waiting_writers); - } - - pipe_unlock(pipe); -@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, - .spd_release = spd_release_page, - }; - -+ pax_track_stack(); -+ - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - -@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec, - old_fs = get_fs(); - set_fs(get_ds()); - /* The cast to a user pointer is valid due to the set_fs() */ -- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); -+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos); - set_fs(old_fs); - - return res; -@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count, - old_fs = get_fs(); - set_fs(get_ds()); - /* The cast to a user pointer is valid due to the set_fs() */ -- res = vfs_write(file, (const char __user *)buf, count, &pos); -+ res = vfs_write(file, (const char __force_user *)buf, count, &pos); - set_fs(old_fs); - - return res; -@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, - .spd_release = spd_release_page, - }; - -+ pax_track_stack(); -+ - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - -@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, - goto err; - - this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); -- vec[i].iov_base = (void __user *) page_address(page); -+ vec[i].iov_base = (void __force_user *) page_address(page); - vec[i].iov_len = this_len; - spd.pages[i] = page; - spd.nr_pages++; -@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed); - int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) - { - while (!pipe->nrbufs) { -- if (!pipe->writers) -+ if (!atomic_read(&pipe->writers)) - return 0; - -- if (!pipe->waiting_writers && sd->num_spliced) -+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced) - return 0; - - if (sd->flags & 
SPLICE_F_NONBLOCK) -@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, - * out of the pipe right after the splice_to_pipe(). So set - * PIPE_READERS appropriately. - */ -- pipe->readers = 1; -+ atomic_set(&pipe->readers, 1); - - current->splice_pipe = pipe; - } -@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, - }; - long ret; - -+ pax_track_stack(); -+ - pipe = get_pipe_info(file); - if (!pipe) - return -EBADF; -@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) - ret = -ERESTARTSYS; - break; - } -- if (!pipe->writers) -+ if (!atomic_read(&pipe->writers)) - break; -- if (!pipe->waiting_writers) { -+ if (!atomic_read(&pipe->waiting_writers)) { - if (flags & SPLICE_F_NONBLOCK) { - ret = -EAGAIN; - break; -@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) - pipe_lock(pipe); - - while (pipe->nrbufs >= pipe->buffers) { -- if (!pipe->readers) { -+ if (!atomic_read(&pipe->readers)) { - send_sig(SIGPIPE, current, 0); - ret = -EPIPE; - break; -@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) - ret = -ERESTARTSYS; - break; - } -- pipe->waiting_writers++; -+ atomic_inc(&pipe->waiting_writers); - pipe_wait(pipe); -- pipe->waiting_writers--; -+ atomic_dec(&pipe->waiting_writers); - } - - pipe_unlock(pipe); -@@ -1819,14 +1825,14 @@ retry: - pipe_double_lock(ipipe, opipe); - - do { -- if (!opipe->readers) { -+ if (!atomic_read(&opipe->readers)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; - break; - } - -- if (!ipipe->nrbufs && !ipipe->writers) -+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers)) - break; - - /* -@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, - pipe_double_lock(ipipe, opipe); - - do { -- if (!opipe->readers) { -+ if (!atomic_read(&opipe->readers)) { - send_sig(SIGPIPE, current, 0); - if (!ret) - ret = -EPIPE; -@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, - * return EAGAIN if we have the potential of some data in the - * future, otherwise just return 0 - */ -- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) -+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK)) - ret = -EAGAIN; - - pipe_unlock(ipipe); -diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c -index 1ad8c93..6633545 100644 ---- a/fs/sysfs/file.c -+++ b/fs/sysfs/file.c -@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock); - - struct sysfs_open_dirent { - atomic_t refcnt; -- atomic_t event; -+ atomic_unchecked_t event; - wait_queue_head_t poll; - struct list_head buffers; /* goes through sysfs_buffer.list */ - }; -@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer - if (!sysfs_get_active(attr_sd)) - return -ENODEV; - -- buffer->event = atomic_read(&attr_sd->s_attr.open->event); -+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event); - count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page); - - sysfs_put_active(attr_sd); -@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd, - return -ENOMEM; - - atomic_set(&new_od->refcnt, 0); -- atomic_set(&new_od->event, 1); -+ atomic_set_unchecked(&new_od->event, 1); - init_waitqueue_head(&new_od->poll); - INIT_LIST_HEAD(&new_od->buffers); - goto retry; -@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait) - - 
sysfs_put_active(attr_sd); - -- if (buffer->event != atomic_read(&od->event)) -+ if (buffer->event != atomic_read_unchecked(&od->event)) - goto trigger; - - return DEFAULT_POLLMASK; -@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd) - - od = sd->s_attr.open; - if (od) { -- atomic_inc(&od->event); -+ atomic_inc_unchecked(&od->event); - wake_up_interruptible(&od->poll); - } - -diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c -index e34f0d9..740ea7b 100644 ---- a/fs/sysfs/mount.c -+++ b/fs/sysfs/mount.c -@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = { - .s_name = "", - .s_count = ATOMIC_INIT(1), - .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT), -+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT -+ .s_mode = S_IFDIR | S_IRWXU, -+#else - .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, -+#endif - .s_ino = 1, - }; - -diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c -index a7ac78f..02158e1 100644 ---- a/fs/sysfs/symlink.c -+++ b/fs/sysfs/symlink.c -@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd) - - static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) - { -- char *page = nd_get_link(nd); -+ const char *page = nd_get_link(nd); - if (!IS_ERR(page)) - free_page((unsigned long)page); - } -diff --git a/fs/udf/inode.c b/fs/udf/inode.c -index 1d1358e..408bedb 100644 ---- a/fs/udf/inode.c -+++ b/fs/udf/inode.c -@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, - int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; - int lastblock = 0; - -+ pax_track_stack(); -+ - prev_epos.offset = udf_file_entry_alloc_offset(inode); - prev_epos.block = iinfo->i_location; - prev_epos.bh = NULL; -diff --git a/fs/udf/misc.c b/fs/udf/misc.c -index 9215700..bf1f68e 100644 ---- a/fs/udf/misc.c -+++ b/fs/udf/misc.c -@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, - - u8 udf_tag_checksum(const struct tag *t) - { -- u8 *data = (u8 *)t; -+ const u8 *data = (const u8 *)t; - u8 checksum = 0; - int i; - for (i = 0; i < sizeof(struct tag); ++i) -diff --git a/fs/utimes.c b/fs/utimes.c -index ba653f3..06ea4b1 100644 ---- a/fs/utimes.c -+++ b/fs/utimes.c -@@ -1,6 +1,7 @@ - #include <linux/compiler.h> - #include <linux/file.h> - #include <linux/fs.h> -+#include <linux/security.h> - #include <linux/linkage.h> - #include <linux/mount.h> - #include <linux/namei.h> -@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times) - goto mnt_drop_write_and_out; - } - } -+ -+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) { -+ error = -EACCES; -+ goto mnt_drop_write_and_out; -+ } -+ - mutex_lock(&inode->i_mutex); - error = notify_change(path->dentry, &newattrs); - mutex_unlock(&inode->i_mutex); -diff --git a/fs/xattr.c b/fs/xattr.c -index f060663..def7007 100644 ---- a/fs/xattr.c -+++ b/fs/xattr.c -@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr); - * Extended attribute SET operations - */ - static long --setxattr(struct dentry *d, const char __user *name, const void __user *value, -+setxattr(struct path *path, const char __user *name, const void __user *value, - size_t size, int flags) - { - int error; -@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value, - return PTR_ERR(kvalue); - } - -- error = vfs_setxattr(d, kname, kvalue, size, flags); -+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) { -+ error = -EACCES; -+ goto out; -+ } -+ -+ 
error = vfs_setxattr(path->dentry, kname, kvalue, size, flags); -+out: - kfree(kvalue); - return error; - } -@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname, - return error; - error = mnt_want_write(path.mnt); - if (!error) { -- error = setxattr(path.dentry, name, value, size, flags); -+ error = setxattr(&path, name, value, size, flags); - mnt_drop_write(path.mnt); - } - path_put(&path); -@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, - return error; - error = mnt_want_write(path.mnt); - if (!error) { -- error = setxattr(path.dentry, name, value, size, flags); -+ error = setxattr(&path, name, value, size, flags); - mnt_drop_write(path.mnt); - } - path_put(&path); -@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, - const void __user *,value, size_t, size, int, flags) - { - struct file *f; -- struct dentry *dentry; - int error = -EBADF; - - f = fget(fd); - if (!f) - return error; -- dentry = f->f_path.dentry; -- audit_inode(NULL, dentry); -+ audit_inode(NULL, f->f_path.dentry); - error = mnt_want_write_file(f); - if (!error) { -- error = setxattr(dentry, name, value, size, flags); -+ error = setxattr(&f->f_path, name, value, size, flags); - mnt_drop_write(f->f_path.mnt); - } - fput(f); -diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c -index 8d5a506..7f62712 100644 ---- a/fs/xattr_acl.c -+++ b/fs/xattr_acl.c -@@ -17,8 +17,8 @@ - struct posix_acl * - posix_acl_from_xattr(const void *value, size_t size) - { -- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; -- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; -+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value; -+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end; - int count; - struct posix_acl *acl; - struct posix_acl_entry *acl_e; -diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c -index 452a291..91a95f3b 100644 ---- a/fs/xfs/xfs_bmap.c -+++ b/fs/xfs/xfs_bmap.c -@@ -250,7 +250,7 @@ xfs_bmap_validate_ret( - int nmap, - int ret_nmap); - #else --#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) -+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0) - #endif /* DEBUG */ - - STATIC int -diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c -index 79d05e8..e3e5861 100644 ---- a/fs/xfs/xfs_dir2_sf.c -+++ b/fs/xfs/xfs_dir2_sf.c -@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents( - } - - ino = xfs_dir2_sfe_get_ino(sfp, sfep); -- if (filldir(dirent, (char *)sfep->name, sfep->namelen, -+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) { -+ char name[sfep->namelen]; -+ memcpy(name, sfep->name, sfep->namelen); -+ if (filldir(dirent, name, sfep->namelen, -+ off & 0x7fffffff, ino, DT_UNKNOWN)) { -+ *offset = off & 0x7fffffff; -+ return 0; -+ } -+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen, - off & 0x7fffffff, ino, DT_UNKNOWN)) { - *offset = off & 0x7fffffff; - return 0; -diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c -index f7ce7de..e1a5db0 100644 ---- a/fs/xfs/xfs_ioctl.c -+++ b/fs/xfs/xfs_ioctl.c -@@ -128,7 +128,7 @@ xfs_find_handle( - } - - error = -EFAULT; -- if (copy_to_user(hreq->ohandle, &handle, hsize) || -+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) || - copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) - goto out_put; - -diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c -index 474920b..97169a9 100644 ---- a/fs/xfs/xfs_iops.c -+++ b/fs/xfs/xfs_iops.c -@@ 
-446,7 +446,7 @@ xfs_vn_put_link( - struct nameidata *nd, - void *p) - { -- char *s = nd_get_link(nd); -+ const char *s = nd_get_link(nd); - - if (!IS_ERR(s)) - kfree(s); -diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig -new file mode 100644 -index 0000000..9629731 ---- /dev/null -+++ b/grsecurity/Kconfig -@@ -0,0 +1,1037 @@ -+# -+# grecurity configuration -+# -+ -+menu "Grsecurity" -+ -+config GRKERNSEC -+ bool "Grsecurity" -+ select CRYPTO -+ select CRYPTO_SHA256 -+ help -+ If you say Y here, you will be able to configure many features -+ that will enhance the security of your system. It is highly -+ recommended that you say Y here and read through the help -+ for each option so that you fully understand the features and -+ can evaluate their usefulness for your machine. -+ -+choice -+ prompt "Security Level" -+ depends on GRKERNSEC -+ default GRKERNSEC_CUSTOM -+ -+config GRKERNSEC_LOW -+ bool "Low" -+ select GRKERNSEC_LINK -+ select GRKERNSEC_FIFO -+ select GRKERNSEC_RANDNET -+ select GRKERNSEC_DMESG -+ select GRKERNSEC_CHROOT -+ select GRKERNSEC_CHROOT_CHDIR -+ -+ help -+ If you choose this option, several of the grsecurity options will -+ be enabled that will give you greater protection against a number -+ of attacks, while assuring that none of your software will have any -+ conflicts with the additional security measures. If you run a lot -+ of unusual software, or you are having problems with the higher -+ security levels, you should say Y here. With this option, the -+ following features are enabled: -+ -+ - Linking restrictions -+ - FIFO restrictions -+ - Restricted dmesg -+ - Enforced chdir("/") on chroot -+ - Runtime module disabling -+ -+config GRKERNSEC_MEDIUM -+ bool "Medium" -+ select PAX -+ select PAX_EI_PAX -+ select PAX_PT_PAX_FLAGS -+ select PAX_HAVE_ACL_FLAGS -+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) -+ select GRKERNSEC_CHROOT -+ select GRKERNSEC_CHROOT_SYSCTL -+ select GRKERNSEC_LINK -+ select GRKERNSEC_FIFO -+ select GRKERNSEC_DMESG -+ select GRKERNSEC_RANDNET -+ select GRKERNSEC_FORKFAIL -+ select GRKERNSEC_TIME -+ select GRKERNSEC_SIGNAL -+ select GRKERNSEC_CHROOT -+ select GRKERNSEC_CHROOT_UNIX -+ select GRKERNSEC_CHROOT_MOUNT -+ select GRKERNSEC_CHROOT_PIVOT -+ select GRKERNSEC_CHROOT_DOUBLE -+ select GRKERNSEC_CHROOT_CHDIR -+ select GRKERNSEC_CHROOT_MKNOD -+ select GRKERNSEC_PROC -+ select GRKERNSEC_PROC_USERGROUP -+ select PAX_RANDUSTACK -+ select PAX_ASLR -+ select PAX_RANDMMAP -+ select PAX_REFCOUNT if (X86 || SPARC64) -+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB)) -+ -+ help -+ If you say Y here, several features in addition to those included -+ in the low additional security level will be enabled. These -+ features provide even more security to your system, though in rare -+ cases they may be incompatible with very old or poorly written -+ software. If you enable this option, make sure that your auth -+ service (identd) is running as gid 1001. 
With this option, -+ the following features (in addition to those provided in the -+ low additional security level) will be enabled: -+ -+ - Failed fork logging -+ - Time change logging -+ - Signal logging -+ - Deny mounts in chroot -+ - Deny double chrooting -+ - Deny sysctl writes in chroot -+ - Deny mknod in chroot -+ - Deny access to abstract AF_UNIX sockets out of chroot -+ - Deny pivot_root in chroot -+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port -+ - /proc restrictions with special GID set to 10 (usually wheel) -+ - Address Space Layout Randomization (ASLR) -+ - Prevent exploitation of most refcount overflows -+ - Bounds checking of copying between the kernel and userland -+ -+config GRKERNSEC_HIGH -+ bool "High" -+ select GRKERNSEC_LINK -+ select GRKERNSEC_FIFO -+ select GRKERNSEC_DMESG -+ select GRKERNSEC_FORKFAIL -+ select GRKERNSEC_TIME -+ select GRKERNSEC_SIGNAL -+ select GRKERNSEC_CHROOT -+ select GRKERNSEC_CHROOT_SHMAT -+ select GRKERNSEC_CHROOT_UNIX -+ select GRKERNSEC_CHROOT_MOUNT -+ select GRKERNSEC_CHROOT_FCHDIR -+ select GRKERNSEC_CHROOT_PIVOT -+ select GRKERNSEC_CHROOT_DOUBLE -+ select GRKERNSEC_CHROOT_CHDIR -+ select GRKERNSEC_CHROOT_MKNOD -+ select GRKERNSEC_CHROOT_CAPS -+ select GRKERNSEC_CHROOT_SYSCTL -+ select GRKERNSEC_CHROOT_FINDTASK -+ select GRKERNSEC_SYSFS_RESTRICT -+ select GRKERNSEC_PROC -+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) -+ select GRKERNSEC_HIDESYM -+ select GRKERNSEC_BRUTE -+ select GRKERNSEC_PROC_USERGROUP -+ select GRKERNSEC_KMEM -+ select GRKERNSEC_RESLOG -+ select GRKERNSEC_RANDNET -+ select GRKERNSEC_PROC_ADD -+ select GRKERNSEC_CHROOT_CHMOD -+ select GRKERNSEC_CHROOT_NICE -+ select GRKERNSEC_AUDIT_MOUNT -+ select GRKERNSEC_MODHARDEN if (MODULES) -+ select GRKERNSEC_HARDEN_PTRACE -+ select GRKERNSEC_VM86 if (X86_32) -+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC) -+ select PAX -+ select PAX_RANDUSTACK -+ select PAX_ASLR -+ select PAX_RANDMMAP -+ select PAX_NOEXEC -+ select PAX_MPROTECT -+ select PAX_EI_PAX -+ select PAX_PT_PAX_FLAGS -+ select PAX_HAVE_ACL_FLAGS -+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) -+ select PAX_MEMORY_UDEREF if (X86 && !XEN) -+ select PAX_RANDKSTACK if (X86_TSC && X86) -+ select PAX_SEGMEXEC if (X86_32) -+ select PAX_PAGEEXEC -+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC) -+ select PAX_EMUTRAMP if (PARISC) -+ select PAX_EMUSIGRT if (PARISC) -+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) -+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86)) -+ select PAX_REFCOUNT if (X86 || SPARC64) -+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB)) -+ help -+ If you say Y here, many of the features of grsecurity will be -+ enabled, which will protect you against many kinds of attacks -+ against your system. The heightened security comes at a cost -+ of an increased chance of incompatibilities with rare software -+ on your machine. Since this security level enables PaX, you should -+ view http://pax.grsecurity.net and read about the PaX -+ project. While you are there, download chpax and run it on -+ binaries that cause problems with PaX. Also remember that -+ since the /proc restrictions are enabled, you must run your -+ identd as gid 1001. 
This security level enables the following -+ features in addition to those listed in the low and medium -+ security levels: -+ -+ - Additional /proc restrictions -+ - Chmod restrictions in chroot -+ - No signals, ptrace, or viewing of processes outside of chroot -+ - Capability restrictions in chroot -+ - Deny fchdir out of chroot -+ - Priority restrictions in chroot -+ - Segmentation-based implementation of PaX -+ - Mprotect restrictions -+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat] -+ - Kernel stack randomization -+ - Mount/unmount/remount logging -+ - Kernel symbol hiding -+ - Hardening of module auto-loading -+ - Ptrace restrictions -+ - Restricted vm86 mode -+ - Restricted sysfs/debugfs -+ - Active kernel exploit response -+ -+config GRKERNSEC_CUSTOM -+ bool "Custom" -+ help -+ If you say Y here, you will be able to configure every grsecurity -+ option, which allows you to enable many more features that aren't -+ covered in the basic security levels. These additional features -+ include TPE, socket restrictions, and the sysctl system for -+ grsecurity. It is advised that you read through the help for -+ each option to determine its usefulness in your situation. -+ -+endchoice -+ -+menu "Address Space Protection" -+depends on GRKERNSEC -+ -+config GRKERNSEC_KMEM -+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port" -+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390) -+ help -+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to -+ be written to or read from to modify or leak the contents of the running -+ kernel. /dev/port will also not be allowed to be opened. If you have module -+ support disabled, enabling this will close up four ways that are -+ currently used to insert malicious code into the running kernel. -+ Even with all these features enabled, we still highly recommend that -+ you use the RBAC system, as it is still possible for an attacker to -+ modify the running kernel through privileged I/O granted by ioperm/iopl. -+ If you are not using XFree86, you may be able to stop this additional -+ case by enabling the 'Disable privileged I/O' option. Though nothing -+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem, -+ but only to video memory, which is the only writing we allow in this -+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will -+ not be allowed to mprotect it with PROT_WRITE later. -+ It is highly recommended that you say Y here if you meet all the -+ conditions above. -+ -+config GRKERNSEC_VM86 -+ bool "Restrict VM86 mode" -+ depends on X86_32 -+ -+ help -+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to -+ make use of a special execution mode on 32bit x86 processors called -+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain -+ video cards and will still work with this option enabled. The purpose -+ of the option is to prevent exploitation of emulation errors in -+ virtualization of vm86 mode like the one discovered in VMWare in 2009. -+ Nearly all users should be able to enable this option. -+ -+config GRKERNSEC_IO -+ bool "Disable privileged I/O" -+ depends on X86 -+ select RTC_CLASS -+ select RTC_INTF_DEV -+ select RTC_DRV_CMOS -+ -+ help -+ If you say Y here, all ioperm and iopl calls will return an error. -+ Ioperm and iopl can be used to modify the running kernel. -+ Unfortunately, some programs need this access to operate properly, -+ the most notable of which are XFree86 and hwclock. 
hwclock can be -+ remedied by having RTC support in the kernel, so real-time -+ clock support is enabled if this option is enabled, to ensure -+ that hwclock operates correctly. XFree86 still will not -+ operate correctly with this option enabled, so DO NOT CHOOSE Y -+ IF YOU USE XFree86. If you use XFree86 and you still want to -+ protect your kernel against modification, use the RBAC system. -+ -+config GRKERNSEC_PROC_MEMMAP -+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]" -+ default y if (PAX_NOEXEC || PAX_ASLR) -+ depends on PAX_NOEXEC || PAX_ASLR -+ help -+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will -+ give no information about the addresses of its mappings if -+ PaX features that rely on random addresses are enabled on the task. -+ If you use PaX it is greatly recommended that you say Y here as it -+ closes up a hole that makes the full ASLR useless for suid -+ binaries. -+ -+config GRKERNSEC_BRUTE -+ bool "Deter exploit bruteforcing" -+ help -+ If you say Y here, attempts to bruteforce exploits against forking -+ daemons such as apache or sshd, as well as against suid/sgid binaries -+ will be deterred. When a child of a forking daemon is killed by PaX -+ or crashes due to an illegal instruction or other suspicious signal, -+ the parent process will be delayed 30 seconds upon every subsequent -+ fork until the administrator is able to assess the situation and -+ restart the daemon. -+ In the suid/sgid case, the attempt is logged, the user has all their -+ processes terminated, and they are prevented from executing any further -+ processes for 15 minutes. -+ It is recommended that you also enable signal logging in the auditing -+ section so that logs are generated when a process triggers a suspicious -+ signal. -+ If the sysctl option is enabled, a sysctl option with name -+ "deter_bruteforce" is created. -+ -+ -+config GRKERNSEC_MODHARDEN -+ bool "Harden module auto-loading" -+ depends on MODULES -+ help -+ If you say Y here, module auto-loading in response to use of some -+ feature implemented by an unloaded module will be restricted to -+ root users. Enabling this option helps defend against attacks -+ by unprivileged users who abuse the auto-loading behavior to -+ cause a vulnerable module to load that is then exploited. -+ -+ If this option prevents a legitimate use of auto-loading for a -+ non-root user, the administrator can execute modprobe manually -+ with the exact name of the module mentioned in the alert log. -+ Alternatively, the administrator can add the module to the list -+ of modules loaded at boot by modifying init scripts. -+ -+ Modification of init scripts will most likely be needed on -+ Ubuntu servers with encrypted home directory support enabled, -+ as the first non-root user logging in will cause the ecb(aes), -+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. -+ -+config GRKERNSEC_HIDESYM -+ bool "Hide kernel symbols" -+ help -+ If you say Y here, getting information on loaded modules, and -+ displaying all kernel symbols through a syscall will be restricted -+ to users with CAP_SYS_MODULE. For software compatibility reasons, -+ /proc/kallsyms will be restricted to the root user. The RBAC -+ system can hide that entry even from root. -+ -+ This option also prevents leaking of kernel addresses through -+ several /proc entries. 
-+ -+ Note that this option is only effective provided the following -+ conditions are met: -+ 1) The kernel using grsecurity is not precompiled by some distribution -+ 2) You have also enabled GRKERNSEC_DMESG -+ 3) You are using the RBAC system and hiding other files such as your -+ kernel image and System.map. Alternatively, enabling this option -+ causes the permissions on /boot, /lib/modules, and the kernel -+ source directory to change at compile time to prevent -+ reading by non-root users. -+ If the above conditions are met, this option will aid in providing a -+ useful protection against local kernel exploitation of overflows -+ and arbitrary read/write vulnerabilities. -+ -+config GRKERNSEC_KERN_LOCKOUT -+ bool "Active kernel exploit response" -+ depends on X86 || ARM || PPC || SPARC -+ help -+ If you say Y here, when a PaX alert is triggered due to suspicious -+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY) -+ or an OOPs occurs due to bad memory accesses, instead of just -+ terminating the offending process (and potentially allowing -+ a subsequent exploit from the same user), we will take one of two -+ actions: -+ If the user was root, we will panic the system -+ If the user was non-root, we will log the attempt, terminate -+ all processes owned by the user, then prevent them from creating -+ any new processes until the system is restarted -+ This deters repeated kernel exploitation/bruteforcing attempts -+ and is useful for later forensics. -+ -+endmenu -+menu "Role Based Access Control Options" -+depends on GRKERNSEC -+ -+config GRKERNSEC_RBAC_DEBUG -+ bool -+ -+config GRKERNSEC_NO_RBAC -+ bool "Disable RBAC system" -+ help -+ If you say Y here, the /dev/grsec device will be removed from the kernel, -+ preventing the RBAC system from being enabled. You should only say Y -+ here if you have no intention of using the RBAC system, so as to prevent -+ an attacker with root access from misusing the RBAC system to hide files -+ and processes when loadable module support and /dev/[k]mem have been -+ locked down. -+ -+config GRKERNSEC_ACL_HIDEKERN -+ bool "Hide kernel processes" -+ help -+ If you say Y here, all kernel threads will be hidden to all -+ processes but those whose subject has the "view hidden processes" -+ flag. -+ -+config GRKERNSEC_ACL_MAXTRIES -+ int "Maximum tries before password lockout" -+ default 3 -+ help -+ This option enforces the maximum number of times a user can attempt -+ to authorize themselves with the grsecurity RBAC system before being -+ denied the ability to attempt authorization again for a specified time. -+ The lower the number, the harder it will be to brute-force a password. -+ -+config GRKERNSEC_ACL_TIMEOUT -+ int "Time to wait after max password tries, in seconds" -+ default 30 -+ help -+ This option specifies the time the user must wait after attempting to -+ authorize to the RBAC system with the maximum number of invalid -+ passwords. The higher the number, the harder it will be to brute-force -+ a password. -+ -+endmenu -+menu "Filesystem Protections" -+depends on GRKERNSEC -+ -+config GRKERNSEC_PROC -+ bool "Proc restrictions" -+ help -+ If you say Y here, the permissions of the /proc filesystem -+ will be altered to enhance system security and privacy. You MUST -+ choose either a user only restriction or a user and group restriction. 
-+ Depending upon the option you choose, you can either restrict users to -+ see only the processes they themselves run, or choose a group that can -+ view all processes and files normally restricted to root if you choose -+ the "restrict to user only" option. NOTE: If you're running identd as -+ a non-root user, you will have to run it as the group you specify here. -+ -+config GRKERNSEC_PROC_USER -+ bool "Restrict /proc to user only" -+ depends on GRKERNSEC_PROC -+ help -+ If you say Y here, non-root users will only be able to view their own -+ processes, and restricts them from viewing network-related information, -+ and viewing kernel symbol and module information. -+ -+config GRKERNSEC_PROC_USERGROUP -+ bool "Allow special group" -+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER -+ help -+ If you say Y here, you will be able to select a group that will be -+ able to view all processes and network-related information. If you've -+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still -+ remain hidden. This option is useful if you want to run identd as -+ a non-root user. -+ -+config GRKERNSEC_PROC_GID -+ int "GID for special group" -+ depends on GRKERNSEC_PROC_USERGROUP -+ default 1001 -+ -+config GRKERNSEC_PROC_ADD -+ bool "Additional restrictions" -+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP -+ help -+ If you say Y here, additional restrictions will be placed on -+ /proc that keep normal users from viewing device information and -+ slabinfo information that could be useful for exploits. -+ -+config GRKERNSEC_LINK -+ bool "Linking restrictions" -+ help -+ If you say Y here, /tmp race exploits will be prevented, since users -+ will no longer be able to follow symlinks owned by other users in -+ world-writable +t directories (e.g. /tmp), unless the owner of the -+ symlink is the owner of the directory. users will also not be -+ able to hardlink to files they do not own. If the sysctl option is -+ enabled, a sysctl option with name "linking_restrictions" is created. -+ -+config GRKERNSEC_FIFO -+ bool "FIFO restrictions" -+ help -+ If you say Y here, users will not be able to write to FIFOs they don't -+ own in world-writable +t directories (e.g. /tmp), unless the owner of -+ the FIFO is the same owner of the directory it's held in. If the sysctl -+ option is enabled, a sysctl option with name "fifo_restrictions" is -+ created. -+ -+config GRKERNSEC_SYSFS_RESTRICT -+ bool "Sysfs/debugfs restriction" -+ depends on SYSFS -+ help -+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and -+ any filesystem normally mounted under it (e.g. debugfs) will only -+ be accessible by root. These filesystems generally provide access -+ to hardware and debug information that isn't appropriate for unprivileged -+ users of the system. Sysfs and debugfs have also become a large source -+ of new vulnerabilities, ranging from infoleaks to local compromise. -+ There has been very little oversight with an eye toward security involved -+ in adding new exporters of information to these filesystems, so their -+ use is discouraged. -+ This option is equivalent to a chmod 0700 of the mount paths. -+ -+config GRKERNSEC_ROFS -+ bool "Runtime read-only mount protection" -+ help -+ If you say Y here, a sysctl option with name "romount_protect" will -+ be created. 
By setting this option to 1 at runtime, filesystems -+ will be protected in the following ways: -+ * No new writable mounts will be allowed -+ * Existing read-only mounts won't be able to be remounted read/write -+ * Write operations will be denied on all block devices -+ This option acts independently of grsec_lock: once it is set to 1, -+ it cannot be turned off. Therefore, please be mindful of the resulting -+ behavior if this option is enabled in an init script on a read-only -+ filesystem. This feature is mainly intended for secure embedded systems. -+ -+config GRKERNSEC_CHROOT -+ bool "Chroot jail restrictions" -+ help -+ If you say Y here, you will be able to choose several options that will -+ make breaking out of a chrooted jail much more difficult. If you -+ encounter no software incompatibilities with the following options, it -+ is recommended that you enable each one. -+ -+config GRKERNSEC_CHROOT_MOUNT -+ bool "Deny mounts" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to -+ mount or remount filesystems. If the sysctl option is enabled, a -+ sysctl option with name "chroot_deny_mount" is created. -+ -+config GRKERNSEC_CHROOT_DOUBLE -+ bool "Deny double-chroots" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to chroot -+ again outside the chroot. This is a widely used method of breaking -+ out of a chroot jail and should not be allowed. If the sysctl -+ option is enabled, a sysctl option with name -+ "chroot_deny_chroot" is created. -+ -+config GRKERNSEC_CHROOT_PIVOT -+ bool "Deny pivot_root in chroot" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to use -+ a function called pivot_root() that was introduced in Linux 2.3.41. It -+ works similar to chroot in that it changes the root filesystem. This -+ function could be misused in a chrooted process to attempt to break out -+ of the chroot, and therefore should not be allowed. If the sysctl -+ option is enabled, a sysctl option with name "chroot_deny_pivot" is -+ created. -+ -+config GRKERNSEC_CHROOT_CHDIR -+ bool "Enforce chdir("/") on all chroots" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, the current working directory of all newly-chrooted -+ applications will be set to the the root directory of the chroot. -+ The man page on chroot(2) states: -+ Note that this call does not change the current working -+ directory, so that `.' can be outside the tree rooted at -+ `/'. In particular, the super-user can escape from a -+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. -+ -+ It is recommended that you say Y here, since it's not known to break -+ any software. If the sysctl option is enabled, a sysctl option with -+ name "chroot_enforce_chdir" is created. -+ -+config GRKERNSEC_CHROOT_CHMOD -+ bool "Deny (f)chmod +s" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to chmod -+ or fchmod files to make them have suid or sgid bits. This protects -+ against another published method of breaking a chroot. If the sysctl -+ option is enabled, a sysctl option with name "chroot_deny_chmod" is -+ created. 
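Each of the chroot options in this hunk creates a sysctl entry when sysctl support is enabled. As a rough illustration (not part of the patch), the whole set could be switched on at runtime from userspace, assuming the entries appear under /proc/sys/kernel/grsecurity as the help texts describe; the entry names below are taken directly from those help texts:

        /* Illustrative only -- not from the patch above. Assumes GRKERNSEC_SYSCTL
         * is enabled so the chroot toggles show up under
         * /proc/sys/kernel/grsecurity/. */
        #include <stdio.h>

        static int grsec_set(const char *name, const char *value)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
                f = fopen(path, "w");
                if (!f) {
                        perror(path);
                        return -1;
                }
                fputs(value, f);
                fclose(f);
                return 0;
        }

        int main(void)
        {
                /* Sysctl names quoted from the help texts in this hunk. */
                const char *opts[] = {
                        "chroot_deny_mount", "chroot_deny_chroot",
                        "chroot_deny_pivot", "chroot_enforce_chdir",
                        "chroot_deny_chmod",
                };
                size_t i;

                for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                        grsec_set(opts[i], "1");
                return 0;
        }

Such a snippet would normally run from an init script, before grsec_lock is set (see the sysctl support section further down in this hunk).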
-+ -+config GRKERNSEC_CHROOT_FCHDIR -+ bool "Deny fchdir out of chroot" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, a well-known method of breaking chroots by fchdir'ing -+ to a file descriptor of the chrooting process that points to a directory -+ outside the filesystem will be stopped. If the sysctl option -+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created. -+ -+config GRKERNSEC_CHROOT_MKNOD -+ bool "Deny mknod" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be allowed to -+ mknod. The problem with using mknod inside a chroot is that it -+ would allow an attacker to create a device entry that is the same -+ as one on the physical root of your system, which could range from -+ anything from the console device to a device for your harddrive (which -+ they could then use to wipe the drive or steal data). It is recommended -+ that you say Y here, unless you run into software incompatibilities. -+ If the sysctl option is enabled, a sysctl option with name -+ "chroot_deny_mknod" is created. -+ -+config GRKERNSEC_CHROOT_SHMAT -+ bool "Deny shmat() out of chroot" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to attach -+ to shared memory segments that were created outside of the chroot jail. -+ It is recommended that you say Y here. If the sysctl option is enabled, -+ a sysctl option with name "chroot_deny_shmat" is created. -+ -+config GRKERNSEC_CHROOT_UNIX -+ bool "Deny access to abstract AF_UNIX sockets out of chroot" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to -+ connect to abstract (meaning not belonging to a filesystem) Unix -+ domain sockets that were bound outside of a chroot. It is recommended -+ that you say Y here. If the sysctl option is enabled, a sysctl option -+ with name "chroot_deny_unix" is created. -+ -+config GRKERNSEC_CHROOT_FINDTASK -+ bool "Protect outside processes" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to -+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, -+ getsid, or view any process outside of the chroot. If the sysctl -+ option is enabled, a sysctl option with name "chroot_findtask" is -+ created. -+ -+config GRKERNSEC_CHROOT_NICE -+ bool "Restrict priority changes" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, processes inside a chroot will not be able to raise -+ the priority of processes in the chroot, or alter the priority of -+ processes outside the chroot. This provides more security than simply -+ removing CAP_SYS_NICE from the process' capability set. If the -+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" -+ is created. -+ -+config GRKERNSEC_CHROOT_SYSCTL -+ bool "Deny sysctl writes" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, an attacker in a chroot will not be able to -+ write to sysctl entries, either by sysctl(2) or through a /proc -+ interface. It is strongly recommended that you say Y here. If the -+ sysctl option is enabled, a sysctl option with name -+ "chroot_deny_sysctl" is created. 
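To make the mknod help text above concrete: the attack it describes amounts to recreating a host block-device node from inside the jail and then reading or writing the raw disk through it. A rough sketch (not from the patch; the 8:0 device numbers and the ./sda name are illustrative assumptions, and the real numbers depend on the host) of exactly the call that "chroot_deny_mknod" refuses to chrooted processes:

        /* Illustrative only -- not from the patch. Requires CAP_MKNOD; with
         * chroot_deny_mknod enabled, mknod() fails inside a chroot. */
        #include <stdio.h>
        #include <sys/types.h>
        #include <sys/stat.h>
        #include <sys/sysmacros.h>

        int main(void)
        {
                dev_t disk = makedev(8, 0);     /* assumed first disk, for illustration */

                if (mknod("./sda", S_IFBLK | 0600, disk) != 0) {
                        perror("mknod");        /* expected inside a restricted chroot */
                        return 1;
                }
                printf("created ./sda -- the jail can now reach the raw disk\n");
                return 0;
        }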
-+ -+config GRKERNSEC_CHROOT_CAPS -+ bool "Capability restrictions" -+ depends on GRKERNSEC_CHROOT -+ help -+ If you say Y here, the capabilities on all processes within a -+ chroot jail will be lowered to stop module insertion, raw i/o, -+ system and net admin tasks, rebooting the system, modifying immutable -+ files, modifying IPC owned by another, and changing the system time. -+ This is left an option because it can break some apps. Disable this -+ if your chrooted apps are having problems performing those kinds of -+ tasks. If the sysctl option is enabled, a sysctl option with -+ name "chroot_caps" is created. -+ -+endmenu -+menu "Kernel Auditing" -+depends on GRKERNSEC -+ -+config GRKERNSEC_AUDIT_GROUP -+ bool "Single group for auditing" -+ help -+ If you say Y here, the exec, chdir, and (un)mount logging features -+ will only operate on a group you specify. This option is recommended -+ if you only want to watch certain users instead of having a large -+ amount of logs from the entire system. If the sysctl option is enabled, -+ a sysctl option with name "audit_group" is created. -+ -+config GRKERNSEC_AUDIT_GID -+ int "GID for auditing" -+ depends on GRKERNSEC_AUDIT_GROUP -+ default 1007 -+ -+config GRKERNSEC_EXECLOG -+ bool "Exec logging" -+ help -+ If you say Y here, all execve() calls will be logged (since the -+ other exec*() calls are frontends to execve(), all execution -+ will be logged). Useful for shell-servers that like to keep track -+ of their users. If the sysctl option is enabled, a sysctl option with -+ name "exec_logging" is created. -+ WARNING: This option when enabled will produce a LOT of logs, especially -+ on an active system. -+ -+config GRKERNSEC_RESLOG -+ bool "Resource logging" -+ help -+ If you say Y here, all attempts to overstep resource limits will -+ be logged with the resource name, the requested size, and the current -+ limit. It is highly recommended that you say Y here. If the sysctl -+ option is enabled, a sysctl option with name "resource_logging" is -+ created. If the RBAC system is enabled, the sysctl value is ignored. -+ -+config GRKERNSEC_CHROOT_EXECLOG -+ bool "Log execs within chroot" -+ help -+ If you say Y here, all executions inside a chroot jail will be logged -+ to syslog. This can cause a large amount of logs if certain -+ applications (eg. djb's daemontools) are installed on the system, and -+ is therefore left as an option. If the sysctl option is enabled, a -+ sysctl option with name "chroot_execlog" is created. -+ -+config GRKERNSEC_AUDIT_PTRACE -+ bool "Ptrace logging" -+ help -+ If you say Y here, all attempts to attach to a process via ptrace -+ will be logged. If the sysctl option is enabled, a sysctl option -+ with name "audit_ptrace" is created. -+ -+config GRKERNSEC_AUDIT_CHDIR -+ bool "Chdir logging" -+ help -+ If you say Y here, all chdir() calls will be logged. If the sysctl -+ option is enabled, a sysctl option with name "audit_chdir" is created. -+ -+config GRKERNSEC_AUDIT_MOUNT -+ bool "(Un)Mount logging" -+ help -+ If you say Y here, all mounts and unmounts will be logged. If the -+ sysctl option is enabled, a sysctl option with name "audit_mount" is -+ created. -+ -+config GRKERNSEC_SIGNAL -+ bool "Signal logging" -+ help -+ If you say Y here, certain important signals will be logged, such as -+ SIGSEGV, which will as a result inform you of when a error in a program -+ occurred, which in some cases could mean a possible exploit attempt. 
-+ If the sysctl option is enabled, a sysctl option with name -+ "signal_logging" is created. -+ -+config GRKERNSEC_FORKFAIL -+ bool "Fork failure logging" -+ help -+ If you say Y here, all failed fork() attempts will be logged. -+ This could suggest a fork bomb, or someone attempting to overstep -+ their process limit. If the sysctl option is enabled, a sysctl option -+ with name "forkfail_logging" is created. -+ -+config GRKERNSEC_TIME -+ bool "Time change logging" -+ help -+ If you say Y here, any changes of the system clock will be logged. -+ If the sysctl option is enabled, a sysctl option with name -+ "timechange_logging" is created. -+ -+config GRKERNSEC_PROC_IPADDR -+ bool "/proc/<pid>/ipaddr support" -+ help -+ If you say Y here, a new entry will be added to each /proc/<pid> -+ directory that contains the IP address of the person using the task. -+ The IP is carried across local TCP and AF_UNIX stream sockets. -+ This information can be useful for IDS/IPSes to perform remote response -+ to a local attack. The entry is readable by only the owner of the -+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via -+ the RBAC system), and thus does not create privacy concerns. -+ -+config GRKERNSEC_RWXMAP_LOG -+ bool 'Denied RWX mmap/mprotect logging' -+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT -+ help -+ If you say Y here, calls to mmap() and mprotect() with explicit -+ usage of PROT_WRITE and PROT_EXEC together will be logged when -+ denied by the PAX_MPROTECT feature. If the sysctl option is -+ enabled, a sysctl option with name "rwxmap_logging" is created. -+ -+config GRKERNSEC_AUDIT_TEXTREL -+ bool 'ELF text relocations logging (READ HELP)' -+ depends on PAX_MPROTECT -+ help -+ If you say Y here, text relocations will be logged with the filename -+ of the offending library or binary. The purpose of the feature is -+ to help Linux distribution developers get rid of libraries and -+ binaries that need text relocations which hinder the future progress -+ of PaX. Only Linux distribution developers should say Y here, and -+ never on a production machine, as this option creates an information -+ leak that could aid an attacker in defeating the randomization of -+ a single memory region. If the sysctl option is enabled, a sysctl -+ option with name "audit_textrel" is created. -+ -+endmenu -+ -+menu "Executable Protections" -+depends on GRKERNSEC -+ -+config GRKERNSEC_DMESG -+ bool "Dmesg(8) restriction" -+ help -+ If you say Y here, non-root users will not be able to use dmesg(8) -+ to view up to the last 4kb of messages in the kernel's log buffer. -+ The kernel's log buffer often contains kernel addresses and other -+ identifying information useful to an attacker in fingerprinting a -+ system for a targeted exploit. -+ If the sysctl option is enabled, a sysctl option with name "dmesg" is -+ created. -+ -+config GRKERNSEC_HARDEN_PTRACE -+ bool "Deter ptrace-based process snooping" -+ help -+ If you say Y here, TTY sniffers and other malicious monitoring -+ programs implemented through ptrace will be defeated. If you -+ have been using the RBAC system, this option has already been -+ enabled for several years for all users, with the ability to make -+ fine-grained exceptions. -+ -+ This option only affects the ability of non-root users to ptrace -+ processes that are not a descendent of the ptracing process. -+ This means that strace ./binary and gdb ./binary will still work, -+ but attaching to arbitrary processes will not. 
If the sysctl -+ option is enabled, a sysctl option with name "harden_ptrace" is -+ created. -+ -+config GRKERNSEC_TPE -+ bool "Trusted Path Execution (TPE)" -+ help -+ If you say Y here, you will be able to choose a gid to add to the -+ supplementary groups of users you want to mark as "untrusted." -+ These users will not be able to execute any files that are not in -+ root-owned directories writable only by root. If the sysctl option -+ is enabled, a sysctl option with name "tpe" is created. -+ -+config GRKERNSEC_TPE_ALL -+ bool "Partially restrict all non-root users" -+ depends on GRKERNSEC_TPE -+ help -+ If you say Y here, all non-root users will be covered under -+ a weaker TPE restriction. This is separate from, and in addition to, -+ the main TPE options that you have selected elsewhere. Thus, if a -+ "trusted" GID is chosen, this restriction applies to even that GID. -+ Under this restriction, all non-root users will only be allowed to -+ execute files in directories they own that are not group or -+ world-writable, or in directories owned by root and writable only by -+ root. If the sysctl option is enabled, a sysctl option with name -+ "tpe_restrict_all" is created. -+ -+config GRKERNSEC_TPE_INVERT -+ bool "Invert GID option" -+ depends on GRKERNSEC_TPE -+ help -+ If you say Y here, the group you specify in the TPE configuration will -+ decide what group TPE restrictions will be *disabled* for. This -+ option is useful if you want TPE restrictions to be applied to most -+ users on the system. If the sysctl option is enabled, a sysctl option -+ with name "tpe_invert" is created. Unlike other sysctl options, this -+ entry will default to on for backward-compatibility. -+ -+config GRKERNSEC_TPE_GID -+ int "GID for untrusted users" -+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT -+ default 1005 -+ help -+ Setting this GID determines what group TPE restrictions will be -+ *enabled* for. If the sysctl option is enabled, a sysctl option -+ with name "tpe_gid" is created. -+ -+config GRKERNSEC_TPE_GID -+ int "GID for trusted users" -+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT -+ default 1005 -+ help -+ Setting this GID determines what group TPE restrictions will be -+ *disabled* for. If the sysctl option is enabled, a sysctl option -+ with name "tpe_gid" is created. -+ -+endmenu -+menu "Network Protections" -+depends on GRKERNSEC -+ -+config GRKERNSEC_RANDNET -+ bool "Larger entropy pools" -+ help -+ If you say Y here, the entropy pools used for many features of Linux -+ and grsecurity will be doubled in size. Since several grsecurity -+ features use additional randomness, it is recommended that you say Y -+ here. Saying Y here has a similar effect as modifying -+ /proc/sys/kernel/random/poolsize. -+ -+config GRKERNSEC_BLACKHOLE -+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention" -+ depends on NET -+ help -+ If you say Y here, neither TCP resets nor ICMP -+ destination-unreachable packets will be sent in response to packets -+ sent to ports for which no associated listening process exists. -+ This feature supports both IPV4 and IPV6 and exempts the -+ loopback interface from blackholing. Enabling this feature -+ makes a host more resilient to DoS attacks and reduces network -+ visibility against scanners. -+ -+ The blackhole feature as-implemented is equivalent to the FreeBSD -+ blackhole feature, as it prevents RST responses to all packets, not -+ just SYNs. 
Under most application behavior this causes no -+ problems, but applications (like haproxy) may not close certain -+ connections in a way that cleanly terminates them on the remote -+ end, leaving the remote host in LAST_ACK state. Because of this -+ side-effect and to prevent intentional LAST_ACK DoSes, this -+ feature also adds automatic mitigation against such attacks. -+ The mitigation drastically reduces the amount of time a socket -+ can spend in LAST_ACK state. If you're using haproxy and not -+ all servers it connects to have this option enabled, consider -+ disabling this feature on the haproxy host. -+ -+ If the sysctl option is enabled, two sysctl options with names -+ "ip_blackhole" and "lastack_retries" will be created. -+ While "ip_blackhole" takes the standard zero/non-zero on/off -+ toggle, "lastack_retries" uses the same kinds of values as -+ "tcp_retries1" and "tcp_retries2". The default value of 4 -+ prevents a socket from lasting more than 45 seconds in LAST_ACK -+ state. -+ -+config GRKERNSEC_SOCKET -+ bool "Socket restrictions" -+ depends on NET -+ help -+ If you say Y here, you will be able to choose from several options. -+ If you assign a GID on your system and add it to the supplementary -+ groups of users you want to restrict socket access to, this patch -+ will perform up to three things, based on the option(s) you choose. -+ -+config GRKERNSEC_SOCKET_ALL -+ bool "Deny any sockets to group" -+ depends on GRKERNSEC_SOCKET -+ help -+ If you say Y here, you will be able to choose a GID of whose users will -+ be unable to connect to other hosts from your machine or run server -+ applications from your machine. If the sysctl option is enabled, a -+ sysctl option with name "socket_all" is created. -+ -+config GRKERNSEC_SOCKET_ALL_GID -+ int "GID to deny all sockets for" -+ depends on GRKERNSEC_SOCKET_ALL -+ default 1004 -+ help -+ Here you can choose the GID to disable socket access for. Remember to -+ add the users you want socket access disabled for to the GID -+ specified here. If the sysctl option is enabled, a sysctl option -+ with name "socket_all_gid" is created. -+ -+config GRKERNSEC_SOCKET_CLIENT -+ bool "Deny client sockets to group" -+ depends on GRKERNSEC_SOCKET -+ help -+ If you say Y here, you will be able to choose a GID of whose users will -+ be unable to connect to other hosts from your machine, but will be -+ able to run servers. If this option is enabled, all users in the group -+ you specify will have to use passive mode when initiating ftp transfers -+ from the shell on your machine. If the sysctl option is enabled, a -+ sysctl option with name "socket_client" is created. -+ -+config GRKERNSEC_SOCKET_CLIENT_GID -+ int "GID to deny client sockets for" -+ depends on GRKERNSEC_SOCKET_CLIENT -+ default 1003 -+ help -+ Here you can choose the GID to disable client socket access for. -+ Remember to add the users you want client socket access disabled for to -+ the GID specified here. If the sysctl option is enabled, a sysctl -+ option with name "socket_client_gid" is created. -+ -+config GRKERNSEC_SOCKET_SERVER -+ bool "Deny server sockets to group" -+ depends on GRKERNSEC_SOCKET -+ help -+ If you say Y here, you will be able to choose a GID of whose users will -+ be unable to run server applications from your machine. If the sysctl -+ option is enabled, a sysctl option with name "socket_server" is created. 
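What the server-socket restriction above means in practice for a user placed in the configured group (default GID 1002 below): the listening path of an ordinary daemon is refused. A rough userspace sketch, not from the patch; the port number and the expectation of an EACCES-style error are assumptions, since the hunk does not show the exact errno returned:

        /* Illustrative only -- not from the patch. For members of the
         * restricted group, grsecurity denies creating a listening socket. */
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <netinet/in.h>

        int main(void)
        {
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                struct sockaddr_in addr;

                memset(&addr, 0, sizeof(addr));
                addr.sin_family = AF_INET;
                addr.sin_addr.s_addr = htonl(INADDR_ANY);
                addr.sin_port = htons(8080);    /* arbitrary example port */

                if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
                    listen(fd, 16) < 0) {
                        perror("server socket"); /* denied for the restricted group */
                        return 1;
                }
                printf("listening on :8080 (caller is not in the restricted group)\n");
                close(fd);
                return 0;
        }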
-+ -+config GRKERNSEC_SOCKET_SERVER_GID -+ int "GID to deny server sockets for" -+ depends on GRKERNSEC_SOCKET_SERVER -+ default 1002 -+ help -+ Here you can choose the GID to disable server socket access for. -+ Remember to add the users you want server socket access disabled for to -+ the GID specified here. If the sysctl option is enabled, a sysctl -+ option with name "socket_server_gid" is created. -+ -+endmenu -+menu "Sysctl support" -+depends on GRKERNSEC && SYSCTL -+ -+config GRKERNSEC_SYSCTL -+ bool "Sysctl support" -+ help -+ If you say Y here, you will be able to change the options that -+ grsecurity runs with at bootup, without having to recompile your -+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity -+ to enable (1) or disable (0) various features. All the sysctl entries -+ are mutable until the "grsec_lock" entry is set to a non-zero value. -+ All features enabled in the kernel configuration are disabled at boot -+ if you do not say Y to the "Turn on features by default" option. -+ All options should be set at startup, and the grsec_lock entry should -+ be set to a non-zero value after all the options are set. -+ *THIS IS EXTREMELY IMPORTANT* -+ -+config GRKERNSEC_SYSCTL_DISTRO -+ bool "Extra sysctl support for distro makers (READ HELP)" -+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO -+ help -+ If you say Y here, additional sysctl options will be created -+ for features that affect processes running as root. Therefore, -+ it is critical when using this option that the grsec_lock entry be -+ enabled after boot. Only distros with prebuilt kernel packages -+ with this option enabled that can ensure grsec_lock is enabled -+ after boot should use this option. -+ *Failure to set grsec_lock after boot makes all grsec features -+ this option covers useless* -+ -+ Currently this option creates the following sysctl entries: -+ "Disable Privileged I/O": "disable_priv_io" -+ -+config GRKERNSEC_SYSCTL_ON -+ bool "Turn on features by default" -+ depends on GRKERNSEC_SYSCTL -+ help -+ If you say Y here, instead of having all features enabled in the -+ kernel configuration disabled at boot time, the features will be -+ enabled at boot time. It is recommended you say Y here unless -+ there is some reason you would want all sysctl-tunable features to -+ be disabled by default. As mentioned elsewhere, it is important -+ to enable the grsec_lock entry once you have finished modifying -+ the sysctl entries. -+ -+endmenu -+menu "Logging Options" -+depends on GRKERNSEC -+ -+config GRKERNSEC_FLOODTIME -+ int "Seconds in between log messages (minimum)" -+ default 10 -+ help -+ This option allows you to enforce the number of seconds between -+ grsecurity log messages. The default should be suitable for most -+ people, however, if you choose to change it, choose a value small enough -+ to allow informative logs to be produced, but large enough to -+ prevent flooding. -+ -+config GRKERNSEC_FLOODBURST -+ int "Number of messages in a burst (maximum)" -+ default 6 -+ help -+ This option allows you to choose the maximum number of messages allowed -+ within the flood time interval you chose in a separate option. The -+ default should be suitable for most people, however if you find that -+ many of your logs are being interpreted as flooding, you may want to -+ raise this value. 
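The sysctl-support help text above stresses that all grsecurity options should be set at startup and that grsec_lock must be set to a non-zero value only after everything else is configured. A rough init-time sketch of that ordering (not from the patch; the particular toggles chosen are just examples drawn from the help texts earlier in this hunk, and the /proc/sys/kernel/grsecurity layout is taken from the GRKERNSEC_SYSCTL description):

        /* Illustrative only -- not from the patch. Set the desired toggles
         * first, then lock the configuration so it can no longer be changed
         * at runtime. */
        #include <stdio.h>

        static void grsec_write(const char *name, const char *value)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
                f = fopen(path, "w");
                if (!f) {
                        perror(path);
                        return;
                }
                fputs(value, f);
                fclose(f);
        }

        int main(void)
        {
                grsec_write("exec_logging", "0");      /* example toggles; names from the help texts */
                grsec_write("audit_mount", "1");
                grsec_write("deter_bruteforce", "1");

                grsec_write("grsec_lock", "1");        /* must come last */
                return 0;
        }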
-+ -+endmenu -+ -+endmenu -diff --git a/grsecurity/Makefile b/grsecurity/Makefile -new file mode 100644 -index 0000000..be9ae3a ---- /dev/null -+++ b/grsecurity/Makefile -@@ -0,0 +1,36 @@ -+# grsecurity's ACL system was originally written in 2001 by Michael Dalton -+# during 2001-2009 it has been completely redesigned by Brad Spengler -+# into an RBAC system -+# -+# All code in this directory and various hooks inserted throughout the kernel -+# are copyright Brad Spengler - Open Source Security, Inc., and released -+# under the GPL v2 or higher -+ -+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ -+ grsec_mount.o grsec_sig.o grsec_sysctl.o \ -+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o -+ -+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \ -+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ -+ gracl_learn.o grsec_log.o -+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o -+ -+ifdef CONFIG_NET -+obj-y += grsec_sock.o -+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o -+endif -+ -+ifndef CONFIG_GRKERNSEC -+obj-y += grsec_disabled.o -+endif -+ -+ifdef CONFIG_GRKERNSEC_HIDESYM -+extra-y := grsec_hidesym.o -+$(obj)/grsec_hidesym.o: -+ @-chmod -f 500 /boot -+ @-chmod -f 500 /lib/modules -+ @-chmod -f 500 /lib64/modules -+ @-chmod -f 500 /lib32/modules -+ @-chmod -f 700 . -+ @echo ' grsec: protected kernel image paths' -+endif -diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c -new file mode 100644 -index 0000000..09258e0 ---- /dev/null -+++ b/grsecurity/gracl.c -@@ -0,0 +1,4156 @@ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/namei.h> -+#include <linux/mount.h> -+#include <linux/tty.h> -+#include <linux/proc_fs.h> -+#include <linux/lglock.h> -+#include <linux/slab.h> -+#include <linux/vmalloc.h> -+#include <linux/types.h> -+#include <linux/sysctl.h> -+#include <linux/netdevice.h> -+#include <linux/ptrace.h> -+#include <linux/gracl.h> -+#include <linux/gralloc.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/pid_namespace.h> -+#include <linux/fdtable.h> -+#include <linux/percpu.h> -+ -+#include <asm/uaccess.h> -+#include <asm/errno.h> -+#include <asm/mman.h> -+ -+static struct acl_role_db acl_role_set; -+static struct name_db name_set; -+static struct inodev_db inodev_set; -+ -+/* for keeping track of userspace pointers used for subjects, so we -+ can share references in the kernel as well -+*/ -+ -+static struct path real_root; -+ -+static struct acl_subj_map_db subj_map_set; -+ -+static struct acl_role_label *default_role; -+ -+static struct acl_role_label *role_list; -+ -+static u16 acl_sp_role_value; -+ -+extern char *gr_shared_page[4]; -+static DEFINE_MUTEX(gr_dev_mutex); -+DEFINE_RWLOCK(gr_inode_lock); -+ -+struct gr_arg *gr_usermode; -+ -+static unsigned int gr_status __read_only = GR_STATUS_INIT; -+ -+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); -+extern void gr_clear_learn_entries(void); -+ -+#ifdef CONFIG_GRKERNSEC_RESLOG -+extern void gr_log_resource(const struct task_struct *task, -+ const int res, const unsigned long wanted, const int gt); -+#endif -+ -+unsigned char *gr_system_salt; -+unsigned char *gr_system_sum; -+ -+static struct sprole_pw **acl_special_roles = NULL; -+static __u16 num_sprole_pws = 0; -+ -+static struct acl_role_label *kernel_role = NULL; -+ -+static unsigned int gr_auth_attempts = 0; 
-+static unsigned long gr_auth_expires = 0UL; -+ -+#ifdef CONFIG_NET -+extern struct vfsmount *sock_mnt; -+#endif -+ -+extern struct vfsmount *pipe_mnt; -+extern struct vfsmount *shm_mnt; -+#ifdef CONFIG_HUGETLBFS -+extern struct vfsmount *hugetlbfs_vfsmount; -+#endif -+ -+static struct acl_object_label *fakefs_obj_rw; -+static struct acl_object_label *fakefs_obj_rwx; -+ -+extern int gr_init_uidset(void); -+extern void gr_free_uidset(void); -+extern void gr_remove_uid(uid_t uid); -+extern int gr_find_uid(uid_t uid); -+ -+DECLARE_BRLOCK(vfsmount_lock); -+ -+__inline__ int -+gr_acl_is_enabled(void) -+{ -+ return (gr_status & GR_READY); -+} -+ -+#ifdef CONFIG_BTRFS_FS -+extern dev_t get_btrfs_dev_from_inode(struct inode *inode); -+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -+#endif -+ -+static inline dev_t __get_dev(const struct dentry *dentry) -+{ -+#ifdef CONFIG_BTRFS_FS -+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr) -+ return get_btrfs_dev_from_inode(dentry->d_inode); -+ else -+#endif -+ return dentry->d_inode->i_sb->s_dev; -+} -+ -+dev_t gr_get_dev_from_dentry(struct dentry *dentry) -+{ -+ return __get_dev(dentry); -+} -+ -+static char gr_task_roletype_to_char(struct task_struct *task) -+{ -+ switch (task->role->roletype & -+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | -+ GR_ROLE_SPECIAL)) { -+ case GR_ROLE_DEFAULT: -+ return 'D'; -+ case GR_ROLE_USER: -+ return 'U'; -+ case GR_ROLE_GROUP: -+ return 'G'; -+ case GR_ROLE_SPECIAL: -+ return 'S'; -+ } -+ -+ return 'X'; -+} -+ -+char gr_roletype_to_char(void) -+{ -+ return gr_task_roletype_to_char(current); -+} -+ -+__inline__ int -+gr_acl_tpe_check(void) -+{ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ if (current->role->roletype & GR_ROLE_TPE) -+ return 1; -+ else -+ return 0; -+} -+ -+int -+gr_handle_rawio(const struct inode *inode) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ if (inode && S_ISBLK(inode->i_mode) && -+ grsec_enable_chroot_caps && proc_is_chrooted(current) && -+ !capable(CAP_SYS_RAWIO)) -+ return 1; -+#endif -+ return 0; -+} -+ -+static int -+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) -+{ -+ if (likely(lena != lenb)) -+ return 0; -+ -+ return !memcmp(a, b, lena); -+} -+ -+static int prepend(char **buffer, int *buflen, const char *str, int namelen) -+{ -+ *buflen -= namelen; -+ if (*buflen < 0) -+ return -ENAMETOOLONG; -+ *buffer -= namelen; -+ memcpy(*buffer, str, namelen); -+ return 0; -+} -+ -+static int prepend_name(char **buffer, int *buflen, struct qstr *name) -+{ -+ return prepend(buffer, buflen, name->name, name->len); -+} -+ -+static int prepend_path(const struct path *path, struct path *root, -+ char **buffer, int *buflen) -+{ -+ struct dentry *dentry = path->dentry; -+ struct vfsmount *vfsmnt = path->mnt; -+ bool slash = false; -+ int error = 0; -+ -+ while (dentry != root->dentry || vfsmnt != root->mnt) { -+ struct dentry * parent; -+ -+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { -+ /* Global root? 
*/ -+ if (vfsmnt->mnt_parent == vfsmnt) { -+ goto out; -+ } -+ dentry = vfsmnt->mnt_mountpoint; -+ vfsmnt = vfsmnt->mnt_parent; -+ continue; -+ } -+ parent = dentry->d_parent; -+ prefetch(parent); -+ spin_lock(&dentry->d_lock); -+ error = prepend_name(buffer, buflen, &dentry->d_name); -+ spin_unlock(&dentry->d_lock); -+ if (!error) -+ error = prepend(buffer, buflen, "/", 1); -+ if (error) -+ break; -+ -+ slash = true; -+ dentry = parent; -+ } -+ -+out: -+ if (!error && !slash) -+ error = prepend(buffer, buflen, "/", 1); -+ -+ return error; -+} -+ -+/* this must be called with vfsmount_lock and rename_lock held */ -+ -+static char *__our_d_path(const struct path *path, struct path *root, -+ char *buf, int buflen) -+{ -+ char *res = buf + buflen; -+ int error; -+ -+ prepend(&res, &buflen, "\0", 1); -+ error = prepend_path(path, root, &res, &buflen); -+ if (error) -+ return ERR_PTR(error); -+ -+ return res; -+} -+ -+static char * -+gen_full_path(struct path *path, struct path *root, char *buf, int buflen) -+{ -+ char *retval; -+ -+ retval = __our_d_path(path, root, buf, buflen); -+ if (unlikely(IS_ERR(retval))) -+ retval = strcpy(buf, "<path too long>"); -+ else if (unlikely(retval[1] == '/' && retval[2] == '\0')) -+ retval[1] = '\0'; -+ -+ return retval; -+} -+ -+static char * -+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, -+ char *buf, int buflen) -+{ -+ struct path path; -+ char *res; -+ -+ path.dentry = (struct dentry *)dentry; -+ path.mnt = (struct vfsmount *)vfsmnt; -+ -+ /* we can use real_root.dentry, real_root.mnt, because this is only called -+ by the RBAC system */ -+ res = gen_full_path(&path, &real_root, buf, buflen); -+ -+ return res; -+} -+ -+static char * -+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, -+ char *buf, int buflen) -+{ -+ char *res; -+ struct path path; -+ struct path root; -+ struct task_struct *reaper = &init_task; -+ -+ path.dentry = (struct dentry *)dentry; -+ path.mnt = (struct vfsmount *)vfsmnt; -+ -+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */ -+ get_fs_root(reaper->fs, &root); -+ -+ write_seqlock(&rename_lock); -+ br_read_lock(vfsmount_lock); -+ res = gen_full_path(&path, &root, buf, buflen); -+ br_read_unlock(vfsmount_lock); -+ write_sequnlock(&rename_lock); -+ -+ path_put(&root); -+ return res; -+} -+ -+static char * -+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ char *ret; -+ write_seqlock(&rename_lock); -+ br_read_lock(vfsmount_lock); -+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), -+ PAGE_SIZE); -+ br_read_unlock(vfsmount_lock); -+ write_sequnlock(&rename_lock); -+ return ret; -+} -+ -+static char * -+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ char *ret; -+ char *buf; -+ int buflen; -+ -+ write_seqlock(&rename_lock); -+ br_read_lock(vfsmount_lock); -+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); -+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6); -+ buflen = (int)(ret - buf); -+ if (buflen >= 5) -+ prepend(&ret, &buflen, "/proc", 5); -+ else -+ ret = strcpy(buf, "<path too long>"); -+ br_read_unlock(vfsmount_lock); -+ write_sequnlock(&rename_lock); -+ return ret; -+} -+ -+char * -+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), -+ PAGE_SIZE); -+} -+ -+char * -+gr_to_filename(const 
struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), -+ PAGE_SIZE); -+} -+ -+char * -+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), -+ PAGE_SIZE); -+} -+ -+char * -+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), -+ PAGE_SIZE); -+} -+ -+char * -+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), -+ PAGE_SIZE); -+} -+ -+__inline__ __u32 -+to_gr_audit(const __u32 reqmode) -+{ -+ /* masks off auditable permission flags, then shifts them to create -+ auditing flags, and adds the special case of append auditing if -+ we're requesting write */ -+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); -+} -+ -+struct acl_subject_label * -+lookup_subject_map(const struct acl_subject_label *userp) -+{ -+ unsigned int index = shash(userp, subj_map_set.s_size); -+ struct subject_map *match; -+ -+ match = subj_map_set.s_hash[index]; -+ -+ while (match && match->user != userp) -+ match = match->next; -+ -+ if (match != NULL) -+ return match->kernel; -+ else -+ return NULL; -+} -+ -+static void -+insert_subj_map_entry(struct subject_map *subjmap) -+{ -+ unsigned int index = shash(subjmap->user, subj_map_set.s_size); -+ struct subject_map **curr; -+ -+ subjmap->prev = NULL; -+ -+ curr = &subj_map_set.s_hash[index]; -+ if (*curr != NULL) -+ (*curr)->prev = subjmap; -+ -+ subjmap->next = *curr; -+ *curr = subjmap; -+ -+ return; -+} -+ -+static struct acl_role_label * -+lookup_acl_role_label(const struct task_struct *task, const uid_t uid, -+ const gid_t gid) -+{ -+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); -+ struct acl_role_label *match; -+ struct role_allowed_ip *ipp; -+ unsigned int x; -+ u32 curr_ip = task->signal->curr_ip; -+ -+ task->signal->saved_ip = curr_ip; -+ -+ match = acl_role_set.r_hash[index]; -+ -+ while (match) { -+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { -+ for (x = 0; x < match->domain_child_num; x++) { -+ if (match->domain_children[x] == uid) -+ goto found; -+ } -+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) -+ break; -+ match = match->next; -+ } -+found: -+ if (match == NULL) { -+ try_group: -+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); -+ match = acl_role_set.r_hash[index]; -+ -+ while (match) { -+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { -+ for (x = 0; x < match->domain_child_num; x++) { -+ if (match->domain_children[x] == gid) -+ goto found2; -+ } -+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) -+ break; -+ match = match->next; -+ } -+found2: -+ if (match == NULL) -+ match = default_role; -+ if (match->allowed_ips == NULL) -+ return match; -+ else { -+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { -+ if (likely -+ ((ntohl(curr_ip) & ipp->netmask) == -+ (ntohl(ipp->addr) & ipp->netmask))) -+ return match; -+ } -+ match = default_role; -+ } -+ } else if (match->allowed_ips == NULL) { -+ return match; -+ } else { -+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { -+ if (likely -+ ((ntohl(curr_ip) & ipp->netmask) == -+ 
(ntohl(ipp->addr) & ipp->netmask))) -+ return match; -+ } -+ goto try_group; -+ } -+ -+ return match; -+} -+ -+struct acl_subject_label * -+lookup_acl_subj_label(const ino_t ino, const dev_t dev, -+ const struct acl_role_label *role) -+{ -+ unsigned int index = fhash(ino, dev, role->subj_hash_size); -+ struct acl_subject_label *match; -+ -+ match = role->subj_hash[index]; -+ -+ while (match && (match->inode != ino || match->device != dev || -+ (match->mode & GR_DELETED))) { -+ match = match->next; -+ } -+ -+ if (match && !(match->mode & GR_DELETED)) -+ return match; -+ else -+ return NULL; -+} -+ -+struct acl_subject_label * -+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, -+ const struct acl_role_label *role) -+{ -+ unsigned int index = fhash(ino, dev, role->subj_hash_size); -+ struct acl_subject_label *match; -+ -+ match = role->subj_hash[index]; -+ -+ while (match && (match->inode != ino || match->device != dev || -+ !(match->mode & GR_DELETED))) { -+ match = match->next; -+ } -+ -+ if (match && (match->mode & GR_DELETED)) -+ return match; -+ else -+ return NULL; -+} -+ -+static struct acl_object_label * -+lookup_acl_obj_label(const ino_t ino, const dev_t dev, -+ const struct acl_subject_label *subj) -+{ -+ unsigned int index = fhash(ino, dev, subj->obj_hash_size); -+ struct acl_object_label *match; -+ -+ match = subj->obj_hash[index]; -+ -+ while (match && (match->inode != ino || match->device != dev || -+ (match->mode & GR_DELETED))) { -+ match = match->next; -+ } -+ -+ if (match && !(match->mode & GR_DELETED)) -+ return match; -+ else -+ return NULL; -+} -+ -+static struct acl_object_label * -+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, -+ const struct acl_subject_label *subj) -+{ -+ unsigned int index = fhash(ino, dev, subj->obj_hash_size); -+ struct acl_object_label *match; -+ -+ match = subj->obj_hash[index]; -+ -+ while (match && (match->inode != ino || match->device != dev || -+ !(match->mode & GR_DELETED))) { -+ match = match->next; -+ } -+ -+ if (match && (match->mode & GR_DELETED)) -+ return match; -+ -+ match = subj->obj_hash[index]; -+ -+ while (match && (match->inode != ino || match->device != dev || -+ (match->mode & GR_DELETED))) { -+ match = match->next; -+ } -+ -+ if (match && !(match->mode & GR_DELETED)) -+ return match; -+ else -+ return NULL; -+} -+ -+static struct name_entry * -+lookup_name_entry(const char *name) -+{ -+ unsigned int len = strlen(name); -+ unsigned int key = full_name_hash(name, len); -+ unsigned int index = key % name_set.n_size; -+ struct name_entry *match; -+ -+ match = name_set.n_hash[index]; -+ -+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) -+ match = match->next; -+ -+ return match; -+} -+ -+static struct name_entry * -+lookup_name_entry_create(const char *name) -+{ -+ unsigned int len = strlen(name); -+ unsigned int key = full_name_hash(name, len); -+ unsigned int index = key % name_set.n_size; -+ struct name_entry *match; -+ -+ match = name_set.n_hash[index]; -+ -+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || -+ !match->deleted)) -+ match = match->next; -+ -+ if (match && match->deleted) -+ return match; -+ -+ match = name_set.n_hash[index]; -+ -+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || -+ match->deleted)) -+ match = match->next; -+ -+ if (match && !match->deleted) -+ return match; -+ else -+ return NULL; -+} -+ -+static struct inodev_entry * -+lookup_inodev_entry(const ino_t 
ino, const dev_t dev) -+{ -+ unsigned int index = fhash(ino, dev, inodev_set.i_size); -+ struct inodev_entry *match; -+ -+ match = inodev_set.i_hash[index]; -+ -+ while (match && (match->nentry->inode != ino || match->nentry->device != dev)) -+ match = match->next; -+ -+ return match; -+} -+ -+static void -+insert_inodev_entry(struct inodev_entry *entry) -+{ -+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device, -+ inodev_set.i_size); -+ struct inodev_entry **curr; -+ -+ entry->prev = NULL; -+ -+ curr = &inodev_set.i_hash[index]; -+ if (*curr != NULL) -+ (*curr)->prev = entry; -+ -+ entry->next = *curr; -+ *curr = entry; -+ -+ return; -+} -+ -+static void -+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) -+{ -+ unsigned int index = -+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); -+ struct acl_role_label **curr; -+ struct acl_role_label *tmp; -+ -+ curr = &acl_role_set.r_hash[index]; -+ -+ /* if role was already inserted due to domains and already has -+ a role in the same bucket as it attached, then we need to -+ combine these two buckets -+ */ -+ if (role->next) { -+ tmp = role->next; -+ while (tmp->next) -+ tmp = tmp->next; -+ tmp->next = *curr; -+ } else -+ role->next = *curr; -+ *curr = role; -+ -+ return; -+} -+ -+static void -+insert_acl_role_label(struct acl_role_label *role) -+{ -+ int i; -+ -+ if (role_list == NULL) { -+ role_list = role; -+ role->prev = NULL; -+ } else { -+ role->prev = role_list; -+ role_list = role; -+ } -+ -+ /* used for hash chains */ -+ role->next = NULL; -+ -+ if (role->roletype & GR_ROLE_DOMAIN) { -+ for (i = 0; i < role->domain_child_num; i++) -+ __insert_acl_role_label(role, role->domain_children[i]); -+ } else -+ __insert_acl_role_label(role, role->uidgid); -+} -+ -+static int -+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted) -+{ -+ struct name_entry **curr, *nentry; -+ struct inodev_entry *ientry; -+ unsigned int len = strlen(name); -+ unsigned int key = full_name_hash(name, len); -+ unsigned int index = key % name_set.n_size; -+ -+ curr = &name_set.n_hash[index]; -+ -+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) -+ curr = &((*curr)->next); -+ -+ if (*curr != NULL) -+ return 1; -+ -+ nentry = acl_alloc(sizeof (struct name_entry)); -+ if (nentry == NULL) -+ return 0; -+ ientry = acl_alloc(sizeof (struct inodev_entry)); -+ if (ientry == NULL) -+ return 0; -+ ientry->nentry = nentry; -+ -+ nentry->key = key; -+ nentry->name = name; -+ nentry->inode = inode; -+ nentry->device = device; -+ nentry->len = len; -+ nentry->deleted = deleted; -+ -+ nentry->prev = NULL; -+ curr = &name_set.n_hash[index]; -+ if (*curr != NULL) -+ (*curr)->prev = nentry; -+ nentry->next = *curr; -+ *curr = nentry; -+ -+ /* insert us into the table searchable by inode/dev */ -+ insert_inodev_entry(ientry); -+ -+ return 1; -+} -+ -+static void -+insert_acl_obj_label(struct acl_object_label *obj, -+ struct acl_subject_label *subj) -+{ -+ unsigned int index = -+ fhash(obj->inode, obj->device, subj->obj_hash_size); -+ struct acl_object_label **curr; -+ -+ -+ obj->prev = NULL; -+ -+ curr = &subj->obj_hash[index]; -+ if (*curr != NULL) -+ (*curr)->prev = obj; -+ -+ obj->next = *curr; -+ *curr = obj; -+ -+ return; -+} -+ -+static void -+insert_acl_subj_label(struct acl_subject_label *obj, -+ struct acl_role_label *role) -+{ -+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size); -+ struct acl_subject_label 
**curr; -+ -+ obj->prev = NULL; -+ -+ curr = &role->subj_hash[index]; -+ if (*curr != NULL) -+ (*curr)->prev = obj; -+ -+ obj->next = *curr; -+ *curr = obj; -+ -+ return; -+} -+ -+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ -+ -+static void * -+create_table(__u32 * len, int elementsize) -+{ -+ unsigned int table_sizes[] = { -+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, -+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, -+ 4194301, 8388593, 16777213, 33554393, 67108859 -+ }; -+ void *newtable = NULL; -+ unsigned int pwr = 0; -+ -+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && -+ table_sizes[pwr] <= *len) -+ pwr++; -+ -+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) -+ return newtable; -+ -+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) -+ newtable = -+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); -+ else -+ newtable = vmalloc(table_sizes[pwr] * elementsize); -+ -+ *len = table_sizes[pwr]; -+ -+ return newtable; -+} -+ -+static int -+init_variables(const struct gr_arg *arg) -+{ -+ struct task_struct *reaper = &init_task; -+ unsigned int stacksize; -+ -+ subj_map_set.s_size = arg->role_db.num_subjects; -+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; -+ name_set.n_size = arg->role_db.num_objects; -+ inodev_set.i_size = arg->role_db.num_objects; -+ -+ if (!subj_map_set.s_size || !acl_role_set.r_size || -+ !name_set.n_size || !inodev_set.i_size) -+ return 1; -+ -+ if (!gr_init_uidset()) -+ return 1; -+ -+ /* set up the stack that holds allocation info */ -+ -+ stacksize = arg->role_db.num_pointers + 5; -+ -+ if (!acl_alloc_stack_init(stacksize)) -+ return 1; -+ -+ /* grab reference for the real root dentry and vfsmount */ -+ get_fs_root(reaper->fs, &real_root); -+ -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino); -+#endif -+ -+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label)); -+ if (fakefs_obj_rw == NULL) -+ return 1; -+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE; -+ -+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label)); -+ if (fakefs_obj_rwx == NULL) -+ return 1; -+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; -+ -+ subj_map_set.s_hash = -+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *)); -+ acl_role_set.r_hash = -+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); -+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); -+ inodev_set.i_hash = -+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *)); -+ -+ if (!subj_map_set.s_hash || !acl_role_set.r_hash || -+ !name_set.n_hash || !inodev_set.i_hash) -+ return 1; -+ -+ memset(subj_map_set.s_hash, 0, -+ sizeof(struct subject_map *) * subj_map_set.s_size); -+ memset(acl_role_set.r_hash, 0, -+ sizeof (struct acl_role_label *) * acl_role_set.r_size); -+ memset(name_set.n_hash, 0, -+ sizeof (struct name_entry *) * name_set.n_size); -+ memset(inodev_set.i_hash, 0, -+ sizeof (struct inodev_entry *) * inodev_set.i_size); -+ -+ return 0; -+} -+ -+/* free information not needed after startup -+ currently contains user->kernel pointer mappings for subjects -+*/ -+ -+static void -+free_init_variables(void) -+{ -+ __u32 i; -+ -+ if (subj_map_set.s_hash) { -+ for (i = 0; i < subj_map_set.s_size; i++) { -+ if 
(subj_map_set.s_hash[i]) { -+ kfree(subj_map_set.s_hash[i]); -+ subj_map_set.s_hash[i] = NULL; -+ } -+ } -+ -+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= -+ PAGE_SIZE) -+ kfree(subj_map_set.s_hash); -+ else -+ vfree(subj_map_set.s_hash); -+ } -+ -+ return; -+} -+ -+static void -+free_variables(void) -+{ -+ struct acl_subject_label *s; -+ struct acl_role_label *r; -+ struct task_struct *task, *task2; -+ unsigned int x; -+ -+ gr_clear_learn_entries(); -+ -+ read_lock(&tasklist_lock); -+ do_each_thread(task2, task) { -+ task->acl_sp_role = 0; -+ task->acl_role_id = 0; -+ task->acl = NULL; -+ task->role = NULL; -+ } while_each_thread(task2, task); -+ read_unlock(&tasklist_lock); -+ -+ /* release the reference to the real root dentry and vfsmount */ -+ path_put(&real_root); -+ -+ /* free all object hash tables */ -+ -+ FOR_EACH_ROLE_START(r) -+ if (r->subj_hash == NULL) -+ goto next_role; -+ FOR_EACH_SUBJECT_START(r, s, x) -+ if (s->obj_hash == NULL) -+ break; -+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) -+ kfree(s->obj_hash); -+ else -+ vfree(s->obj_hash); -+ FOR_EACH_SUBJECT_END(s, x) -+ FOR_EACH_NESTED_SUBJECT_START(r, s) -+ if (s->obj_hash == NULL) -+ break; -+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) -+ kfree(s->obj_hash); -+ else -+ vfree(s->obj_hash); -+ FOR_EACH_NESTED_SUBJECT_END(s) -+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) -+ kfree(r->subj_hash); -+ else -+ vfree(r->subj_hash); -+ r->subj_hash = NULL; -+next_role: -+ FOR_EACH_ROLE_END(r) -+ -+ acl_free_all(); -+ -+ if (acl_role_set.r_hash) { -+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= -+ PAGE_SIZE) -+ kfree(acl_role_set.r_hash); -+ else -+ vfree(acl_role_set.r_hash); -+ } -+ if (name_set.n_hash) { -+ if ((name_set.n_size * sizeof (struct name_entry *)) <= -+ PAGE_SIZE) -+ kfree(name_set.n_hash); -+ else -+ vfree(name_set.n_hash); -+ } -+ -+ if (inodev_set.i_hash) { -+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <= -+ PAGE_SIZE) -+ kfree(inodev_set.i_hash); -+ else -+ vfree(inodev_set.i_hash); -+ } -+ -+ gr_free_uidset(); -+ -+ memset(&name_set, 0, sizeof (struct name_db)); -+ memset(&inodev_set, 0, sizeof (struct inodev_db)); -+ memset(&acl_role_set, 0, sizeof (struct acl_role_db)); -+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db)); -+ -+ default_role = NULL; -+ role_list = NULL; -+ -+ return; -+} -+ -+static __u32 -+count_user_objs(struct acl_object_label *userp) -+{ -+ struct acl_object_label o_tmp; -+ __u32 num = 0; -+ -+ while (userp) { -+ if (copy_from_user(&o_tmp, userp, -+ sizeof (struct acl_object_label))) -+ break; -+ -+ userp = o_tmp.prev; -+ num++; -+ } -+ -+ return num; -+} -+ -+static struct acl_subject_label * -+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); -+ -+static int -+copy_user_glob(struct acl_object_label *obj) -+{ -+ struct acl_object_label *g_tmp, **guser; -+ unsigned int len; -+ char *tmp; -+ -+ if (obj->globbed == NULL) -+ return 0; -+ -+ guser = &obj->globbed; -+ while (*guser) { -+ g_tmp = (struct acl_object_label *) -+ acl_alloc(sizeof (struct acl_object_label)); -+ if (g_tmp == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(g_tmp, *guser, -+ sizeof (struct acl_object_label))) -+ return -EFAULT; -+ -+ len = strnlen_user(g_tmp->filename, PATH_MAX); -+ -+ if (!len || len >= PATH_MAX) -+ return -EINVAL; -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(tmp, 
g_tmp->filename, len)) -+ return -EFAULT; -+ tmp[len-1] = '\0'; -+ g_tmp->filename = tmp; -+ -+ *guser = g_tmp; -+ guser = &(g_tmp->next); -+ } -+ -+ return 0; -+} -+ -+static int -+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, -+ struct acl_role_label *role) -+{ -+ struct acl_object_label *o_tmp; -+ unsigned int len; -+ int ret; -+ char *tmp; -+ -+ while (userp) { -+ if ((o_tmp = (struct acl_object_label *) -+ acl_alloc(sizeof (struct acl_object_label))) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(o_tmp, userp, -+ sizeof (struct acl_object_label))) -+ return -EFAULT; -+ -+ userp = o_tmp->prev; -+ -+ len = strnlen_user(o_tmp->filename, PATH_MAX); -+ -+ if (!len || len >= PATH_MAX) -+ return -EINVAL; -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(tmp, o_tmp->filename, len)) -+ return -EFAULT; -+ tmp[len-1] = '\0'; -+ o_tmp->filename = tmp; -+ -+ insert_acl_obj_label(o_tmp, subj); -+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode, -+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0)) -+ return -ENOMEM; -+ -+ ret = copy_user_glob(o_tmp); -+ if (ret) -+ return ret; -+ -+ if (o_tmp->nested) { -+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); -+ if (IS_ERR(o_tmp->nested)) -+ return PTR_ERR(o_tmp->nested); -+ -+ /* insert into nested subject list */ -+ o_tmp->nested->next = role->hash->first; -+ role->hash->first = o_tmp->nested; -+ } -+ } -+ -+ return 0; -+} -+ -+static __u32 -+count_user_subjs(struct acl_subject_label *userp) -+{ -+ struct acl_subject_label s_tmp; -+ __u32 num = 0; -+ -+ while (userp) { -+ if (copy_from_user(&s_tmp, userp, -+ sizeof (struct acl_subject_label))) -+ break; -+ -+ userp = s_tmp.prev; -+ /* do not count nested subjects against this count, since -+ they are not included in the hash table, but are -+ attached to objects. 
We have already counted -+ the subjects in userspace for the allocation -+ stack -+ */ -+ if (!(s_tmp.mode & GR_NESTED)) -+ num++; -+ } -+ -+ return num; -+} -+ -+static int -+copy_user_allowedips(struct acl_role_label *rolep) -+{ -+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; -+ -+ ruserip = rolep->allowed_ips; -+ -+ while (ruserip) { -+ rlast = rtmp; -+ -+ if ((rtmp = (struct role_allowed_ip *) -+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(rtmp, ruserip, -+ sizeof (struct role_allowed_ip))) -+ return -EFAULT; -+ -+ ruserip = rtmp->prev; -+ -+ if (!rlast) { -+ rtmp->prev = NULL; -+ rolep->allowed_ips = rtmp; -+ } else { -+ rlast->next = rtmp; -+ rtmp->prev = rlast; -+ } -+ -+ if (!ruserip) -+ rtmp->next = NULL; -+ } -+ -+ return 0; -+} -+ -+static int -+copy_user_transitions(struct acl_role_label *rolep) -+{ -+ struct role_transition *rusertp, *rtmp = NULL, *rlast; -+ -+ unsigned int len; -+ char *tmp; -+ -+ rusertp = rolep->transitions; -+ -+ while (rusertp) { -+ rlast = rtmp; -+ -+ if ((rtmp = (struct role_transition *) -+ acl_alloc(sizeof (struct role_transition))) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(rtmp, rusertp, -+ sizeof (struct role_transition))) -+ return -EFAULT; -+ -+ rusertp = rtmp->prev; -+ -+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); -+ -+ if (!len || len >= GR_SPROLE_LEN) -+ return -EINVAL; -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) -+ return -ENOMEM; -+ -+ if (copy_from_user(tmp, rtmp->rolename, len)) -+ return -EFAULT; -+ tmp[len-1] = '\0'; -+ rtmp->rolename = tmp; -+ -+ if (!rlast) { -+ rtmp->prev = NULL; -+ rolep->transitions = rtmp; -+ } else { -+ rlast->next = rtmp; -+ rtmp->prev = rlast; -+ } -+ -+ if (!rusertp) -+ rtmp->next = NULL; -+ } -+ -+ return 0; -+} -+ -+static struct acl_subject_label * -+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) -+{ -+ struct acl_subject_label *s_tmp = NULL, *s_tmp2; -+ unsigned int len; -+ char *tmp; -+ __u32 num_objs; -+ struct acl_ip_label **i_tmp, *i_utmp2; -+ struct gr_hash_struct ghash; -+ struct subject_map *subjmap; -+ unsigned int i_num; -+ int err; -+ -+ s_tmp = lookup_subject_map(userp); -+ -+ /* we've already copied this subject into the kernel, just return -+ the reference to it, and don't copy it over again -+ */ -+ if (s_tmp) -+ return(s_tmp); -+ -+ if ((s_tmp = (struct acl_subject_label *) -+ acl_alloc(sizeof (struct acl_subject_label))) == NULL) -+ return ERR_PTR(-ENOMEM); -+ -+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); -+ if (subjmap == NULL) -+ return ERR_PTR(-ENOMEM); -+ -+ subjmap->user = userp; -+ subjmap->kernel = s_tmp; -+ insert_subj_map_entry(subjmap); -+ -+ if (copy_from_user(s_tmp, userp, -+ sizeof (struct acl_subject_label))) -+ return ERR_PTR(-EFAULT); -+ -+ len = strnlen_user(s_tmp->filename, PATH_MAX); -+ -+ if (!len || len >= PATH_MAX) -+ return ERR_PTR(-EINVAL); -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) -+ return ERR_PTR(-ENOMEM); -+ -+ if (copy_from_user(tmp, s_tmp->filename, len)) -+ return ERR_PTR(-EFAULT); -+ tmp[len-1] = '\0'; -+ s_tmp->filename = tmp; -+ -+ if (!strcmp(s_tmp->filename, "/")) -+ role->root_label = s_tmp; -+ -+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) -+ return ERR_PTR(-EFAULT); -+ -+ /* copy user and group transition tables */ -+ -+ if (s_tmp->user_trans_num) { -+ uid_t *uidlist; -+ -+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); -+ if (uidlist 
== NULL) -+ return ERR_PTR(-ENOMEM); -+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) -+ return ERR_PTR(-EFAULT); -+ -+ s_tmp->user_transitions = uidlist; -+ } -+ -+ if (s_tmp->group_trans_num) { -+ gid_t *gidlist; -+ -+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); -+ if (gidlist == NULL) -+ return ERR_PTR(-ENOMEM); -+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) -+ return ERR_PTR(-EFAULT); -+ -+ s_tmp->group_transitions = gidlist; -+ } -+ -+ /* set up object hash table */ -+ num_objs = count_user_objs(ghash.first); -+ -+ s_tmp->obj_hash_size = num_objs; -+ s_tmp->obj_hash = -+ (struct acl_object_label **) -+ create_table(&(s_tmp->obj_hash_size), sizeof(void *)); -+ -+ if (!s_tmp->obj_hash) -+ return ERR_PTR(-ENOMEM); -+ -+ memset(s_tmp->obj_hash, 0, -+ s_tmp->obj_hash_size * -+ sizeof (struct acl_object_label *)); -+ -+ /* add in objects */ -+ err = copy_user_objs(ghash.first, s_tmp, role); -+ -+ if (err) -+ return ERR_PTR(err); -+ -+ /* set pointer for parent subject */ -+ if (s_tmp->parent_subject) { -+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); -+ -+ if (IS_ERR(s_tmp2)) -+ return s_tmp2; -+ -+ s_tmp->parent_subject = s_tmp2; -+ } -+ -+ /* add in ip acls */ -+ -+ if (!s_tmp->ip_num) { -+ s_tmp->ips = NULL; -+ goto insert; -+ } -+ -+ i_tmp = -+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, -+ sizeof (struct acl_ip_label *)); -+ -+ if (!i_tmp) -+ return ERR_PTR(-ENOMEM); -+ -+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { -+ *(i_tmp + i_num) = -+ (struct acl_ip_label *) -+ acl_alloc(sizeof (struct acl_ip_label)); -+ if (!*(i_tmp + i_num)) -+ return ERR_PTR(-ENOMEM); -+ -+ if (copy_from_user -+ (&i_utmp2, s_tmp->ips + i_num, -+ sizeof (struct acl_ip_label *))) -+ return ERR_PTR(-EFAULT); -+ -+ if (copy_from_user -+ (*(i_tmp + i_num), i_utmp2, -+ sizeof (struct acl_ip_label))) -+ return ERR_PTR(-EFAULT); -+ -+ if ((*(i_tmp + i_num))->iface == NULL) -+ continue; -+ -+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ); -+ if (!len || len >= IFNAMSIZ) -+ return ERR_PTR(-EINVAL); -+ tmp = acl_alloc(len); -+ if (tmp == NULL) -+ return ERR_PTR(-ENOMEM); -+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len)) -+ return ERR_PTR(-EFAULT); -+ (*(i_tmp + i_num))->iface = tmp; -+ } -+ -+ s_tmp->ips = i_tmp; -+ -+insert: -+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode, -+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
1 : 0)) -+ return ERR_PTR(-ENOMEM); -+ -+ return s_tmp; -+} -+ -+static int -+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) -+{ -+ struct acl_subject_label s_pre; -+ struct acl_subject_label * ret; -+ int err; -+ -+ while (userp) { -+ if (copy_from_user(&s_pre, userp, -+ sizeof (struct acl_subject_label))) -+ return -EFAULT; -+ -+ /* do not add nested subjects here, add -+ while parsing objects -+ */ -+ -+ if (s_pre.mode & GR_NESTED) { -+ userp = s_pre.prev; -+ continue; -+ } -+ -+ ret = do_copy_user_subj(userp, role); -+ -+ err = PTR_ERR(ret); -+ if (IS_ERR(ret)) -+ return err; -+ -+ insert_acl_subj_label(ret, role); -+ -+ userp = s_pre.prev; -+ } -+ -+ return 0; -+} -+ -+static int -+copy_user_acl(struct gr_arg *arg) -+{ -+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; -+ struct sprole_pw *sptmp; -+ struct gr_hash_struct *ghash; -+ uid_t *domainlist; -+ unsigned int r_num; -+ unsigned int len; -+ char *tmp; -+ int err = 0; -+ __u16 i; -+ __u32 num_subjs; -+ -+ /* we need a default and kernel role */ -+ if (arg->role_db.num_roles < 2) -+ return -EINVAL; -+ -+ /* copy special role authentication info from userspace */ -+ -+ num_sprole_pws = arg->num_sprole_pws; -+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *)); -+ -+ if (!acl_special_roles) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ -+ for (i = 0; i < num_sprole_pws; i++) { -+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); -+ if (!sptmp) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ if (copy_from_user(sptmp, arg->sprole_pws + i, -+ sizeof (struct sprole_pw))) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ -+ len = -+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN); -+ -+ if (!len || len >= GR_SPROLE_LEN) { -+ err = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ -+ if (copy_from_user(tmp, sptmp->rolename, len)) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ tmp[len-1] = '\0'; -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "Copying special role %s\n", tmp); -+#endif -+ sptmp->rolename = tmp; -+ acl_special_roles[i] = sptmp; -+ } -+ -+ r_utmp = (struct acl_role_label **) arg->role_db.r_table; -+ -+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { -+ r_tmp = acl_alloc(sizeof (struct acl_role_label)); -+ -+ if (!r_tmp) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ -+ if (copy_from_user(&r_utmp2, r_utmp + r_num, -+ sizeof (struct acl_role_label *))) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ -+ if (copy_from_user(r_tmp, r_utmp2, -+ sizeof (struct acl_role_label))) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ -+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); -+ -+ if (!len || len >= PATH_MAX) { -+ err = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((tmp = (char *) acl_alloc(len)) == NULL) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ if (copy_from_user(tmp, r_tmp->rolename, len)) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ tmp[len-1] = '\0'; -+ r_tmp->rolename = tmp; -+ -+ if (!strcmp(r_tmp->rolename, "default") -+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) { -+ default_role = r_tmp; -+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { -+ kernel_role = r_tmp; -+ } -+ -+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ -+ r_tmp->hash = 
ghash; -+ -+ num_subjs = count_user_subjs(r_tmp->hash->first); -+ -+ r_tmp->subj_hash_size = num_subjs; -+ r_tmp->subj_hash = -+ (struct acl_subject_label **) -+ create_table(&(r_tmp->subj_hash_size), sizeof(void *)); -+ -+ if (!r_tmp->subj_hash) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ -+ err = copy_user_allowedips(r_tmp); -+ if (err) -+ goto cleanup; -+ -+ /* copy domain info */ -+ if (r_tmp->domain_children != NULL) { -+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t)); -+ if (domainlist == NULL) { -+ err = -ENOMEM; -+ goto cleanup; -+ } -+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) { -+ err = -EFAULT; -+ goto cleanup; -+ } -+ r_tmp->domain_children = domainlist; -+ } -+ -+ err = copy_user_transitions(r_tmp); -+ if (err) -+ goto cleanup; -+ -+ memset(r_tmp->subj_hash, 0, -+ r_tmp->subj_hash_size * -+ sizeof (struct acl_subject_label *)); -+ -+ err = copy_user_subjs(r_tmp->hash->first, r_tmp); -+ -+ if (err) -+ goto cleanup; -+ -+ /* set nested subject list to null */ -+ r_tmp->hash->first = NULL; -+ -+ insert_acl_role_label(r_tmp); -+ } -+ -+ goto return_err; -+ cleanup: -+ free_variables(); -+ return_err: -+ return err; -+ -+} -+ -+static int -+gracl_init(struct gr_arg *args) -+{ -+ int error = 0; -+ -+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN); -+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN); -+ -+ if (init_variables(args)) { -+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); -+ error = -ENOMEM; -+ free_variables(); -+ goto out; -+ } -+ -+ error = copy_user_acl(args); -+ free_init_variables(); -+ if (error) { -+ free_variables(); -+ goto out; -+ } -+ -+ if ((error = gr_set_acls(0))) { -+ free_variables(); -+ goto out; -+ } -+ -+ pax_open_kernel(); -+ gr_status |= GR_READY; -+ pax_close_kernel(); -+ -+ out: -+ return error; -+} -+ -+/* derived from glibc fnmatch() 0: match, 1: no match*/ -+ -+static int -+glob_match(const char *p, const char *n) -+{ -+ char c; -+ -+ while ((c = *p++) != '\0') { -+ switch (c) { -+ case '?': -+ if (*n == '\0') -+ return 1; -+ else if (*n == '/') -+ return 1; -+ break; -+ case '\\': -+ if (*n != c) -+ return 1; -+ break; -+ case '*': -+ for (c = *p++; c == '?' || c == '*'; c = *p++) { -+ if (*n == '/') -+ return 1; -+ else if (c == '?') { -+ if (*n == '\0') -+ return 1; -+ else -+ ++n; -+ } -+ } -+ if (c == '\0') { -+ return 0; -+ } else { -+ const char *endp; -+ -+ if ((endp = strchr(n, '/')) == NULL) -+ endp = n + strlen(n); -+ -+ if (c == '[') { -+ for (--p; n < endp; ++n) -+ if (!glob_match(p, n)) -+ return 0; -+ } else if (c == '/') { -+ while (*n != '\0' && *n != '/') -+ ++n; -+ if (*n == '/' && !glob_match(p, n + 1)) -+ return 0; -+ } else { -+ for (--p; n < endp; ++n) -+ if (*n == c && !glob_match(p, n)) -+ return 0; -+ } -+ -+ return 1; -+ } -+ case '[': -+ { -+ int not; -+ char cold; -+ -+ if (*n == '\0' || *n == '/') -+ return 1; -+ -+ not = (*p == '!'
|| *p == '^'); -+ if (not) -+ ++p; -+ -+ c = *p++; -+ for (;;) { -+ unsigned char fn = (unsigned char)*n; -+ -+ if (c == '\0') -+ return 1; -+ else { -+ if (c == fn) -+ goto matched; -+ cold = c; -+ c = *p++; -+ -+ if (c == '-' && *p != ']') { -+ unsigned char cend = *p++; -+ -+ if (cend == '\0') -+ return 1; -+ -+ if (cold <= fn && fn <= cend) -+ goto matched; -+ -+ c = *p++; -+ } -+ } -+ -+ if (c == ']') -+ break; -+ } -+ if (!not) -+ return 1; -+ break; -+ matched: -+ while (c != ']') { -+ if (c == '\0') -+ return 1; -+ -+ c = *p++; -+ } -+ if (not) -+ return 1; -+ } -+ break; -+ default: -+ if (c != *n) -+ return 1; -+ } -+ -+ ++n; -+ } -+ -+ if (*n == '\0') -+ return 0; -+ -+ if (*n == '/') -+ return 0; -+ -+ return 1; -+} -+ -+static struct acl_object_label * -+chk_glob_label(struct acl_object_label *globbed, -+ struct dentry *dentry, struct vfsmount *mnt, char **path) -+{ -+ struct acl_object_label *tmp; -+ -+ if (*path == NULL) -+ *path = gr_to_filename_nolock(dentry, mnt); -+ -+ tmp = globbed; -+ -+ while (tmp) { -+ if (!glob_match(tmp->filename, *path)) -+ return tmp; -+ tmp = tmp->next; -+ } -+ -+ return NULL; -+} -+ -+static struct acl_object_label * -+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, -+ const ino_t curr_ino, const dev_t curr_dev, -+ const struct acl_subject_label *subj, char **path, const int checkglob) -+{ -+ struct acl_subject_label *tmpsubj; -+ struct acl_object_label *retval; -+ struct acl_object_label *retval2; -+ -+ tmpsubj = (struct acl_subject_label *) subj; -+ read_lock(&gr_inode_lock); -+ do { -+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); -+ if (retval) { -+ if (checkglob && retval->globbed) { -+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry, -+ (struct vfsmount *)orig_mnt, path); -+ if (retval2) -+ retval = retval2; -+ } -+ break; -+ } -+ } while ((tmpsubj = tmpsubj->parent_subject)); -+ read_unlock(&gr_inode_lock); -+ -+ return retval; -+} -+ -+static __inline__ struct acl_object_label * -+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, -+ struct dentry *curr_dentry, -+ const struct acl_subject_label *subj, char **path, const int checkglob) -+{ -+ int newglob = checkglob; -+ ino_t inode; -+ dev_t device; -+ -+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking -+ as we don't want a / * rule to match instead of the / object -+ don't do this for create lookups that call this function though, since they're looking up -+ on the parent and thus need globbing checks on all paths -+ */ -+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB) -+ newglob = GR_NO_GLOB; -+ -+ spin_lock(&curr_dentry->d_lock); -+ inode = curr_dentry->d_inode->i_ino; -+ device = __get_dev(curr_dentry); -+ spin_unlock(&curr_dentry->d_lock); -+ -+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob); -+} -+ -+static struct acl_object_label * -+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, -+ const struct acl_subject_label *subj, char *path, const int checkglob) -+{ -+ struct dentry *dentry = (struct dentry *) l_dentry; -+ struct vfsmount *mnt = (struct vfsmount *) l_mnt; -+ struct acl_object_label *retval; -+ struct dentry *parent; -+ -+ write_seqlock(&rename_lock); -+ br_read_lock(vfsmount_lock); -+ -+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt || -+#ifdef CONFIG_NET -+ mnt == sock_mnt || -+#endif -+#ifdef CONFIG_HUGETLBFS -+ (mnt == 
hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) || -+#endif -+ /* ignore Eric Biederman */ -+ IS_PRIVATE(l_dentry->d_inode))) { -+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw; -+ goto out; -+ } -+ -+ for (;;) { -+ if (dentry == real_root.dentry && mnt == real_root.mnt) -+ break; -+ -+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { -+ if (mnt->mnt_parent == mnt) -+ break; -+ -+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); -+ if (retval != NULL) -+ goto out; -+ -+ dentry = mnt->mnt_mountpoint; -+ mnt = mnt->mnt_parent; -+ continue; -+ } -+ -+ parent = dentry->d_parent; -+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); -+ if (retval != NULL) -+ goto out; -+ -+ dentry = parent; -+ } -+ -+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); -+ -+ /* real_root is pinned so we don't have to hold a reference */ -+ if (retval == NULL) -+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob); -+out: -+ br_read_unlock(vfsmount_lock); -+ write_sequnlock(&rename_lock); -+ -+ BUG_ON(retval == NULL); -+ -+ return retval; -+} -+ -+static __inline__ struct acl_object_label * -+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, -+ const struct acl_subject_label *subj) -+{ -+ char *path = NULL; -+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB); -+} -+ -+static __inline__ struct acl_object_label * -+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, -+ const struct acl_subject_label *subj) -+{ -+ char *path = NULL; -+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB); -+} -+ -+static __inline__ struct acl_object_label * -+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, -+ const struct acl_subject_label *subj, char *path) -+{ -+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB); -+} -+ -+static struct acl_subject_label * -+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, -+ const struct acl_role_label *role) -+{ -+ struct dentry *dentry = (struct dentry *) l_dentry; -+ struct vfsmount *mnt = (struct vfsmount *) l_mnt; -+ struct acl_subject_label *retval; -+ struct dentry *parent; -+ -+ write_seqlock(&rename_lock); -+ br_read_lock(vfsmount_lock); -+ -+ for (;;) { -+ if (dentry == real_root.dentry && mnt == real_root.mnt) -+ break; -+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { -+ if (mnt->mnt_parent == mnt) -+ break; -+ -+ spin_lock(&dentry->d_lock); -+ read_lock(&gr_inode_lock); -+ retval = -+ lookup_acl_subj_label(dentry->d_inode->i_ino, -+ __get_dev(dentry), role); -+ read_unlock(&gr_inode_lock); -+ spin_unlock(&dentry->d_lock); -+ if (retval != NULL) -+ goto out; -+ -+ dentry = mnt->mnt_mountpoint; -+ mnt = mnt->mnt_parent; -+ continue; -+ } -+ -+ spin_lock(&dentry->d_lock); -+ read_lock(&gr_inode_lock); -+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, -+ __get_dev(dentry), role); -+ read_unlock(&gr_inode_lock); -+ parent = dentry->d_parent; -+ spin_unlock(&dentry->d_lock); -+ -+ if (retval != NULL) -+ goto out; -+ -+ dentry = parent; -+ } -+ -+ spin_lock(&dentry->d_lock); -+ read_lock(&gr_inode_lock); -+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, -+ __get_dev(dentry), role); -+ read_unlock(&gr_inode_lock); -+ spin_unlock(&dentry->d_lock); -+ -+ if (unlikely(retval == NULL)) { -+ /* real_root is pinned, we don't need to hold a reference */ -+ read_lock(&gr_inode_lock); -+ retval = 
lookup_acl_subj_label(real_root.dentry->d_inode->i_ino, -+ __get_dev(real_root.dentry), role); -+ read_unlock(&gr_inode_lock); -+ } -+out: -+ br_read_unlock(vfsmount_lock); -+ write_sequnlock(&rename_lock); -+ -+ BUG_ON(retval == NULL); -+ -+ return retval; -+} -+ -+static void -+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) -+{ -+ struct task_struct *task = current; -+ const struct cred *cred = current_cred(); -+ -+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, -+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, -+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, -+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip); -+ -+ return; -+} -+ -+static void -+gr_log_learn_sysctl(const char *path, const __u32 mode) -+{ -+ struct task_struct *task = current; -+ const struct cred *cred = current_cred(); -+ -+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, -+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, -+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, -+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip); -+ -+ return; -+} -+ -+static void -+gr_log_learn_id_change(const char type, const unsigned int real, -+ const unsigned int effective, const unsigned int fs) -+{ -+ struct task_struct *task = current; -+ const struct cred *cred = current_cred(); -+ -+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, -+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, -+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, -+ type, real, effective, fs, &task->signal->saved_ip); -+ -+ return; -+} -+ -+__u32 -+gr_search_file(const struct dentry * dentry, const __u32 mode, -+ const struct vfsmount * mnt) -+{ -+ __u32 retval = mode; -+ struct acl_subject_label *curracl; -+ struct acl_object_label *currobj; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return (mode & ~GR_AUDITS); -+ -+ curracl = current->acl; -+ -+ currobj = chk_obj_label(dentry, mnt, curracl); -+ retval = currobj->mode & mode; -+ -+ /* if we're opening a specified transfer file for writing -+ (e.g. 
/dev/initctl), then transfer our role to init -+ */ -+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE && -+ current->role->roletype & GR_ROLE_PERSIST)) { -+ struct task_struct *task = init_pid_ns.child_reaper; -+ -+ if (task->role != current->role) { -+ task->acl_sp_role = 0; -+ task->acl_role_id = current->acl_role_id; -+ task->role = current->role; -+ rcu_read_lock(); -+ read_lock(&grsec_exec_file_lock); -+ gr_apply_subject_to_task(task); -+ read_unlock(&grsec_exec_file_lock); -+ rcu_read_unlock(); -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG); -+ } -+ } -+ -+ if (unlikely -+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) -+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { -+ __u32 new_mode = mode; -+ -+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); -+ -+ retval = new_mode; -+ -+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) -+ new_mode |= GR_INHERIT; -+ -+ if (!(mode & GR_NOLEARN)) -+ gr_log_learn(dentry, mnt, new_mode); -+ } -+ -+ return retval; -+} -+ -+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry, -+ const struct dentry *parent, -+ const struct vfsmount *mnt) -+{ -+ struct name_entry *match; -+ struct acl_object_label *matchpo; -+ struct acl_subject_label *curracl; -+ char *path; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return NULL; -+ -+ preempt_disable(); -+ path = gr_to_filename_rbac(new_dentry, mnt); -+ match = lookup_name_entry_create(path); -+ -+ curracl = current->acl; -+ -+ if (match) { -+ read_lock(&gr_inode_lock); -+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); -+ read_unlock(&gr_inode_lock); -+ -+ if (matchpo) { -+ preempt_enable(); -+ return matchpo; -+ } -+ } -+ -+ // lookup parent -+ -+ matchpo = chk_obj_create_label(parent, mnt, curracl, path); -+ -+ preempt_enable(); -+ return matchpo; -+} -+ -+__u32 -+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, -+ const struct vfsmount * mnt, const __u32 mode) -+{ -+ struct acl_object_label *matchpo; -+ __u32 retval; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return (mode & ~GR_AUDITS); -+ -+ matchpo = gr_get_create_object(new_dentry, parent, mnt); -+ -+ retval = matchpo->mode & mode; -+ -+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) -+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { -+ __u32 new_mode = mode; -+ -+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); -+ -+ gr_log_learn(new_dentry, mnt, new_mode); -+ return new_mode; -+ } -+ -+ return retval; -+} -+ -+__u32 -+gr_check_link(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, -+ const struct dentry * old_dentry, const struct vfsmount * old_mnt) -+{ -+ struct acl_object_label *obj; -+ __u32 oldmode, newmode; -+ __u32 needmode; -+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ | -+ GR_DELETE | GR_INHERIT; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return (GR_CREATE | GR_LINK); -+ -+ obj = chk_obj_label(old_dentry, old_mnt, current->acl); -+ oldmode = obj->mode; -+ -+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt); -+ newmode = obj->mode; -+ -+ needmode = newmode & checkmodes; -+ -+ // old name for hardlink must have at least the permissions of the new name -+ if ((oldmode & needmode) != needmode) -+ goto bad; -+ -+ // if old name had restrictions/auditing, make sure the new name does as well -+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); 
-+ -+ // don't allow hardlinking of suid/sgid files without permission -+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) -+ needmode |= GR_SETID; -+ -+ if ((newmode & needmode) != needmode) -+ goto bad; -+ -+ // enforce minimum permissions -+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) -+ return newmode; -+bad: -+ needmode = oldmode; -+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) -+ needmode |= GR_SETID; -+ -+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { -+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK); -+ return (GR_CREATE | GR_LINK); -+ } else if (newmode & GR_SUPPRESS) -+ return GR_SUPPRESS; -+ else -+ return 0; -+} -+ -+int -+gr_check_hidden_task(const struct task_struct *task) -+{ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) -+ return 1; -+ -+ return 0; -+} -+ -+int -+gr_check_protected_task(const struct task_struct *task) -+{ -+ if (unlikely(!(gr_status & GR_READY) || !task)) -+ return 0; -+ -+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && -+ task->acl != current->acl) -+ return 1; -+ -+ return 0; -+} -+ -+int -+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) -+{ -+ struct task_struct *p; -+ int ret = 0; -+ -+ if (unlikely(!(gr_status & GR_READY) || !pid)) -+ return ret; -+ -+ read_lock(&tasklist_lock); -+ do_each_pid_task(pid, type, p) { -+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && -+ p->acl != current->acl) { -+ ret = 1; -+ goto out; -+ } -+ } while_each_pid_task(pid, type, p); -+out: -+ read_unlock(&tasklist_lock); -+ -+ return ret; -+} -+ -+void -+gr_copy_label(struct task_struct *tsk) -+{ -+ tsk->signal->used_accept = 0; -+ tsk->acl_sp_role = 0; -+ tsk->acl_role_id = current->acl_role_id; -+ tsk->acl = current->acl; -+ tsk->role = current->role; -+ tsk->signal->curr_ip = current->signal->curr_ip; -+ tsk->signal->saved_ip = current->signal->saved_ip; -+ if (current->exec_file) -+ get_file(current->exec_file); -+ tsk->exec_file = current->exec_file; -+ tsk->is_writable = current->is_writable; -+ if (unlikely(current->signal->used_accept)) { -+ current->signal->curr_ip = 0; -+ current->signal->saved_ip = 0; -+ } -+ -+ return; -+} -+ -+static void -+gr_set_proc_res(struct task_struct *task) -+{ -+ struct acl_subject_label *proc; -+ unsigned short i; -+ -+ proc = task->acl; -+ -+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) -+ return; -+ -+ for (i = 0; i < RLIM_NLIMITS; i++) { -+ if (!(proc->resmask & (1 << i))) -+ continue; -+ -+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; -+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; -+ } -+ -+ return; -+} -+ -+extern int __gr_process_user_ban(struct user_struct *user); -+ -+int -+gr_check_user_change(int real, int effective, int fs) -+{ -+ unsigned int i; -+ __u16 num; -+ uid_t *uidlist; -+ int curuid; -+ int realok = 0; -+ int effectiveok = 0; -+ int fsok = 0; -+ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ struct user_struct *user; -+ -+ if (real == -1) -+ goto skipit; -+ -+ user = find_user(real); -+ if (user == NULL) -+ goto skipit; -+ -+ if (__gr_process_user_ban(user)) { -+ /* for find_user */ -+ free_uid(user); -+ return 1; -+ } -+ -+ /* for find_user */ -+ free_uid(user); -+ -+skipit: -+#endif -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) -+ gr_log_learn_id_change('u', 
real, effective, fs); -+ -+ num = current->acl->user_trans_num; -+ uidlist = current->acl->user_transitions; -+ -+ if (uidlist == NULL) -+ return 0; -+ -+ if (real == -1) -+ realok = 1; -+ if (effective == -1) -+ effectiveok = 1; -+ if (fs == -1) -+ fsok = 1; -+ -+ if (current->acl->user_trans_type & GR_ID_ALLOW) { -+ for (i = 0; i < num; i++) { -+ curuid = (int)uidlist[i]; -+ if (real == curuid) -+ realok = 1; -+ if (effective == curuid) -+ effectiveok = 1; -+ if (fs == curuid) -+ fsok = 1; -+ } -+ } else if (current->acl->user_trans_type & GR_ID_DENY) { -+ for (i = 0; i < num; i++) { -+ curuid = (int)uidlist[i]; -+ if (real == curuid) -+ break; -+ if (effective == curuid) -+ break; -+ if (fs == curuid) -+ break; -+ } -+ /* not in deny list */ -+ if (i == num) { -+ realok = 1; -+ effectiveok = 1; -+ fsok = 1; -+ } -+ } -+ -+ if (realok && effectiveok && fsok) -+ return 0; -+ else { -+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); -+ return 1; -+ } -+} -+ -+int -+gr_check_group_change(int real, int effective, int fs) -+{ -+ unsigned int i; -+ __u16 num; -+ gid_t *gidlist; -+ int curgid; -+ int realok = 0; -+ int effectiveok = 0; -+ int fsok = 0; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) -+ gr_log_learn_id_change('g', real, effective, fs); -+ -+ num = current->acl->group_trans_num; -+ gidlist = current->acl->group_transitions; -+ -+ if (gidlist == NULL) -+ return 0; -+ -+ if (real == -1) -+ realok = 1; -+ if (effective == -1) -+ effectiveok = 1; -+ if (fs == -1) -+ fsok = 1; -+ -+ if (current->acl->group_trans_type & GR_ID_ALLOW) { -+ for (i = 0; i < num; i++) { -+ curgid = (int)gidlist[i]; -+ if (real == curgid) -+ realok = 1; -+ if (effective == curgid) -+ effectiveok = 1; -+ if (fs == curgid) -+ fsok = 1; -+ } -+ } else if (current->acl->group_trans_type & GR_ID_DENY) { -+ for (i = 0; i < num; i++) { -+ curgid = (int)gidlist[i]; -+ if (real == curgid) -+ break; -+ if (effective == curgid) -+ break; -+ if (fs == curgid) -+ break; -+ } -+ /* not in deny list */ -+ if (i == num) { -+ realok = 1; -+ effectiveok = 1; -+ fsok = 1; -+ } -+ } -+ -+ if (realok && effectiveok && fsok) -+ return 0; -+ else { -+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
0 : fs) : effective) : real); -+ return 1; -+ } -+} -+ -+void -+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) -+{ -+ struct acl_role_label *role = task->role; -+ struct acl_subject_label *subj = NULL; -+ struct acl_object_label *obj; -+ struct file *filp; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ filp = task->exec_file; -+ -+ /* kernel process, we'll give them the kernel role */ -+ if (unlikely(!filp)) { -+ task->role = kernel_role; -+ task->acl = kernel_role->root_label; -+ return; -+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) -+ role = lookup_acl_role_label(task, uid, gid); -+ -+ /* perform subject lookup in possibly new role -+ we can use this result below in the case where role == task->role -+ */ -+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); -+ -+ /* if we changed uid/gid, but result in the same role -+ and are using inheritance, don't lose the inherited subject -+ if current subject is other than what normal lookup -+ would result in, we arrived via inheritance, don't -+ lose subject -+ */ -+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && -+ (subj == task->acl))) -+ task->acl = subj; -+ -+ task->role = role; -+ -+ task->is_writable = 0; -+ -+ /* ignore additional mmap checks for processes that are writable -+ by the default ACL */ -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); -+#endif -+ -+ gr_set_proc_res(task); -+ -+ return; -+} -+ -+int -+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, -+ const int unsafe_share) -+{ -+ struct task_struct *task = current; -+ struct acl_subject_label *newacl; -+ struct acl_object_label *obj; -+ __u32 retmode; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ newacl = chk_subj_label(dentry, mnt, task->role); -+ -+ task_lock(task); -+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) && -+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && -+ !(task->role->roletype & GR_ROLE_GOD) && -+ !gr_search_file(dentry, GR_PTRACERD, mnt) && -+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) { -+ task_unlock(task); -+ if (unsafe_share) -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); -+ else -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); -+ return -EACCES; -+ } -+ task_unlock(task); -+ -+ obj = chk_obj_label(dentry, mnt, task->acl); -+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); -+ -+ if (!(task->acl->mode & GR_INHERITLEARN) && -+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { -+ if (obj->nested) -+ task->acl = obj->nested; -+ else -+ task->acl = newacl; -+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) -+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); -+ -+ task->is_writable = 0; -+ -+ /* ignore additional mmap checks for processes that are writable -+ by the default ACL */ -+ obj = chk_obj_label(dentry, mnt, default_role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ obj = 
chk_obj_label(dentry, mnt, task->role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ -+ gr_set_proc_res(task); -+ -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); -+#endif -+ return 0; -+} -+ -+/* always called with valid inodev ptr */ -+static void -+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev) -+{ -+ struct acl_object_label *matchpo; -+ struct acl_subject_label *matchps; -+ struct acl_subject_label *subj; -+ struct acl_role_label *role; -+ unsigned int x; -+ -+ FOR_EACH_ROLE_START(role) -+ FOR_EACH_SUBJECT_START(role, subj, x) -+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) -+ matchpo->mode |= GR_DELETED; -+ FOR_EACH_SUBJECT_END(subj,x) -+ FOR_EACH_NESTED_SUBJECT_START(role, subj) -+ if (subj->inode == ino && subj->device == dev) -+ subj->mode |= GR_DELETED; -+ FOR_EACH_NESTED_SUBJECT_END(subj) -+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) -+ matchps->mode |= GR_DELETED; -+ FOR_EACH_ROLE_END(role) -+ -+ inodev->nentry->deleted = 1; -+ -+ return; -+} -+ -+void -+gr_handle_delete(const ino_t ino, const dev_t dev) -+{ -+ struct inodev_entry *inodev; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ write_lock(&gr_inode_lock); -+ inodev = lookup_inodev_entry(ino, dev); -+ if (inodev != NULL) -+ do_handle_delete(inodev, ino, dev); -+ write_unlock(&gr_inode_lock); -+ -+ return; -+} -+ -+static void -+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, -+ const ino_t newinode, const dev_t newdevice, -+ struct acl_subject_label *subj) -+{ -+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size); -+ struct acl_object_label *match; -+ -+ match = subj->obj_hash[index]; -+ -+ while (match && (match->inode != oldinode || -+ match->device != olddevice || -+ !(match->mode & GR_DELETED))) -+ match = match->next; -+ -+ if (match && (match->inode == oldinode) -+ && (match->device == olddevice) -+ && (match->mode & GR_DELETED)) { -+ if (match->prev == NULL) { -+ subj->obj_hash[index] = match->next; -+ if (match->next != NULL) -+ match->next->prev = NULL; -+ } else { -+ match->prev->next = match->next; -+ if (match->next != NULL) -+ match->next->prev = match->prev; -+ } -+ match->prev = NULL; -+ match->next = NULL; -+ match->inode = newinode; -+ match->device = newdevice; -+ match->mode &= ~GR_DELETED; -+ -+ insert_acl_obj_label(match, subj); -+ } -+ -+ return; -+} -+ -+static void -+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, -+ const ino_t newinode, const dev_t newdevice, -+ struct acl_role_label *role) -+{ -+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size); -+ struct acl_subject_label *match; -+ -+ match = role->subj_hash[index]; -+ -+ while (match && (match->inode != oldinode || -+ match->device != olddevice || -+ !(match->mode & GR_DELETED))) -+ match = match->next; -+ -+ if (match && (match->inode == oldinode) -+ && (match->device == olddevice) -+ && (match->mode & GR_DELETED)) { -+ if (match->prev == NULL) { -+ role->subj_hash[index] = match->next; -+ if (match->next != NULL) -+ match->next->prev = NULL; -+ } else { -+ match->prev->next = match->next; -+ if (match->next != NULL) -+ match->next->prev = match->prev; -+ } -+ match->prev = NULL; -+ match->next = NULL; -+ match->inode = newinode; -+ match->device = newdevice; -+ match->mode &= ~GR_DELETED; -+ -+ 
insert_acl_subj_label(match, role); -+ } -+ -+ return; -+} -+ -+static void -+update_inodev_entry(const ino_t oldinode, const dev_t olddevice, -+ const ino_t newinode, const dev_t newdevice) -+{ -+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size); -+ struct inodev_entry *match; -+ -+ match = inodev_set.i_hash[index]; -+ -+ while (match && (match->nentry->inode != oldinode || -+ match->nentry->device != olddevice || !match->nentry->deleted)) -+ match = match->next; -+ -+ if (match && (match->nentry->inode == oldinode) -+ && (match->nentry->device == olddevice) && -+ match->nentry->deleted) { -+ if (match->prev == NULL) { -+ inodev_set.i_hash[index] = match->next; -+ if (match->next != NULL) -+ match->next->prev = NULL; -+ } else { -+ match->prev->next = match->next; -+ if (match->next != NULL) -+ match->next->prev = match->prev; -+ } -+ match->prev = NULL; -+ match->next = NULL; -+ match->nentry->inode = newinode; -+ match->nentry->device = newdevice; -+ match->nentry->deleted = 0; -+ -+ insert_inodev_entry(match); -+ } -+ -+ return; -+} -+ -+static void -+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev) -+{ -+ struct acl_subject_label *subj; -+ struct acl_role_label *role; -+ unsigned int x; -+ -+ FOR_EACH_ROLE_START(role) -+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role); -+ -+ FOR_EACH_NESTED_SUBJECT_START(role, subj) -+ if ((subj->inode == ino) && (subj->device == dev)) { -+ subj->inode = ino; -+ subj->device = dev; -+ } -+ FOR_EACH_NESTED_SUBJECT_END(subj) -+ FOR_EACH_SUBJECT_START(role, subj, x) -+ update_acl_obj_label(matchn->inode, matchn->device, -+ ino, dev, subj); -+ FOR_EACH_SUBJECT_END(subj,x) -+ FOR_EACH_ROLE_END(role) -+ -+ update_inodev_entry(matchn->inode, matchn->device, ino, dev); -+ -+ return; -+} -+ -+static void -+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, -+ const struct vfsmount *mnt) -+{ -+ ino_t ino = dentry->d_inode->i_ino; -+ dev_t dev = __get_dev(dentry); -+ -+ __do_handle_create(matchn, ino, dev); -+ -+ return; -+} -+ -+void -+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ struct name_entry *matchn; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ preempt_disable(); -+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); -+ -+ if (unlikely((unsigned long)matchn)) { -+ write_lock(&gr_inode_lock); -+ do_handle_create(matchn, dentry, mnt); -+ write_unlock(&gr_inode_lock); -+ } -+ preempt_enable(); -+ -+ return; -+} -+ -+void -+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) -+{ -+ struct name_entry *matchn; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ preempt_disable(); -+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt)); -+ -+ if (unlikely((unsigned long)matchn)) { -+ write_lock(&gr_inode_lock); -+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev); -+ write_unlock(&gr_inode_lock); -+ } -+ preempt_enable(); -+ -+ return; -+} -+ -+void -+gr_handle_rename(struct inode *old_dir, struct inode *new_dir, -+ struct dentry *old_dentry, -+ struct dentry *new_dentry, -+ struct vfsmount *mnt, const __u8 replace) -+{ -+ struct name_entry *matchn; -+ struct inodev_entry *inodev; -+ struct inode *inode = new_dentry->d_inode; -+ ino_t old_ino = old_dentry->d_inode->i_ino; -+ dev_t old_dev = __get_dev(old_dentry); -+ -+ /* vfs_rename swaps the name and parent link for old_dentry and -+ new_dentry -+ at this point, old_dentry has the 
new name, parent link, and inode -+ for the renamed file -+ if a file is being replaced by a rename, new_dentry has the inode -+ and name for the replaced file -+ */ -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ preempt_disable(); -+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); -+ -+ /* we wouldn't have to check d_inode if it weren't for -+ NFS silly-renaming -+ */ -+ -+ write_lock(&gr_inode_lock); -+ if (unlikely(replace && inode)) { -+ ino_t new_ino = inode->i_ino; -+ dev_t new_dev = __get_dev(new_dentry); -+ -+ inodev = lookup_inodev_entry(new_ino, new_dev); -+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode))) -+ do_handle_delete(inodev, new_ino, new_dev); -+ } -+ -+ inodev = lookup_inodev_entry(old_ino, old_dev); -+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode))) -+ do_handle_delete(inodev, old_ino, old_dev); -+ -+ if (unlikely((unsigned long)matchn)) -+ do_handle_create(matchn, old_dentry, mnt); -+ -+ write_unlock(&gr_inode_lock); -+ preempt_enable(); -+ -+ return; -+} -+ -+static int -+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, -+ unsigned char **sum) -+{ -+ struct acl_role_label *r; -+ struct role_allowed_ip *ipp; -+ struct role_transition *trans; -+ unsigned int i; -+ int found = 0; -+ u32 curr_ip = current->signal->curr_ip; -+ -+ current->signal->saved_ip = curr_ip; -+ -+ /* check transition table */ -+ -+ for (trans = current->role->transitions; trans; trans = trans->next) { -+ if (!strcmp(rolename, trans->rolename)) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (!found) -+ return 0; -+ -+ /* handle special roles that do not require authentication -+ and check ip */ -+ -+ FOR_EACH_ROLE_START(r) -+ if (!strcmp(rolename, r->rolename) && -+ (r->roletype & GR_ROLE_SPECIAL)) { -+ found = 0; -+ if (r->allowed_ips != NULL) { -+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { -+ if ((ntohl(curr_ip) & ipp->netmask) == -+ (ntohl(ipp->addr) & ipp->netmask)) -+ found = 1; -+ } -+ } else -+ found = 2; -+ if (!found) -+ return 0; -+ -+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) || -+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { -+ *salt = NULL; -+ *sum = NULL; -+ return 1; -+ } -+ } -+ FOR_EACH_ROLE_END(r) -+ -+ for (i = 0; i < num_sprole_pws; i++) { -+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) { -+ *salt = acl_special_roles[i]->salt; -+ *sum = acl_special_roles[i]->sum; -+ return 1; -+ } -+ } -+ -+ return 0; -+} -+ -+static void -+assign_special_role(char *rolename) -+{ -+ struct acl_object_label *obj; -+ struct acl_role_label *r; -+ struct acl_role_label *assigned = NULL; -+ struct task_struct *tsk; -+ struct file *filp; -+ -+ FOR_EACH_ROLE_START(r) -+ if (!strcmp(rolename, r->rolename) && -+ (r->roletype & GR_ROLE_SPECIAL)) { -+ assigned = r; -+ break; -+ } -+ FOR_EACH_ROLE_END(r) -+ -+ if (!assigned) -+ return; -+ -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+ -+ tsk = current->real_parent; -+ if (tsk == NULL) -+ goto out_unlock; -+ -+ filp = tsk->exec_file; -+ if (filp == NULL) -+ goto out_unlock; -+ -+ tsk->is_writable = 0; -+ -+ tsk->acl_sp_role = 1; -+ tsk->acl_role_id = ++acl_sp_role_value; -+ tsk->role = assigned; -+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); -+ -+ /* ignore additional mmap checks for processes that are writable -+ by the default ACL */ -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, 
default_role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ tsk->is_writable = 1; -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ tsk->is_writable = 1; -+ -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); -+#endif -+ -+out_unlock: -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ return; -+} -+ -+int gr_check_secure_terminal(struct task_struct *task) -+{ -+ struct task_struct *p, *p2, *p3; -+ struct files_struct *files; -+ struct fdtable *fdt; -+ struct file *our_file = NULL, *file; -+ int i; -+ -+ if (task->signal->tty == NULL) -+ return 1; -+ -+ files = get_files_struct(task); -+ if (files != NULL) { -+ rcu_read_lock(); -+ fdt = files_fdtable(files); -+ for (i=0; i < fdt->max_fds; i++) { -+ file = fcheck_files(files, i); -+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { -+ get_file(file); -+ our_file = file; -+ } -+ } -+ rcu_read_unlock(); -+ put_files_struct(files); -+ } -+ -+ if (our_file == NULL) -+ return 1; -+ -+ read_lock(&tasklist_lock); -+ do_each_thread(p2, p) { -+ files = get_files_struct(p); -+ if (files == NULL || -+ (p->signal && p->signal->tty == task->signal->tty)) { -+ if (files != NULL) -+ put_files_struct(files); -+ continue; -+ } -+ rcu_read_lock(); -+ fdt = files_fdtable(files); -+ for (i=0; i < fdt->max_fds; i++) { -+ file = fcheck_files(files, i); -+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) && -+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) { -+ p3 = task; -+ while (p3->pid > 0) { -+ if (p3 == p) -+ break; -+ p3 = p3->real_parent; -+ } -+ if (p3 == p) -+ break; -+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p); -+ gr_handle_alertkill(p); -+ rcu_read_unlock(); -+ put_files_struct(files); -+ read_unlock(&tasklist_lock); -+ fput(our_file); -+ return 0; -+ } -+ } -+ rcu_read_unlock(); -+ put_files_struct(files); -+ } while_each_thread(p2, p); -+ read_unlock(&tasklist_lock); -+ -+ fput(our_file); -+ return 1; -+} -+ -+ssize_t -+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) -+{ -+ struct gr_arg_wrapper uwrap; -+ unsigned char *sprole_salt = NULL; -+ unsigned char *sprole_sum = NULL; -+ int error = sizeof (struct gr_arg_wrapper); -+ int error2 = 0; -+ -+ mutex_lock(&gr_dev_mutex); -+ -+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) { -+ error = -EPERM; -+ goto out; -+ } -+ -+ if (count != sizeof (struct gr_arg_wrapper)) { -+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); -+ error = -EINVAL; -+ goto out; -+ } -+ -+ -+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { -+ gr_auth_expires = 0; -+ gr_auth_attempts = 0; -+ } -+ -+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { -+ error = -EFAULT; -+ goto out; -+ } -+ -+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { -+ error = -EINVAL; -+ goto out; -+ } -+ -+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { -+ error = -EFAULT; -+ goto out; -+ } -+ -+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM && -+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && -+ time_after(gr_auth_expires, get_seconds())) { -+ error = -EBUSY; -+ goto out; -+ } -+ -+ /* if 
non-root trying to do anything other than use a special role, -+ do not attempt authentication, do not count towards authentication -+ locking -+ */ -+ -+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS && -+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM && -+ current_uid()) { -+ error = -EPERM; -+ goto out; -+ } -+ -+ /* ensure pw and special role name are null terminated */ -+ -+ gr_usermode->pw[GR_PW_LEN - 1] = '\0'; -+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; -+ -+ /* Okay. -+ * We have our enough of the argument structure..(we have yet -+ * to copy_from_user the tables themselves) . Copy the tables -+ * only if we need them, i.e. for loading operations. */ -+ -+ switch (gr_usermode->mode) { -+ case GR_STATUS: -+ if (gr_status & GR_READY) { -+ error = 1; -+ if (!gr_check_secure_terminal(current)) -+ error = 3; -+ } else -+ error = 2; -+ goto out; -+ case GR_SHUTDOWN: -+ if ((gr_status & GR_READY) -+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { -+ pax_open_kernel(); -+ gr_status &= ~GR_READY; -+ pax_close_kernel(); -+ -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); -+ free_variables(); -+ memset(gr_usermode, 0, sizeof (struct gr_arg)); -+ memset(gr_system_salt, 0, GR_SALT_LEN); -+ memset(gr_system_sum, 0, GR_SHA_LEN); -+ } else if (gr_status & GR_READY) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); -+ error = -EPERM; -+ } else { -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); -+ error = -EAGAIN; -+ } -+ break; -+ case GR_ENABLE: -+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) -+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); -+ else { -+ if (gr_status & GR_READY) -+ error = -EAGAIN; -+ else -+ error = error2; -+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); -+ } -+ break; -+ case GR_RELOAD: -+ if (!(gr_status & GR_READY)) { -+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); -+ error = -EAGAIN; -+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { -+ preempt_disable(); -+ -+ pax_open_kernel(); -+ gr_status &= ~GR_READY; -+ pax_close_kernel(); -+ -+ free_variables(); -+ if (!(error2 = gracl_init(gr_usermode))) { -+ preempt_enable(); -+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); -+ } else { -+ preempt_enable(); -+ error = error2; -+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); -+ } -+ } else { -+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); -+ error = -EPERM; -+ } -+ break; -+ case GR_SEGVMOD: -+ if (unlikely(!(gr_status & GR_READY))) { -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); -+ error = -EAGAIN; -+ break; -+ } -+ -+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); -+ if (gr_usermode->segv_device && gr_usermode->segv_inode) { -+ struct acl_subject_label *segvacl; -+ segvacl = -+ lookup_acl_subj_label(gr_usermode->segv_inode, -+ gr_usermode->segv_device, -+ current->role); -+ if (segvacl) { -+ segvacl->crashes = 0; -+ segvacl->expires = 0; -+ } -+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { -+ gr_remove_uid(gr_usermode->segv_uid); -+ } -+ } else { -+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); -+ error = -EPERM; -+ } -+ break; -+ case GR_SPROLE: -+ case GR_SPROLEPAM: -+ if (unlikely(!(gr_status & GR_READY))) { -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); -+ error = -EAGAIN; -+ break; -+ } -+ -+ if (current->role->expires && time_after_eq(get_seconds(), 
current->role->expires)) { -+ current->role->expires = 0; -+ current->role->auth_attempts = 0; -+ } -+ -+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && -+ time_after(current->role->expires, get_seconds())) { -+ error = -EBUSY; -+ goto out; -+ } -+ -+ if (lookup_special_role_auth -+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) -+ && ((!sprole_salt && !sprole_sum) -+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { -+ char *p = ""; -+ assign_special_role(gr_usermode->sp_role); -+ read_lock(&tasklist_lock); -+ if (current->real_parent) -+ p = current->real_parent->role->rolename; -+ read_unlock(&tasklist_lock); -+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, -+ p, acl_sp_role_value); -+ } else { -+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); -+ error = -EPERM; -+ if(!(current->role->auth_attempts++)) -+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; -+ -+ goto out; -+ } -+ break; -+ case GR_UNSPROLE: -+ if (unlikely(!(gr_status & GR_READY))) { -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); -+ error = -EAGAIN; -+ break; -+ } -+ -+ if (current->role->roletype & GR_ROLE_SPECIAL) { -+ char *p = ""; -+ int i = 0; -+ -+ read_lock(&tasklist_lock); -+ if (current->real_parent) { -+ p = current->real_parent->role->rolename; -+ i = current->real_parent->acl_role_id; -+ } -+ read_unlock(&tasklist_lock); -+ -+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); -+ gr_set_acls(1); -+ } else { -+ error = -EPERM; -+ goto out; -+ } -+ break; -+ default: -+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); -+ error = -EINVAL; -+ break; -+ } -+ -+ if (error != -EPERM) -+ goto out; -+ -+ if(!(gr_auth_attempts++)) -+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; -+ -+ out: -+ mutex_unlock(&gr_dev_mutex); -+ return error; -+} -+ -+/* must be called with -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+*/ -+int gr_apply_subject_to_task(struct task_struct *task) -+{ -+ struct acl_object_label *obj; -+ char *tmpname; -+ struct acl_subject_label *tmpsubj; -+ struct file *filp; -+ struct name_entry *nmatch; -+ -+ filp = task->exec_file; -+ if (filp == NULL) -+ return 0; -+ -+ /* the following is to apply the correct subject -+ on binaries running when the RBAC system -+ is enabled, when the binaries have been -+ replaced or deleted since their execution -+ ----- -+ when the RBAC system starts, the inode/dev -+ from exec_file will be one the RBAC system -+ is unaware of. It only knows the inode/dev -+ of the present file on disk, or the absence -+ of it. 
-+ */ -+ preempt_disable(); -+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); -+ -+ nmatch = lookup_name_entry(tmpname); -+ preempt_enable(); -+ tmpsubj = NULL; -+ if (nmatch) { -+ if (nmatch->deleted) -+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); -+ else -+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); -+ if (tmpsubj != NULL) -+ task->acl = tmpsubj; -+ } -+ if (tmpsubj == NULL) -+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, -+ task->role); -+ if (task->acl) { -+ task->is_writable = 0; -+ /* ignore additional mmap checks for processes that are writable -+ by the default ACL */ -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); -+ if (unlikely(obj->mode & GR_WRITE)) -+ task->is_writable = 1; -+ -+ gr_set_proc_res(task); -+ -+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG -+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); -+#endif -+ } else { -+ return 1; -+ } -+ -+ return 0; -+} -+ -+int -+gr_set_acls(const int type) -+{ -+ struct task_struct *task, *task2; -+ struct acl_role_label *role = current->role; -+ __u16 acl_role_id = current->acl_role_id; -+ const struct cred *cred; -+ int ret; -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+ do_each_thread(task2, task) { -+ /* check to see if we're called from the exit handler, -+ if so, only replace ACLs that have inherited the admin -+ ACL */ -+ -+ if (type && (task->role != role || -+ task->acl_role_id != acl_role_id)) -+ continue; -+ -+ task->acl_role_id = 0; -+ task->acl_sp_role = 0; -+ -+ if (task->exec_file) { -+ cred = __task_cred(task); -+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid); -+ ret = gr_apply_subject_to_task(task); -+ if (ret) { -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); -+ return ret; -+ } -+ } else { -+ // it's a kernel process -+ task->role = kernel_role; -+ task->acl = kernel_role->root_label; -+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN -+ task->acl->mode &= ~GR_PROCFIND; -+#endif -+ } -+ } while_each_thread(task2, task); -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ -+ return 0; -+} -+ -+void -+gr_learn_resource(const struct task_struct *task, -+ const int res, const unsigned long wanted, const int gt) -+{ -+ struct acl_subject_label *acl; -+ const struct cred *cred; -+ -+ if (unlikely((gr_status & GR_READY) && -+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) -+ goto skip_reslog; -+ -+#ifdef CONFIG_GRKERNSEC_RESLOG -+ gr_log_resource(task, res, wanted, gt); -+#endif -+ skip_reslog: -+ -+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) -+ return; -+ -+ acl = task->acl; -+ -+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || -+ !(acl->resmask & (1 << (unsigned short) res)))) -+ return; -+ -+ if (wanted >= acl->res[res].rlim_cur) { -+ unsigned long res_add; -+ -+ res_add = wanted; -+ switch (res) { -+ case RLIMIT_CPU: -+ res_add += GR_RLIM_CPU_BUMP; -+ break; -+ case RLIMIT_FSIZE: -+ res_add += GR_RLIM_FSIZE_BUMP; -+ break; -+ case RLIMIT_DATA: -+ res_add += 
GR_RLIM_DATA_BUMP; -+ break; -+ case RLIMIT_STACK: -+ res_add += GR_RLIM_STACK_BUMP; -+ break; -+ case RLIMIT_CORE: -+ res_add += GR_RLIM_CORE_BUMP; -+ break; -+ case RLIMIT_RSS: -+ res_add += GR_RLIM_RSS_BUMP; -+ break; -+ case RLIMIT_NPROC: -+ res_add += GR_RLIM_NPROC_BUMP; -+ break; -+ case RLIMIT_NOFILE: -+ res_add += GR_RLIM_NOFILE_BUMP; -+ break; -+ case RLIMIT_MEMLOCK: -+ res_add += GR_RLIM_MEMLOCK_BUMP; -+ break; -+ case RLIMIT_AS: -+ res_add += GR_RLIM_AS_BUMP; -+ break; -+ case RLIMIT_LOCKS: -+ res_add += GR_RLIM_LOCKS_BUMP; -+ break; -+ case RLIMIT_SIGPENDING: -+ res_add += GR_RLIM_SIGPENDING_BUMP; -+ break; -+ case RLIMIT_MSGQUEUE: -+ res_add += GR_RLIM_MSGQUEUE_BUMP; -+ break; -+ case RLIMIT_NICE: -+ res_add += GR_RLIM_NICE_BUMP; -+ break; -+ case RLIMIT_RTPRIO: -+ res_add += GR_RLIM_RTPRIO_BUMP; -+ break; -+ case RLIMIT_RTTIME: -+ res_add += GR_RLIM_RTTIME_BUMP; -+ break; -+ } -+ -+ acl->res[res].rlim_cur = res_add; -+ -+ if (wanted > acl->res[res].rlim_max) -+ acl->res[res].rlim_max = res_add; -+ -+ /* only log the subject filename, since resource logging is supported for -+ single-subject learning only */ -+ rcu_read_lock(); -+ cred = __task_cred(task); -+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, -+ task->role->roletype, cred->uid, cred->gid, acl->filename, -+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, -+ "", (unsigned long) res, &task->signal->saved_ip); -+ rcu_read_unlock(); -+ } -+ -+ return; -+} -+ -+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) -+void -+pax_set_initial_flags(struct linux_binprm *bprm) -+{ -+ struct task_struct *task = current; -+ struct acl_subject_label *proc; -+ unsigned long flags; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ flags = pax_get_flags(task); -+ -+ proc = task->acl; -+ -+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) -+ flags &= ~MF_PAX_PAGEEXEC; -+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) -+ flags &= ~MF_PAX_SEGMEXEC; -+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) -+ flags &= ~MF_PAX_RANDMMAP; -+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) -+ flags &= ~MF_PAX_EMUTRAMP; -+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) -+ flags &= ~MF_PAX_MPROTECT; -+ -+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) -+ flags |= MF_PAX_PAGEEXEC; -+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) -+ flags |= MF_PAX_SEGMEXEC; -+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) -+ flags |= MF_PAX_RANDMMAP; -+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) -+ flags |= MF_PAX_EMUTRAMP; -+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) -+ flags |= MF_PAX_MPROTECT; -+ -+ pax_set_flags(task, flags); -+ -+ return; -+} -+#endif -+ -+#ifdef CONFIG_SYSCTL -+/* Eric Biederman likes breaking userland ABI and every inode-based security -+ system to save 35kb of memory */ -+ -+/* we modify the passed in filename, but adjust it back before returning */ -+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len) -+{ -+ struct name_entry *nmatch; -+ char *p, *lastp = NULL; -+ struct acl_object_label *obj = NULL, *tmp; -+ struct acl_subject_label *tmpsubj; -+ char c = '\0'; -+ -+ read_lock(&gr_inode_lock); -+ -+ p = name + len - 1; -+ do { -+ nmatch = lookup_name_entry(name); -+ if (lastp != NULL) -+ *lastp = c; -+ -+ if (nmatch == NULL) -+ goto next_component; -+ tmpsubj = current->acl; -+ do { -+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj); -+ if (obj != NULL) { -+ tmp = obj->globbed; -+ while (tmp) { -+ if 
(!glob_match(tmp->filename, name)) { -+ obj = tmp; -+ goto found_obj; -+ } -+ tmp = tmp->next; -+ } -+ goto found_obj; -+ } -+ } while ((tmpsubj = tmpsubj->parent_subject)); -+next_component: -+ /* end case */ -+ if (p == name) -+ break; -+ -+ while (*p != '/') -+ p--; -+ if (p == name) -+ lastp = p + 1; -+ else { -+ lastp = p; -+ p--; -+ } -+ c = *lastp; -+ *lastp = '\0'; -+ } while (1); -+found_obj: -+ read_unlock(&gr_inode_lock); -+ /* obj returned will always be non-null */ -+ return obj; -+} -+ -+/* returns 0 when allowing, non-zero on error -+ op of 0 is used for readdir, so we don't log the names of hidden files -+*/ -+__u32 -+gr_handle_sysctl(const struct ctl_table *table, const int op) -+{ -+ struct ctl_table *tmp; -+ const char *proc_sys = "/proc/sys"; -+ char *path; -+ struct acl_object_label *obj; -+ unsigned short len = 0, pos = 0, depth = 0, i; -+ __u32 err = 0; -+ __u32 mode = 0; -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ /* for now, ignore operations on non-sysctl entries if it's not a -+ readdir*/ -+ if (table->child != NULL && op != 0) -+ return 0; -+ -+ mode |= GR_FIND; -+ /* it's only a read if it's an entry, read on dirs is for readdir */ -+ if (op & MAY_READ) -+ mode |= GR_READ; -+ if (op & MAY_WRITE) -+ mode |= GR_WRITE; -+ -+ preempt_disable(); -+ -+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); -+ -+ /* it's only a read/write if it's an actual entry, not a dir -+ (which are opened for readdir) -+ */ -+ -+ /* convert the requested sysctl entry into a pathname */ -+ -+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { -+ len += strlen(tmp->procname); -+ len++; -+ depth++; -+ } -+ -+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) { -+ /* deny */ -+ goto out; -+ } -+ -+ memset(path, 0, PAGE_SIZE); -+ -+ memcpy(path, proc_sys, strlen(proc_sys)); -+ -+ pos += strlen(proc_sys); -+ -+ for (; depth > 0; depth--) { -+ path[pos] = '/'; -+ pos++; -+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { -+ if (depth == i) { -+ memcpy(path + pos, tmp->procname, -+ strlen(tmp->procname)); -+ pos += strlen(tmp->procname); -+ } -+ i++; -+ } -+ } -+ -+ obj = gr_lookup_by_name(path, pos); -+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); -+ -+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && -+ ((err & mode) != mode))) { -+ __u32 new_mode = mode; -+ -+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); -+ -+ err = 0; -+ gr_log_learn_sysctl(path, new_mode); -+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) { -+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path); -+ err = -ENOENT; -+ } else if (!(err & GR_FIND)) { -+ err = -ENOENT; -+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) { -+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", -+ path, (mode & GR_READ) ? " reading" : "", -+ (mode & GR_WRITE) ? " writing" : ""); -+ err = -EACCES; -+ } else if ((err & mode) != mode) { -+ err = -EACCES; -+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) { -+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", -+ path, (mode & GR_READ) ? " reading" : "", -+ (mode & GR_WRITE) ? 
" writing" : ""); -+ err = 0; -+ } else -+ err = 0; -+ -+ out: -+ preempt_enable(); -+ -+ return err; -+} -+#endif -+ -+int -+gr_handle_proc_ptrace(struct task_struct *task) -+{ -+ struct file *filp; -+ struct task_struct *tmp = task; -+ struct task_struct *curtemp = current; -+ __u32 retmode; -+ -+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+#endif -+ -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+ filp = task->exec_file; -+ -+ while (tmp->pid > 0) { -+ if (tmp == curtemp) -+ break; -+ tmp = tmp->real_parent; -+ } -+ -+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ return 1; -+ } -+ -+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ if (!(gr_status & GR_READY)) { -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ return 0; -+ } -+#endif -+ -+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ -+ if (retmode & GR_NOPTRACE) -+ return 1; -+ -+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) -+ && (current->acl != task->acl || (current->acl != current->role->root_label -+ && current->pid != task->pid))) -+ return 1; -+ -+ return 0; -+} -+ -+void task_grsec_rbac(struct seq_file *m, struct task_struct *p) -+{ -+ if (unlikely(!(gr_status & GR_READY))) -+ return; -+ -+ if (!(current->role->roletype & GR_ROLE_GOD)) -+ return; -+ -+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n", -+ p->role->rolename, gr_task_roletype_to_char(p), -+ p->acl->filename); -+} -+ -+int -+gr_handle_ptrace(struct task_struct *task, const long request) -+{ -+ struct task_struct *tmp = task; -+ struct task_struct *curtemp = current; -+ __u32 retmode; -+ -+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+#endif -+ -+ read_lock(&tasklist_lock); -+ while (tmp->pid > 0) { -+ if (tmp == curtemp) -+ break; -+ tmp = tmp->real_parent; -+ } -+ -+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || -+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { -+ read_unlock(&tasklist_lock); -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); -+ return 1; -+ } -+ read_unlock(&tasklist_lock); -+ -+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ if (!(gr_status & GR_READY)) -+ return 0; -+#endif -+ -+ read_lock(&grsec_exec_file_lock); -+ if (unlikely(!task->exec_file)) { -+ read_unlock(&grsec_exec_file_lock); -+ return 0; -+ } -+ -+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); -+ read_unlock(&grsec_exec_file_lock); -+ -+ if (retmode & GR_NOPTRACE) { -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); -+ return 1; -+ } -+ -+ if (retmode & GR_PTRACERD) { -+ switch (request) { -+ case PTRACE_SEIZE: -+ case PTRACE_POKETEXT: -+ case PTRACE_POKEDATA: -+ case PTRACE_POKEUSR: -+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) -+ case PTRACE_SETREGS: -+ case PTRACE_SETFPREGS: -+#endif -+#ifdef CONFIG_X86 -+ case PTRACE_SETFPXREGS: -+#endif -+#ifdef CONFIG_ALTIVEC -+ case PTRACE_SETVRREGS: -+#endif -+ return 1; -+ default: -+ return 0; -+ } -+ } else if 
(!(current->acl->mode & GR_POVERRIDE) && -+ !(current->role->roletype & GR_ROLE_GOD) && -+ (current->acl != task->acl)) { -+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static int is_writable_mmap(const struct file *filp) -+{ -+ struct task_struct *task = current; -+ struct acl_object_label *obj, *obj2; -+ -+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && -+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) { -+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); -+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, -+ task->role->root_label); -+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt); -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+int -+gr_acl_handle_mmap(const struct file *file, const unsigned long prot) -+{ -+ __u32 mode; -+ -+ if (unlikely(!file || !(prot & PROT_EXEC))) -+ return 1; -+ -+ if (is_writable_mmap(file)) -+ return 0; -+ -+ mode = -+ gr_search_file(file->f_path.dentry, -+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, -+ file->f_path.mnt); -+ -+ if (!gr_tpe_allow(file)) -+ return 0; -+ -+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 0; -+ } else if (unlikely(!(mode & GR_EXEC))) { -+ return 0; -+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { -+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 1; -+ } -+ -+ return 1; -+} -+ -+int -+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) -+{ -+ __u32 mode; -+ -+ if (unlikely(!file || !(prot & PROT_EXEC))) -+ return 1; -+ -+ if (is_writable_mmap(file)) -+ return 0; -+ -+ mode = -+ gr_search_file(file->f_path.dentry, -+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, -+ file->f_path.mnt); -+ -+ if (!gr_tpe_allow(file)) -+ return 0; -+ -+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 0; -+ } else if (unlikely(!(mode & GR_EXEC))) { -+ return 0; -+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { -+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 1; -+ } -+ -+ return 1; -+} -+ -+void -+gr_acl_handle_psacct(struct task_struct *task, const long code) -+{ -+ unsigned long runtime; -+ unsigned long cputime; -+ unsigned int wday, cday; -+ __u8 whr, chr; -+ __u8 wmin, cmin; -+ __u8 wsec, csec; -+ struct timespec timeval; -+ -+ if (unlikely(!(gr_status & GR_READY) || !task->acl || -+ !(task->acl->mode & GR_PROCACCT))) -+ return; -+ -+ do_posix_clock_monotonic_gettime(&timeval); -+ runtime = timeval.tv_sec - task->start_time.tv_sec; -+ wday = runtime / (3600 * 24); -+ runtime -= wday * (3600 * 24); -+ whr = runtime / 3600; -+ runtime -= whr * 3600; -+ wmin = runtime / 60; -+ runtime -= wmin * 60; -+ wsec = runtime; -+ -+ cputime = (task->utime + task->stime) / HZ; -+ cday = cputime / (3600 * 24); -+ cputime -= cday * (3600 * 24); -+ chr = cputime / 3600; -+ cputime -= chr * 3600; -+ cmin = cputime / 60; -+ cputime -= cmin * 60; -+ csec = cputime; -+ -+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, 
wsec, cday, chr, cmin, csec, code); -+ -+ return; -+} -+ -+void gr_set_kernel_label(struct task_struct *task) -+{ -+ if (gr_status & GR_READY) { -+ task->role = kernel_role; -+ task->acl = kernel_role->root_label; -+ } -+ return; -+} -+ -+#ifdef CONFIG_TASKSTATS -+int gr_is_taskstats_denied(int pid) -+{ -+ struct task_struct *task; -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ const struct cred *cred; -+#endif -+ int ret = 0; -+ -+ /* restrict taskstats viewing to un-chrooted root users -+ who have the 'view' subject flag if the RBAC system is enabled -+ */ -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ task = find_task_by_vpid(pid); -+ if (task) { -+#ifdef CONFIG_GRKERNSEC_CHROOT -+ if (proc_is_chrooted(task)) -+ ret = -EACCES; -+#endif -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ cred = __task_cred(task); -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ if (cred->uid != 0) -+ ret = -EACCES; -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID)) -+ ret = -EACCES; -+#endif -+#endif -+ if (gr_status & GR_READY) { -+ if (!(task->acl->mode & GR_VIEW)) -+ ret = -EACCES; -+ } -+ } else -+ ret = -ENOENT; -+ -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ -+ return ret; -+} -+#endif -+ -+/* AUXV entries are filled via a descendant of search_binary_handler -+ after we've already applied the subject for the target -+*/ -+int gr_acl_enable_at_secure(void) -+{ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 0; -+ -+ if (current->acl->mode & GR_ATSECURE) -+ return 1; -+ -+ return 0; -+} -+ -+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) -+{ -+ struct task_struct *task = current; -+ struct dentry *dentry = file->f_path.dentry; -+ struct vfsmount *mnt = file->f_path.mnt; -+ struct acl_object_label *obj, *tmp; -+ struct acl_subject_label *subj; -+ unsigned int bufsize; -+ int is_not_root; -+ char *path; -+ dev_t dev = __get_dev(dentry); -+ -+ if (unlikely(!(gr_status & GR_READY))) -+ return 1; -+ -+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) -+ return 1; -+ -+ /* ignore Eric Biederman */ -+ if (IS_PRIVATE(dentry->d_inode)) -+ return 1; -+ -+ subj = task->acl; -+ do { -+ obj = lookup_acl_obj_label(ino, dev, subj); -+ if (obj != NULL) -+ return (obj->mode & GR_FIND) ? 1 : 0; -+ } while ((subj = subj->parent_subject)); -+ -+ /* this is purely an optimization since we're looking for an object -+ for the directory we're doing a readdir on -+ if it's possible for any globbed object to match the entry we're -+ filling into the directory, then the object we find here will be -+ an anchor point with attached globbed objects -+ */ -+ obj = chk_obj_label_noglob(dentry, mnt, task->acl); -+ if (obj->globbed == NULL) -+ return (obj->mode & GR_FIND) ? 1 : 0; -+ -+ is_not_root = ((obj->filename[0] == '/') && -+ (obj->filename[1] == '\0')) ? 
0 : 1; -+ bufsize = PAGE_SIZE - namelen - is_not_root; -+ -+ /* check bufsize > PAGE_SIZE || bufsize == 0 */ -+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) -+ return 1; -+ -+ preempt_disable(); -+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), -+ bufsize); -+ -+ bufsize = strlen(path); -+ -+ /* if base is "/", don't append an additional slash */ -+ if (is_not_root) -+ *(path + bufsize) = '/'; -+ memcpy(path + bufsize + is_not_root, name, namelen); -+ *(path + bufsize + namelen + is_not_root) = '\0'; -+ -+ tmp = obj->globbed; -+ while (tmp) { -+ if (!glob_match(tmp->filename, path)) { -+ preempt_enable(); -+ return (tmp->mode & GR_FIND) ? 1 : 0; -+ } -+ tmp = tmp->next; -+ } -+ preempt_enable(); -+ return (obj->mode & GR_FIND) ? 1 : 0; -+} -+ -+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE -+EXPORT_SYMBOL(gr_acl_is_enabled); -+#endif -+EXPORT_SYMBOL(gr_learn_resource); -+EXPORT_SYMBOL(gr_set_kernel_label); -+#ifdef CONFIG_SECURITY -+EXPORT_SYMBOL(gr_check_user_change); -+EXPORT_SYMBOL(gr_check_group_change); -+#endif -+ -diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c -new file mode 100644 -index 0000000..34fefda ---- /dev/null -+++ b/grsecurity/gracl_alloc.c -@@ -0,0 +1,105 @@ -+#include <linux/kernel.h> -+#include <linux/mm.h> -+#include <linux/slab.h> -+#include <linux/vmalloc.h> -+#include <linux/gracl.h> -+#include <linux/grsecurity.h> -+ -+static unsigned long alloc_stack_next = 1; -+static unsigned long alloc_stack_size = 1; -+static void **alloc_stack; -+ -+static __inline__ int -+alloc_pop(void) -+{ -+ if (alloc_stack_next == 1) -+ return 0; -+ -+ kfree(alloc_stack[alloc_stack_next - 2]); -+ -+ alloc_stack_next--; -+ -+ return 1; -+} -+ -+static __inline__ int -+alloc_push(void *buf) -+{ -+ if (alloc_stack_next >= alloc_stack_size) -+ return 1; -+ -+ alloc_stack[alloc_stack_next - 1] = buf; -+ -+ alloc_stack_next++; -+ -+ return 0; -+} -+ -+void * -+acl_alloc(unsigned long len) -+{ -+ void *ret = NULL; -+ -+ if (!len || len > PAGE_SIZE) -+ goto out; -+ -+ ret = kmalloc(len, GFP_KERNEL); -+ -+ if (ret) { -+ if (alloc_push(ret)) { -+ kfree(ret); -+ ret = NULL; -+ } -+ } -+ -+out: -+ return ret; -+} -+ -+void * -+acl_alloc_num(unsigned long num, unsigned long len) -+{ -+ if (!len || (num > (PAGE_SIZE / len))) -+ return NULL; -+ -+ return acl_alloc(num * len); -+} -+ -+void -+acl_free_all(void) -+{ -+ if (gr_acl_is_enabled() || !alloc_stack) -+ return; -+ -+ while (alloc_pop()) ; -+ -+ if (alloc_stack) { -+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) -+ kfree(alloc_stack); -+ else -+ vfree(alloc_stack); -+ } -+ -+ alloc_stack = NULL; -+ alloc_stack_size = 1; -+ alloc_stack_next = 1; -+ -+ return; -+} -+ -+int -+acl_alloc_stack_init(unsigned long size) -+{ -+ if ((size * sizeof (void *)) <= PAGE_SIZE) -+ alloc_stack = -+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); -+ else -+ alloc_stack = (void **) vmalloc(size * sizeof (void *)); -+ -+ alloc_stack_size = size; -+ -+ if (!alloc_stack) -+ return 0; -+ else -+ return 1; -+} -diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c -new file mode 100644 -index 0000000..955ddfb ---- /dev/null -+++ b/grsecurity/gracl_cap.c -@@ -0,0 +1,101 @@ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/sched.h> -+#include <linux/gracl.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+extern const char *captab_log[]; -+extern int captab_log_entries; -+ -+int -+gr_acl_is_capable(const int cap) -+{ -+ struct task_struct *task = 
current; -+ const struct cred *cred = current_cred(); -+ struct acl_subject_label *curracl; -+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; -+ kernel_cap_t cap_audit = __cap_empty_set; -+ -+ if (!gr_acl_is_enabled()) -+ return 1; -+ -+ curracl = task->acl; -+ -+ cap_drop = curracl->cap_lower; -+ cap_mask = curracl->cap_mask; -+ cap_audit = curracl->cap_invert_audit; -+ -+ while ((curracl = curracl->parent_subject)) { -+ /* if the cap isn't specified in the current computed mask but is specified in the -+ current level subject, and is lowered in the current level subject, then add -+ it to the set of dropped capabilities -+ otherwise, add the current level subject's mask to the current computed mask -+ */ -+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { -+ cap_raise(cap_mask, cap); -+ if (cap_raised(curracl->cap_lower, cap)) -+ cap_raise(cap_drop, cap); -+ if (cap_raised(curracl->cap_invert_audit, cap)) -+ cap_raise(cap_audit, cap); -+ } -+ } -+ -+ if (!cap_raised(cap_drop, cap)) { -+ if (cap_raised(cap_audit, cap)) -+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); -+ return 1; -+ } -+ -+ curracl = task->acl; -+ -+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) -+ && cap_raised(cred->cap_effective, cap)) { -+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, -+ task->role->roletype, cred->uid, -+ cred->gid, task->exec_file ? -+ gr_to_filename(task->exec_file->f_path.dentry, -+ task->exec_file->f_path.mnt) : curracl->filename, -+ curracl->filename, 0UL, -+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip); -+ return 1; -+ } -+ -+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) -+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); -+ return 0; -+} -+ -+int -+gr_acl_is_capable_nolog(const int cap) -+{ -+ struct acl_subject_label *curracl; -+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; -+ -+ if (!gr_acl_is_enabled()) -+ return 1; -+ -+ curracl = current->acl; -+ -+ cap_drop = curracl->cap_lower; -+ cap_mask = curracl->cap_mask; -+ -+ while ((curracl = curracl->parent_subject)) { -+ /* if the cap isn't specified in the current computed mask but is specified in the -+ current level subject, and is lowered in the current level subject, then add -+ it to the set of dropped capabilities -+ otherwise, add the current level subject's mask to the current computed mask -+ */ -+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { -+ cap_raise(cap_mask, cap); -+ if (cap_raised(curracl->cap_lower, cap)) -+ cap_raise(cap_drop, cap); -+ } -+ } -+ -+ if (!cap_raised(cap_drop, cap)) -+ return 1; -+ -+ return 0; -+} -+ -diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c -new file mode 100644 -index 0000000..4eda5c3 ---- /dev/null -+++ b/grsecurity/gracl_fs.c -@@ -0,0 +1,433 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/types.h> -+#include <linux/fs.h> -+#include <linux/file.h> -+#include <linux/stat.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/gracl.h> -+ -+__u32 -+gr_acl_handle_hidden_file(const struct dentry * dentry, -+ const struct vfsmount * mnt) -+{ -+ __u32 mode; -+ -+ if (unlikely(!dentry->d_inode)) -+ return GR_FIND; -+ -+ mode = -+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); -+ -+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { -+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, 
dentry, mnt); -+ return mode; -+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); -+ return 0; -+ } else if (unlikely(!(mode & GR_FIND))) -+ return 0; -+ -+ return GR_FIND; -+} -+ -+__u32 -+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, -+ int acc_mode) -+{ -+ __u32 reqmode = GR_FIND; -+ __u32 mode; -+ -+ if (unlikely(!dentry->d_inode)) -+ return reqmode; -+ -+ if (acc_mode & MAY_APPEND) -+ reqmode |= GR_APPEND; -+ else if (acc_mode & MAY_WRITE) -+ reqmode |= GR_WRITE; -+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode)) -+ reqmode |= GR_READ; -+ -+ mode = -+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, -+ mnt); -+ -+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { -+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : reqmode & -+ GR_APPEND ? " appending" : ""); -+ return reqmode; -+ } else -+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) -+ { -+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : reqmode & -+ GR_APPEND ? " appending" : ""); -+ return 0; -+ } else if (unlikely((mode & reqmode) != reqmode)) -+ return 0; -+ -+ return reqmode; -+} -+ -+__u32 -+gr_acl_handle_creat(const struct dentry * dentry, -+ const struct dentry * p_dentry, -+ const struct vfsmount * p_mnt, int open_flags, int acc_mode, -+ const int imode) -+{ -+ __u32 reqmode = GR_WRITE | GR_CREATE; -+ __u32 mode; -+ -+ if (acc_mode & MAY_APPEND) -+ reqmode |= GR_APPEND; -+ // if a directory was required or the directory already exists, then -+ // don't count this open as a read -+ if ((acc_mode & MAY_READ) && -+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)))) -+ reqmode |= GR_READ; -+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID))) -+ reqmode |= GR_SETID; -+ -+ mode = -+ gr_check_create(dentry, p_dentry, p_mnt, -+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); -+ -+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { -+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : reqmode & -+ GR_APPEND ? " appending" : ""); -+ return reqmode; -+ } else -+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) -+ { -+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : reqmode & -+ GR_APPEND ? " appending" : ""); -+ return 0; -+ } else if (unlikely((mode & reqmode) != reqmode)) -+ return 0; -+ -+ return reqmode; -+} -+ -+__u32 -+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, -+ const int fmode) -+{ -+ __u32 mode, reqmode = GR_FIND; -+ -+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) -+ reqmode |= GR_EXEC; -+ if (fmode & S_IWOTH) -+ reqmode |= GR_WRITE; -+ if (fmode & S_IROTH) -+ reqmode |= GR_READ; -+ -+ mode = -+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, -+ mnt); -+ -+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { -+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : "", -+ reqmode & GR_EXEC ? 
" executing" : ""); -+ return reqmode; -+ } else -+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) -+ { -+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, -+ reqmode & GR_READ ? " reading" : "", -+ reqmode & GR_WRITE ? " writing" : "", -+ reqmode & GR_EXEC ? " executing" : ""); -+ return 0; -+ } else if (unlikely((mode & reqmode) != reqmode)) -+ return 0; -+ -+ return reqmode; -+} -+ -+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) -+{ -+ __u32 mode; -+ -+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); -+ -+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { -+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); -+ return mode; -+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); -+ return 0; -+ } else if (unlikely((mode & (reqmode)) != (reqmode))) -+ return 0; -+ -+ return (reqmode); -+} -+ -+__u32 -+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt, -+ mode_t mode) -+{ -+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) -+ return 1; -+ -+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { -+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, -+ GR_FCHMOD_ACL_MSG); -+ } else { -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG); -+ } -+} -+ -+__u32 -+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, -+ mode_t mode) -+{ -+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { -+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, -+ GR_CHMOD_ACL_MSG); -+ } else { -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); -+ } -+} -+ -+__u32 -+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, -+ GR_UNIXCONNECT_ACL_MSG); -+} -+ -+/* hardlinks require at minimum create and link permission, -+ any additional privilege required is based on the -+ privilege of the file being linked to -+*/ -+__u32 -+gr_acl_handle_link(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ 
const struct vfsmount * parent_mnt, -+ const struct dentry * old_dentry, -+ const struct vfsmount * old_mnt, const char *to) -+{ -+ __u32 mode; -+ __u32 needmode = GR_CREATE | GR_LINK; -+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; -+ -+ mode = -+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, -+ old_mnt); -+ -+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { -+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); -+ return mode; -+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); -+ return 0; -+ } else if (unlikely((mode & needmode) != needmode)) -+ return 0; -+ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_symlink(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, const char *from) -+{ -+ __u32 needmode = GR_WRITE | GR_CREATE; -+ __u32 mode; -+ -+ mode = -+ gr_check_create(new_dentry, parent_dentry, parent_mnt, -+ GR_CREATE | GR_AUDIT_CREATE | -+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); -+ -+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { -+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); -+ return mode; -+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); -+ return 0; -+ } else if (unlikely((mode & needmode) != needmode)) -+ return 0; -+ -+ return (GR_WRITE | GR_CREATE); -+} -+ -+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) -+{ -+ __u32 mode; -+ -+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); -+ -+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { -+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); -+ return mode; -+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { -+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); -+ return 0; -+ } else if (unlikely((mode & (reqmode)) != (reqmode))) -+ return 0; -+ -+ return (reqmode); -+} -+ -+__u32 -+gr_acl_handle_mknod(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, -+ const int mode) -+{ -+ __u32 reqmode = GR_WRITE | GR_CREATE; -+ if (unlikely(mode & (S_ISUID | S_ISGID))) -+ reqmode |= GR_SETID; -+ -+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, -+ reqmode, GR_MKNOD_ACL_MSG); -+} -+ -+__u32 -+gr_acl_handle_mkdir(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt) -+{ -+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, -+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); -+} -+ -+#define RENAME_CHECK_SUCCESS(old, new) \ -+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ -+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) -+ -+int -+gr_acl_handle_rename(struct dentry *new_dentry, -+ struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ struct dentry *old_dentry, -+ struct inode *old_parent_inode, -+ struct vfsmount *old_mnt, const char *newname) -+{ -+ __u32 comp1, comp2; -+ int error = 0; -+ -+ if (unlikely(!gr_acl_is_enabled())) -+ return 0; -+ -+ if (!new_dentry->d_inode) { -+ 
comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, -+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | -+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); -+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | -+ GR_DELETE | GR_AUDIT_DELETE | -+ GR_AUDIT_READ | GR_AUDIT_WRITE | -+ GR_SUPPRESS, old_mnt); -+ } else { -+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | -+ GR_CREATE | GR_DELETE | -+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | -+ GR_AUDIT_READ | GR_AUDIT_WRITE | -+ GR_SUPPRESS, parent_mnt); -+ comp2 = -+ gr_search_file(old_dentry, -+ GR_READ | GR_WRITE | GR_AUDIT_READ | -+ GR_DELETE | GR_AUDIT_DELETE | -+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); -+ } -+ -+ if (RENAME_CHECK_SUCCESS(comp1, comp2) && -+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) -+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); -+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) -+ && !(comp2 & GR_SUPPRESS)) { -+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); -+ error = -EACCES; -+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) -+ error = -EACCES; -+ -+ return error; -+} -+ -+void -+gr_acl_handle_exit(void) -+{ -+ u16 id; -+ char *rolename; -+ struct file *exec_file; -+ -+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() && -+ !(current->role->roletype & GR_ROLE_PERSIST))) { -+ id = current->acl_role_id; -+ rolename = current->role->rolename; -+ gr_set_acls(1); -+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); -+ } -+ -+ write_lock(&grsec_exec_file_lock); -+ exec_file = current->exec_file; -+ current->exec_file = NULL; -+ write_unlock(&grsec_exec_file_lock); -+ -+ if (exec_file) -+ fput(exec_file); -+} -+ -+int -+gr_acl_handle_procpidmem(const struct task_struct *task) -+{ -+ if (unlikely(!gr_acl_is_enabled())) -+ return 0; -+ -+ if (task != current && task->acl->mode & GR_PROTPROCFD) -+ return -EACCES; -+ -+ return 0; -+} -diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c -new file mode 100644 -index 0000000..17050ca ---- /dev/null -+++ b/grsecurity/gracl_ip.c -@@ -0,0 +1,381 @@ -+#include <linux/kernel.h> -+#include <asm/uaccess.h> -+#include <asm/errno.h> -+#include <net/sock.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/net.h> -+#include <linux/in.h> -+#include <linux/skbuff.h> -+#include <linux/ip.h> -+#include <linux/udp.h> -+#include <linux/types.h> -+#include <linux/sched.h> -+#include <linux/netdevice.h> -+#include <linux/inetdevice.h> -+#include <linux/gracl.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+#define GR_BIND 0x01 -+#define GR_CONNECT 0x02 -+#define GR_INVERT 0x04 -+#define GR_BINDOVERRIDE 0x08 -+#define GR_CONNECTOVERRIDE 0x10 -+#define GR_SOCK_FAMILY 0x20 -+ -+static const char * gr_protocols[IPPROTO_MAX] = { -+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", -+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", -+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1", -+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", -+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", -+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", -+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", -+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", -+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", 
"ipcv", -+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", -+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", -+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", -+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", -+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", -+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", -+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", -+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", -+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", -+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", -+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", -+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", -+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", -+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", -+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", -+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", -+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", -+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", -+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", -+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", -+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", -+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", -+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", -+ }; -+ -+static const char * gr_socktypes[SOCK_MAX] = { -+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", -+ "unknown:7", "unknown:8", "unknown:9", "packet" -+ }; -+ -+static const char * gr_sockfamilies[AF_MAX+1] = { -+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25", -+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash", -+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28", -+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf" -+ }; -+ -+const char * -+gr_proto_to_name(unsigned char proto) -+{ -+ return gr_protocols[proto]; -+} -+ -+const char * -+gr_socktype_to_name(unsigned char type) -+{ -+ return gr_socktypes[type]; -+} -+ -+const char * -+gr_sockfamily_to_name(unsigned char family) -+{ -+ return gr_sockfamilies[family]; -+} -+ -+int -+gr_search_socket(const int domain, const int type, const int protocol) -+{ -+ struct acl_subject_label *curr; -+ const struct cred *cred = current_cred(); -+ -+ if 
(unlikely(!gr_acl_is_enabled())) -+ goto exit; -+ -+ if ((domain < 0) || (type < 0) || (protocol < 0) || -+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX)) -+ goto exit; // let the kernel handle it -+ -+ curr = current->acl; -+ -+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) { -+ /* the family is allowed, if this is PF_INET allow it only if -+ the extra sock type/protocol checks pass */ -+ if (domain == PF_INET) -+ goto inet_check; -+ goto exit; -+ } else { -+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { -+ __u32 fakeip = 0; -+ security_learn(GR_IP_LEARN_MSG, current->role->rolename, -+ current->role->roletype, cred->uid, -+ cred->gid, current->exec_file ? -+ gr_to_filename(current->exec_file->f_path.dentry, -+ current->exec_file->f_path.mnt) : -+ curr->filename, curr->filename, -+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY, -+ &current->signal->saved_ip); -+ goto exit; -+ } -+ goto exit_fail; -+ } -+ -+inet_check: -+ /* the rest of this checking is for IPv4 only */ -+ if (!curr->ips) -+ goto exit; -+ -+ if ((curr->ip_type & (1 << type)) && -+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) -+ goto exit; -+ -+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { -+ /* we don't place acls on raw sockets , and sometimes -+ dgram/ip sockets are opened for ioctl and not -+ bind/connect, so we'll fake a bind learn log */ -+ if (type == SOCK_RAW || type == SOCK_PACKET) { -+ __u32 fakeip = 0; -+ security_learn(GR_IP_LEARN_MSG, current->role->rolename, -+ current->role->roletype, cred->uid, -+ cred->gid, current->exec_file ? -+ gr_to_filename(current->exec_file->f_path.dentry, -+ current->exec_file->f_path.mnt) : -+ curr->filename, curr->filename, -+ &fakeip, 0, type, -+ protocol, GR_CONNECT, &current->signal->saved_ip); -+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { -+ __u32 fakeip = 0; -+ security_learn(GR_IP_LEARN_MSG, current->role->rolename, -+ current->role->roletype, cred->uid, -+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry, -+ current->exec_file->f_path.mnt) : -+ curr->filename, curr->filename, -+ &fakeip, 0, type, -+ protocol, GR_BIND, &current->signal->saved_ip); -+ } -+ /* we'll log when they use connect or bind */ -+ goto exit; -+ } -+ -+exit_fail: -+ if (domain == PF_INET) -+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), -+ gr_socktype_to_name(type), gr_proto_to_name(protocol)); -+ else -+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), -+ gr_socktype_to_name(type), protocol); -+ -+ return 0; -+exit: -+ return 1; -+} -+ -+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) -+{ -+ if ((ip->mode & mode) && -+ (ip_port >= ip->low) && -+ (ip_port <= ip->high) && -+ ((ntohl(ip_addr) & our_netmask) == -+ (ntohl(our_addr) & our_netmask)) -+ && (ip->proto[protocol / 32] & (1 << (protocol % 32))) -+ && (ip->type & (1 << type))) { -+ if (ip->mode & GR_INVERT) -+ return 2; // specifically denied -+ else -+ return 1; // allowed -+ } -+ -+ return 0; // not specifically allowed, may continue parsing -+} -+ -+static int -+gr_search_connectbind(const int full_mode, struct sock *sk, -+ struct sockaddr_in *addr, const int type) -+{ -+ char iface[IFNAMSIZ] = {0}; -+ struct acl_subject_label *curr; -+ struct acl_ip_label *ip; -+ struct inet_sock *isk; -+ struct net_device *dev; -+ struct in_device *idev; -+ unsigned long i; -+ int ret; -+ int mode = full_mode & (GR_BIND | GR_CONNECT); -+ __u32 ip_addr = 0; -+ __u32 our_addr; -+ __u32 our_netmask; -+ char *p; -+ __u16 ip_port = 0; -+ const struct cred *cred = current_cred(); -+ -+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) -+ return 0; -+ -+ curr = current->acl; -+ isk = inet_sk(sk); -+ -+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */ -+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) -+ addr->sin_addr.s_addr = curr->inaddr_any_override; -+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) { -+ struct sockaddr_in saddr; -+ int err; -+ -+ saddr.sin_family = AF_INET; -+ saddr.sin_addr.s_addr = curr->inaddr_any_override; -+ saddr.sin_port = isk->inet_sport; -+ -+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); -+ if (err) -+ return err; -+ -+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); -+ if (err) -+ return err; -+ } -+ -+ if (!curr->ips) -+ return 0; -+ -+ ip_addr = addr->sin_addr.s_addr; -+ ip_port = ntohs(addr->sin_port); -+ -+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { -+ security_learn(GR_IP_LEARN_MSG, current->role->rolename, -+ current->role->roletype, cred->uid, -+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry, -+ current->exec_file->f_path.mnt) : -+ curr->filename, curr->filename, -+ &ip_addr, ip_port, type, -+ sk->sk_protocol, mode, &current->signal->saved_ip); -+ return 0; -+ } -+ -+ for (i = 0; i < curr->ip_num; i++) { -+ ip = *(curr->ips + i); -+ if (ip->iface != NULL) { -+ strncpy(iface, ip->iface, IFNAMSIZ - 1); -+ p = strchr(iface, ':'); -+ if (p != NULL) -+ *p = '\0'; -+ dev = dev_get_by_name(sock_net(sk), iface); -+ if (dev == NULL) -+ continue; -+ idev = in_dev_get(dev); -+ if (idev == NULL) { -+ dev_put(dev); -+ continue; -+ } -+ rcu_read_lock(); -+ for_ifa(idev) { -+ if (!strcmp(ip->iface, ifa->ifa_label)) { -+ our_addr = ifa->ifa_address; -+ our_netmask = 0xffffffff; -+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); -+ if (ret == 1) { -+ rcu_read_unlock(); -+ in_dev_put(idev); -+ dev_put(dev); -+ return 0; -+ } else if (ret == 2) { -+ rcu_read_unlock(); -+ in_dev_put(idev); -+ dev_put(dev); -+ goto denied; -+ } -+ } -+ } endfor_ifa(idev); -+ rcu_read_unlock(); -+ in_dev_put(idev); -+ dev_put(dev); -+ } else { -+ our_addr = ip->addr; -+ our_netmask = ip->netmask; -+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); -+ if (ret == 1) -+ return 0; -+ else if (ret == 2) -+ goto denied; -+ } -+ } -+ -+denied: -+ if (mode == GR_BIND) -+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); -+ else if (mode == GR_CONNECT) -+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); -+ -+ return -EACCES; -+} -+ -+int -+gr_search_connect(struct socket *sock, struct sockaddr_in *addr) -+{ -+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type); -+} -+ -+int -+gr_search_bind(struct socket *sock, struct sockaddr_in *addr) -+{ -+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type); -+} -+ -+int gr_search_listen(struct socket *sock) -+{ -+ struct sock *sk = sock->sk; -+ struct sockaddr_in addr; -+ -+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; -+ addr.sin_port = inet_sk(sk)->inet_sport; -+ -+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); -+} -+ -+int gr_search_accept(struct socket *sock) -+{ -+ struct sock *sk = sock->sk; -+ struct sockaddr_in addr; -+ -+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; -+ addr.sin_port = inet_sk(sk)->inet_sport; -+ -+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); -+} -+ -+int -+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) -+{ -+ if (addr) -+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); -+ else { -+ struct sockaddr_in sin; -+ const struct inet_sock *inet = inet_sk(sk); -+ -+ sin.sin_addr.s_addr = inet->inet_daddr; -+ sin.sin_port = inet->inet_dport; -+ -+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); -+ } -+} -+ -+int -+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) -+{ -+ struct sockaddr_in sin; -+ -+ if (unlikely(skb->len < sizeof (struct udphdr))) -+ return 0; // skip this packet -+ -+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr; -+ sin.sin_port = udp_hdr(skb)->source; -+ -+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); -+} -diff --git a/grsecurity/gracl_learn.c
b/grsecurity/gracl_learn.c -new file mode 100644 -index 0000000..25f54ef ---- /dev/null -+++ b/grsecurity/gracl_learn.c -@@ -0,0 +1,207 @@ -+#include <linux/kernel.h> -+#include <linux/mm.h> -+#include <linux/sched.h> -+#include <linux/poll.h> -+#include <linux/string.h> -+#include <linux/file.h> -+#include <linux/types.h> -+#include <linux/vmalloc.h> -+#include <linux/grinternal.h> -+ -+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, -+ size_t count, loff_t *ppos); -+extern int gr_acl_is_enabled(void); -+ -+static DECLARE_WAIT_QUEUE_HEAD(learn_wait); -+static int gr_learn_attached; -+ -+/* use a 512k buffer */ -+#define LEARN_BUFFER_SIZE (512 * 1024) -+ -+static DEFINE_SPINLOCK(gr_learn_lock); -+static DEFINE_MUTEX(gr_learn_user_mutex); -+ -+/* we need to maintain two buffers, so that the kernel context of grlearn -+ uses a semaphore around the userspace copying, and the other kernel contexts -+ use a spinlock when copying into the buffer, since they cannot sleep -+*/ -+static char *learn_buffer; -+static char *learn_buffer_user; -+static int learn_buffer_len; -+static int learn_buffer_user_len; -+ -+static ssize_t -+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) -+{ -+ DECLARE_WAITQUEUE(wait, current); -+ ssize_t retval = 0; -+ -+ add_wait_queue(&learn_wait, &wait); -+ set_current_state(TASK_INTERRUPTIBLE); -+ do { -+ mutex_lock(&gr_learn_user_mutex); -+ spin_lock(&gr_learn_lock); -+ if (learn_buffer_len) -+ break; -+ spin_unlock(&gr_learn_lock); -+ mutex_unlock(&gr_learn_user_mutex); -+ if (file->f_flags & O_NONBLOCK) { -+ retval = -EAGAIN; -+ goto out; -+ } -+ if (signal_pending(current)) { -+ retval = -ERESTARTSYS; -+ goto out; -+ } -+ -+ schedule(); -+ } while (1); -+ -+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); -+ learn_buffer_user_len = learn_buffer_len; -+ retval = learn_buffer_len; -+ learn_buffer_len = 0; -+ -+ spin_unlock(&gr_learn_lock); -+ -+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) -+ retval = -EFAULT; -+ -+ mutex_unlock(&gr_learn_user_mutex); -+out: -+ set_current_state(TASK_RUNNING); -+ remove_wait_queue(&learn_wait, &wait); -+ return retval; -+} -+ -+static unsigned int -+poll_learn(struct file * file, poll_table * wait) -+{ -+ poll_wait(file, &learn_wait, wait); -+ -+ if (learn_buffer_len) -+ return (POLLIN | POLLRDNORM); -+ -+ return 0; -+} -+ -+void -+gr_clear_learn_entries(void) -+{ -+ char *tmp; -+ -+ mutex_lock(&gr_learn_user_mutex); -+ spin_lock(&gr_learn_lock); -+ tmp = learn_buffer; -+ learn_buffer = NULL; -+ spin_unlock(&gr_learn_lock); -+ if (tmp) -+ vfree(tmp); -+ if (learn_buffer_user != NULL) { -+ vfree(learn_buffer_user); -+ learn_buffer_user = NULL; -+ } -+ learn_buffer_len = 0; -+ mutex_unlock(&gr_learn_user_mutex); -+ -+ return; -+} -+ -+void -+gr_add_learn_entry(const char *fmt, ...) 
-+{ -+ va_list args; -+ unsigned int len; -+ -+ if (!gr_learn_attached) -+ return; -+ -+ spin_lock(&gr_learn_lock); -+ -+ /* leave a gap at the end so we know when it's "full" but don't have to -+ compute the exact length of the string we're trying to append -+ */ -+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { -+ spin_unlock(&gr_learn_lock); -+ wake_up_interruptible(&learn_wait); -+ return; -+ } -+ if (learn_buffer == NULL) { -+ spin_unlock(&gr_learn_lock); -+ return; -+ } -+ -+ va_start(args, fmt); -+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); -+ va_end(args); -+ -+ learn_buffer_len += len + 1; -+ -+ spin_unlock(&gr_learn_lock); -+ wake_up_interruptible(&learn_wait); -+ -+ return; -+} -+ -+static int -+open_learn(struct inode *inode, struct file *file) -+{ -+ if (file->f_mode & FMODE_READ && gr_learn_attached) -+ return -EBUSY; -+ if (file->f_mode & FMODE_READ) { -+ int retval = 0; -+ mutex_lock(&gr_learn_user_mutex); -+ if (learn_buffer == NULL) -+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE); -+ if (learn_buffer_user == NULL) -+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); -+ if (learn_buffer == NULL) { -+ retval = -ENOMEM; -+ goto out_error; -+ } -+ if (learn_buffer_user == NULL) { -+ retval = -ENOMEM; -+ goto out_error; -+ } -+ learn_buffer_len = 0; -+ learn_buffer_user_len = 0; -+ gr_learn_attached = 1; -+out_error: -+ mutex_unlock(&gr_learn_user_mutex); -+ return retval; -+ } -+ return 0; -+} -+ -+static int -+close_learn(struct inode *inode, struct file *file) -+{ -+ if (file->f_mode & FMODE_READ) { -+ char *tmp = NULL; -+ mutex_lock(&gr_learn_user_mutex); -+ spin_lock(&gr_learn_lock); -+ tmp = learn_buffer; -+ learn_buffer = NULL; -+ spin_unlock(&gr_learn_lock); -+ if (tmp) -+ vfree(tmp); -+ if (learn_buffer_user != NULL) { -+ vfree(learn_buffer_user); -+ learn_buffer_user = NULL; -+ } -+ learn_buffer_len = 0; -+ learn_buffer_user_len = 0; -+ gr_learn_attached = 0; -+ mutex_unlock(&gr_learn_user_mutex); -+ } -+ -+ return 0; -+} -+ -+const struct file_operations grsec_fops = { -+ .read = read_learn, -+ .write = write_grsec_handler, -+ .open = open_learn, -+ .release = close_learn, -+ .poll = poll_learn, -+}; -diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c -new file mode 100644 -index 0000000..39645c9 ---- /dev/null -+++ b/grsecurity/gracl_res.c -@@ -0,0 +1,68 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/gracl.h> -+#include <linux/grinternal.h> -+ -+static const char *restab_log[] = { -+ [RLIMIT_CPU] = "RLIMIT_CPU", -+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE", -+ [RLIMIT_DATA] = "RLIMIT_DATA", -+ [RLIMIT_STACK] = "RLIMIT_STACK", -+ [RLIMIT_CORE] = "RLIMIT_CORE", -+ [RLIMIT_RSS] = "RLIMIT_RSS", -+ [RLIMIT_NPROC] = "RLIMIT_NPROC", -+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE", -+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", -+ [RLIMIT_AS] = "RLIMIT_AS", -+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS", -+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", -+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", -+ [RLIMIT_NICE] = "RLIMIT_NICE", -+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", -+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME", -+ [GR_CRASH_RES] = "RLIMIT_CRASH" -+}; -+ -+void -+gr_log_resource(const struct task_struct *task, -+ const int res, const unsigned long wanted, const int gt) -+{ -+ const struct cred *cred; -+ unsigned long rlim; -+ -+ if (!gr_acl_is_enabled() && !grsec_resource_logging) -+ return; -+ -+ // not yet supported resource -+ if (unlikely(!restab_log[res])) -+ return; -+ -+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) -+ rlim 
= task_rlimit_max(task, res); -+ else -+ rlim = task_rlimit(task, res); -+ -+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) -+ return; -+ -+ rcu_read_lock(); -+ cred = __task_cred(task); -+ -+ if (res == RLIMIT_NPROC && -+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || -+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) -+ goto out_rcu_unlock; -+ else if (res == RLIMIT_MEMLOCK && -+ cap_raised(cred->cap_effective, CAP_IPC_LOCK)) -+ goto out_rcu_unlock; -+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE)) -+ goto out_rcu_unlock; -+ rcu_read_unlock(); -+ -+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim); -+ -+ return; -+out_rcu_unlock: -+ rcu_read_unlock(); -+ return; -+} -diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c -new file mode 100644 -index 0000000..5556be3 ---- /dev/null -+++ b/grsecurity/gracl_segv.c -@@ -0,0 +1,299 @@ -+#include <linux/kernel.h> -+#include <linux/mm.h> -+#include <asm/uaccess.h> -+#include <asm/errno.h> -+#include <asm/mman.h> -+#include <net/sock.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/net.h> -+#include <linux/in.h> -+#include <linux/slab.h> -+#include <linux/types.h> -+#include <linux/sched.h> -+#include <linux/timer.h> -+#include <linux/gracl.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+static struct crash_uid *uid_set; -+static unsigned short uid_used; -+static DEFINE_SPINLOCK(gr_uid_lock); -+extern rwlock_t gr_inode_lock; -+extern struct acl_subject_label * -+ lookup_acl_subj_label(const ino_t inode, const dev_t dev, -+ struct acl_role_label *role); -+ -+#ifdef CONFIG_BTRFS_FS -+extern dev_t get_btrfs_dev_from_inode(struct inode *inode); -+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -+#endif -+ -+static inline dev_t __get_dev(const struct dentry *dentry) -+{ -+#ifdef CONFIG_BTRFS_FS -+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr) -+ return get_btrfs_dev_from_inode(dentry->d_inode); -+ else -+#endif -+ return dentry->d_inode->i_sb->s_dev; -+} -+ -+int -+gr_init_uidset(void) -+{ -+ uid_set = -+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); -+ uid_used = 0; -+ -+ return uid_set ? 
1 : 0; -+} -+ -+void -+gr_free_uidset(void) -+{ -+ if (uid_set) -+ kfree(uid_set); -+ -+ return; -+} -+ -+int -+gr_find_uid(const uid_t uid) -+{ -+ struct crash_uid *tmp = uid_set; -+ uid_t buid; -+ int low = 0, high = uid_used - 1, mid; -+ -+ while (high >= low) { -+ mid = (low + high) >> 1; -+ buid = tmp[mid].uid; -+ if (buid == uid) -+ return mid; -+ if (buid > uid) -+ high = mid - 1; -+ if (buid < uid) -+ low = mid + 1; -+ } -+ -+ return -1; -+} -+ -+static __inline__ void -+gr_insertsort(void) -+{ -+ unsigned short i, j; -+ struct crash_uid index; -+ -+ for (i = 1; i < uid_used; i++) { -+ index = uid_set[i]; -+ j = i; -+ while ((j > 0) && uid_set[j - 1].uid > index.uid) { -+ uid_set[j] = uid_set[j - 1]; -+ j--; -+ } -+ uid_set[j] = index; -+ } -+ -+ return; -+} -+ -+static __inline__ void -+gr_insert_uid(const uid_t uid, const unsigned long expires) -+{ -+ int loc; -+ -+ if (uid_used == GR_UIDTABLE_MAX) -+ return; -+ -+ loc = gr_find_uid(uid); -+ -+ if (loc >= 0) { -+ uid_set[loc].expires = expires; -+ return; -+ } -+ -+ uid_set[uid_used].uid = uid; -+ uid_set[uid_used].expires = expires; -+ uid_used++; -+ -+ gr_insertsort(); -+ -+ return; -+} -+ -+void -+gr_remove_uid(const unsigned short loc) -+{ -+ unsigned short i; -+ -+ for (i = loc + 1; i < uid_used; i++) -+ uid_set[i - 1] = uid_set[i]; -+ -+ uid_used--; -+ -+ return; -+} -+ -+int -+gr_check_crash_uid(const uid_t uid) -+{ -+ int loc; -+ int ret = 0; -+ -+ if (unlikely(!gr_acl_is_enabled())) -+ return 0; -+ -+ spin_lock(&gr_uid_lock); -+ loc = gr_find_uid(uid); -+ -+ if (loc < 0) -+ goto out_unlock; -+ -+ if (time_before_eq(uid_set[loc].expires, get_seconds())) -+ gr_remove_uid(loc); -+ else -+ ret = 1; -+ -+out_unlock: -+ spin_unlock(&gr_uid_lock); -+ return ret; -+} -+ -+static __inline__ int -+proc_is_setxid(const struct cred *cred) -+{ -+ if (cred->uid != cred->euid || cred->uid != cred->suid || -+ cred->uid != cred->fsuid) -+ return 1; -+ if (cred->gid != cred->egid || cred->gid != cred->sgid || -+ cred->gid != cred->fsgid) -+ return 1; -+ -+ return 0; -+} -+ -+extern int gr_fake_force_sig(int sig, struct task_struct *t); -+ -+void -+gr_handle_crash(struct task_struct *task, const int sig) -+{ -+ struct acl_subject_label *curr; -+ struct task_struct *tsk, *tsk2; -+ const struct cred *cred; -+ const struct cred *cred2; -+ -+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) -+ return; -+ -+ if (unlikely(!gr_acl_is_enabled())) -+ return; -+ -+ curr = task->acl; -+ -+ if (!(curr->resmask & (1 << GR_CRASH_RES))) -+ return; -+ -+ if (time_before_eq(curr->expires, get_seconds())) { -+ curr->expires = 0; -+ curr->crashes = 0; -+ } -+ -+ curr->crashes++; -+ -+ if (!curr->expires) -+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; -+ -+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && -+ time_after(curr->expires, get_seconds())) { -+ rcu_read_lock(); -+ cred = __task_cred(task); -+ if (cred->uid && proc_is_setxid(cred)) { -+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); -+ spin_lock(&gr_uid_lock); -+ gr_insert_uid(cred->uid, curr->expires); -+ spin_unlock(&gr_uid_lock); -+ curr->expires = 0; -+ curr->crashes = 0; -+ read_lock(&tasklist_lock); -+ do_each_thread(tsk2, tsk) { -+ cred2 = __task_cred(tsk); -+ if (tsk != task && cred2->uid == cred->uid) -+ gr_fake_force_sig(SIGKILL, tsk); -+ } while_each_thread(tsk2, tsk); -+ read_unlock(&tasklist_lock); -+ } else { -+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, 
curr->res[GR_CRASH_RES].rlim_max); -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+ do_each_thread(tsk2, tsk) { -+ if (likely(tsk != task)) { -+ // if this thread has the same subject as the one that triggered -+ // RES_CRASH and it's the same binary, kill it -+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) -+ gr_fake_force_sig(SIGKILL, tsk); -+ } -+ } while_each_thread(tsk2, tsk); -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ } -+ rcu_read_unlock(); -+ } -+ -+ return; -+} -+ -+int -+gr_check_crash_exec(const struct file *filp) -+{ -+ struct acl_subject_label *curr; -+ -+ if (unlikely(!gr_acl_is_enabled())) -+ return 0; -+ -+ read_lock(&gr_inode_lock); -+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino, -+ __get_dev(filp->f_path.dentry), -+ current->role); -+ read_unlock(&gr_inode_lock); -+ -+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || -+ (!curr->crashes && !curr->expires)) -+ return 0; -+ -+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && -+ time_after(curr->expires, get_seconds())) -+ return 1; -+ else if (time_before_eq(curr->expires, get_seconds())) { -+ curr->crashes = 0; -+ curr->expires = 0; -+ } -+ -+ return 0; -+} -+ -+void -+gr_handle_alertkill(struct task_struct *task) -+{ -+ struct acl_subject_label *curracl; -+ __u32 curr_ip; -+ struct task_struct *p, *p2; -+ -+ if (unlikely(!gr_acl_is_enabled())) -+ return; -+ -+ curracl = task->acl; -+ curr_ip = task->signal->curr_ip; -+ -+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { -+ read_lock(&tasklist_lock); -+ do_each_thread(p2, p) { -+ if (p->signal->curr_ip == curr_ip) -+ gr_fake_force_sig(SIGKILL, p); -+ } while_each_thread(p2, p); -+ read_unlock(&tasklist_lock); -+ } else if (curracl->mode & GR_KILLPROC) -+ gr_fake_force_sig(SIGKILL, task); -+ -+ return; -+} -diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c -new file mode 100644 -index 0000000..9d83a69 ---- /dev/null -+++ b/grsecurity/gracl_shm.c -@@ -0,0 +1,40 @@ -+#include <linux/kernel.h> -+#include <linux/mm.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/ipc.h> -+#include <linux/gracl.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+int -+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, -+ const time_t shm_createtime, const uid_t cuid, const int shmid) -+{ -+ struct task_struct *task; -+ -+ if (!gr_acl_is_enabled()) -+ return 1; -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ -+ task = find_task_by_vpid(shm_cprid); -+ -+ if (unlikely(!task)) -+ task = find_task_by_vpid(shm_lapid); -+ -+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || -+ (task->pid == shm_lapid)) && -+ (task->acl->mode & GR_PROTSHM) && -+ (task->acl != current->acl))) { -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); -+ return 0; -+ } -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ -+ return 1; -+} -diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c -new file mode 100644 -index 0000000..bc0be01 ---- /dev/null -+++ b/grsecurity/grsec_chdir.c -@@ -0,0 +1,19 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/fs.h> -+#include <linux/file.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+void -+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR -+ if 
((grsec_enable_chdir && grsec_enable_group && -+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && -+ !grsec_enable_group)) { -+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); -+ } -+#endif -+ return; -+} -diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c -new file mode 100644 -index 0000000..a2dc675 ---- /dev/null -+++ b/grsecurity/grsec_chroot.c -@@ -0,0 +1,351 @@ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/mount.h> -+#include <linux/types.h> -+#include <linux/pid_namespace.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+void gr_set_chroot_entries(struct task_struct *task, struct path *path) -+{ -+#ifdef CONFIG_GRKERNSEC -+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry && -+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root) -+ task->gr_is_chrooted = 1; -+ else -+ task->gr_is_chrooted = 0; -+ -+ task->gr_chroot_dentry = path->dentry; -+#endif -+ return; -+} -+ -+void gr_clear_chroot_entries(struct task_struct *task) -+{ -+#ifdef CONFIG_GRKERNSEC -+ task->gr_is_chrooted = 0; -+ task->gr_chroot_dentry = NULL; -+#endif -+ return; -+} -+ -+int -+gr_handle_chroot_unix(const pid_t pid) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX -+ struct task_struct *p; -+ -+ if (unlikely(!grsec_enable_chroot_unix)) -+ return 1; -+ -+ if (likely(!proc_is_chrooted(current))) -+ return 1; -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ p = find_task_by_vpid_unrestricted(pid); -+ if (unlikely(p && !have_same_root(current, p))) { -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); -+ return 0; -+ } -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+#endif -+ return 1; -+} -+ -+int -+gr_handle_chroot_nice(void) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE -+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE -+ if (grsec_enable_chroot_nice && (niceval < task_nice(p)) -+ && proc_is_chrooted(current)) { -+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_rawio(const struct inode *inode) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) && -+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) -+ return 1; -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+ struct task_struct *p; -+ int ret = 0; -+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid) -+ return ret; -+ -+ read_lock(&tasklist_lock); -+ do_each_pid_task(pid, type, p) { -+ if (!have_same_root(current, p)) { -+ ret = 1; -+ goto out; -+ } -+ } while_each_pid_task(pid, type, p); -+out: -+ read_unlock(&tasklist_lock); -+ return ret; -+#endif -+ return 0; -+} -+ -+int -+gr_pid_is_chrooted(struct task_struct *p) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) -+ return 0; -+ -+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || -+ !have_same_root(current, p)) { -+ return 1; -+ } -+#endif -+ return 0; -+} -+ 
-+EXPORT_SYMBOL(gr_pid_is_chrooted); -+ -+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) -+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) -+{ -+ struct path path, currentroot; -+ int ret = 0; -+ -+ path.dentry = (struct dentry *)u_dentry; -+ path.mnt = (struct vfsmount *)u_mnt; -+ get_fs_root(current->fs, &currentroot); -+ if (path_is_under(&path, &currentroot)) -+ ret = 1; -+ path_put(&currentroot); -+ -+ return ret; -+} -+#endif -+ -+int -+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR -+ if (!grsec_enable_chroot_fchdir) -+ return 1; -+ -+ if (!proc_is_chrooted(current)) -+ return 1; -+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); -+ return 0; -+ } -+#endif -+ return 1; -+} -+ -+int -+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, -+ const time_t shm_createtime) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT -+ struct task_struct *p; -+ time_t starttime; -+ -+ if (unlikely(!grsec_enable_chroot_shmat)) -+ return 1; -+ -+ if (likely(!proc_is_chrooted(current))) -+ return 1; -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ -+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) { -+ starttime = p->start_time.tv_sec; -+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) { -+ if (have_same_root(current, p)) { -+ goto allow; -+ } else { -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); -+ return 0; -+ } -+ } -+ /* creator exited, pid reuse, fall through to next check */ -+ } -+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) { -+ if (unlikely(!have_same_root(current, p))) { -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); -+ return 0; -+ } -+ } -+ -+allow: -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+#endif -+ return 1; -+} -+ -+void -+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG -+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) -+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); -+#endif -+ return; -+} -+ -+int -+gr_handle_chroot_mknod(const struct dentry *dentry, -+ const struct vfsmount *mnt, const int mode) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD -+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && -+ proc_is_chrooted(current)) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_mount(const struct dentry *dentry, -+ const struct vfsmount *mnt, const char *dev_name) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT -+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { -+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ?
dev_name : "none", dentry, mnt); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_pivot(void) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT -+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE -+ if (grsec_enable_chroot_double && proc_is_chrooted(current) && -+ !gr_is_outside_chroot(dentry, mnt)) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+extern const char *captab_log[]; -+extern int captab_log_entries; -+ -+int -+gr_chroot_is_capable(const int cap) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) { -+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; -+ if (cap_raised(chroot_caps, cap)) { -+ const struct cred *creds = current_cred(); -+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) { -+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]); -+ } -+ return 0; -+ } -+ } -+#endif -+ return 1; -+} -+ -+int -+gr_chroot_is_capable_nolog(const int cap) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) { -+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; -+ if (cap_raised(chroot_caps, cap)) { -+ return 0; -+ } -+ } -+#endif -+ return 1; -+} -+ -+int -+gr_handle_chroot_sysctl(const int op) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL -+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) && -+ proc_is_chrooted(current)) -+ return -EACCES; -+#endif -+ return 0; -+} -+ -+void -+gr_handle_chroot_chdir(struct path *path) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR -+ if (grsec_enable_chroot_chdir) -+ set_fs_pwd(current->fs, path); -+#endif -+ return; -+} -+ -+int -+gr_handle_chroot_chmod(const struct dentry *dentry, -+ const struct vfsmount *mnt, const int mode) -+{ -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD -+ /* allow chmod +s on directories, but not files */ -+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) && -+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && -+ proc_is_chrooted(current)) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c -new file mode 100644 -index 0000000..d81a586 ---- /dev/null -+++ b/grsecurity/grsec_disabled.c -@@ -0,0 +1,439 @@ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/kdev_t.h> -+#include <linux/net.h> -+#include <linux/in.h> -+#include <linux/ip.h> -+#include <linux/skbuff.h> -+#include <linux/sysctl.h> -+ -+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS -+void -+pax_set_initial_flags(struct linux_binprm *bprm) -+{ -+ return; -+} -+#endif -+ -+#ifdef CONFIG_SYSCTL -+__u32 -+gr_handle_sysctl(const struct ctl_table * table, const int op) -+{ -+ return 0; -+} -+#endif -+ -+#ifdef CONFIG_TASKSTATS -+int gr_is_taskstats_denied(int pid) -+{ -+ return 0; -+} -+#endif -+ -+int -+gr_acl_is_enabled(void) -+{ -+ return 0; -+} -+ -+void -+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) -+{ -+ return; -+} -+ -+int -+gr_handle_rawio(const struct inode *inode) -+{ -+ return 0; -+} -+ -+void 
-+gr_acl_handle_psacct(struct task_struct *task, const long code) -+{ -+ return; -+} -+ -+int -+gr_handle_ptrace(struct task_struct *task, const long request) -+{ -+ return 0; -+} -+ -+int -+gr_handle_proc_ptrace(struct task_struct *task) -+{ -+ return 0; -+} -+ -+void -+gr_learn_resource(const struct task_struct *task, -+ const int res, const unsigned long wanted, const int gt) -+{ -+ return; -+} -+ -+int -+gr_set_acls(const int type) -+{ -+ return 0; -+} -+ -+int -+gr_check_hidden_task(const struct task_struct *tsk) -+{ -+ return 0; -+} -+ -+int -+gr_check_protected_task(const struct task_struct *task) -+{ -+ return 0; -+} -+ -+int -+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) -+{ -+ return 0; -+} -+ -+void -+gr_copy_label(struct task_struct *tsk) -+{ -+ return; -+} -+ -+void -+gr_set_pax_flags(struct task_struct *task) -+{ -+ return; -+} -+ -+int -+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, -+ const int unsafe_share) -+{ -+ return 0; -+} -+ -+void -+gr_handle_delete(const ino_t ino, const dev_t dev) -+{ -+ return; -+} -+ -+void -+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+ return; -+} -+ -+void -+gr_handle_crash(struct task_struct *task, const int sig) -+{ -+ return; -+} -+ -+int -+gr_check_crash_exec(const struct file *filp) -+{ -+ return 0; -+} -+ -+int -+gr_check_crash_uid(const uid_t uid) -+{ -+ return 0; -+} -+ -+void -+gr_handle_rename(struct inode *old_dir, struct inode *new_dir, -+ struct dentry *old_dentry, -+ struct dentry *new_dentry, -+ struct vfsmount *mnt, const __u8 replace) -+{ -+ return; -+} -+ -+int -+gr_search_socket(const int family, const int type, const int protocol) -+{ -+ return 1; -+} -+ -+int -+gr_search_connectbind(const int mode, const struct socket *sock, -+ const struct sockaddr_in *addr) -+{ -+ return 0; -+} -+ -+void -+gr_handle_alertkill(struct task_struct *task) -+{ -+ return; -+} -+ -+__u32 -+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_hidden_file(const struct dentry * dentry, -+ const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, -+ int acc_mode) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+int -+gr_acl_handle_mmap(const struct file *file, const unsigned long prot, -+ unsigned int *vm_flags) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_truncate(const struct dentry * dentry, -+ const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_access(const struct dentry * dentry, -+ const struct vfsmount * mnt, const int fmode) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt, -+ mode_t mode) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt, -+ mode_t mode) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+void -+grsecurity_init(void) -+{ 
-+ return; -+} -+ -+__u32 -+gr_acl_handle_mknod(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, -+ const int mode) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_mkdir(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_symlink(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, const char *from) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_link(const struct dentry * new_dentry, -+ const struct dentry * parent_dentry, -+ const struct vfsmount * parent_mnt, -+ const struct dentry * old_dentry, -+ const struct vfsmount * old_mnt, const char *to) -+{ -+ return 1; -+} -+ -+int -+gr_acl_handle_rename(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ const struct dentry *old_dentry, -+ const struct inode *old_parent_inode, -+ const struct vfsmount *old_mnt, const char *newname) -+{ -+ return 0; -+} -+ -+int -+gr_acl_handle_filldir(const struct file *file, const char *name, -+ const int namelen, const ino_t ino) -+{ -+ return 1; -+} -+ -+int -+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, -+ const time_t shm_createtime, const uid_t cuid, const int shmid) -+{ -+ return 1; -+} -+ -+int -+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) -+{ -+ return 0; -+} -+ -+int -+gr_search_accept(const struct socket *sock) -+{ -+ return 0; -+} -+ -+int -+gr_search_listen(const struct socket *sock) -+{ -+ return 0; -+} -+ -+int -+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) -+{ -+ return 0; -+} -+ -+__u32 -+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) -+{ -+ return 1; -+} -+ -+__u32 -+gr_acl_handle_creat(const struct dentry * dentry, -+ const struct dentry * p_dentry, -+ const struct vfsmount * p_mnt, int open_flags, int acc_mode, -+ const int imode) -+{ -+ return 1; -+} -+ -+void -+gr_acl_handle_exit(void) -+{ -+ return; -+} -+ -+int -+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) -+{ -+ return 1; -+} -+ -+void -+gr_set_role_label(const uid_t uid, const gid_t gid) -+{ -+ return; -+} -+ -+int -+gr_acl_handle_procpidmem(const struct task_struct *task) -+{ -+ return 0; -+} -+ -+int -+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) -+{ -+ return 0; -+} -+ -+int -+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) -+{ -+ return 0; -+} -+ -+void -+gr_set_kernel_label(struct task_struct *task) -+{ -+ return; -+} -+ -+int -+gr_check_user_change(int real, int effective, int fs) -+{ -+ return 0; -+} -+ -+int -+gr_check_group_change(int real, int effective, int fs) -+{ -+ return 0; -+} -+ -+int gr_acl_enable_at_secure(void) -+{ -+ return 0; -+} -+ -+dev_t gr_get_dev_from_dentry(struct dentry *dentry) -+{ -+ return dentry->d_inode->i_sb->s_dev; -+} -+ -+EXPORT_SYMBOL(gr_learn_resource); -+EXPORT_SYMBOL(gr_set_kernel_label); -+#ifdef CONFIG_SECURITY -+EXPORT_SYMBOL(gr_check_user_change); -+EXPORT_SYMBOL(gr_check_group_change); -+#endif -diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c -new file mode 100644 -index 0000000..2b05ada ---- /dev/null -+++ b/grsecurity/grsec_exec.c -@@ -0,0 +1,146 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/binfmts.h> -+#include <linux/fs.h> 
-+#include <linux/types.h> -+#include <linux/grdefs.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/capability.h> -+#include <linux/module.h> -+ -+#include <asm/uaccess.h> -+ -+#ifdef CONFIG_GRKERNSEC_EXECLOG -+static char gr_exec_arg_buf[132]; -+static DEFINE_MUTEX(gr_exec_arg_mutex); -+#endif -+ -+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr); -+ -+void -+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv) -+{ -+#ifdef CONFIG_GRKERNSEC_EXECLOG -+ char *grarg = gr_exec_arg_buf; -+ unsigned int i, x, execlen = 0; -+ char c; -+ -+ if (!((grsec_enable_execlog && grsec_enable_group && -+ in_group_p(grsec_audit_gid)) -+ || (grsec_enable_execlog && !grsec_enable_group))) -+ return; -+ -+ mutex_lock(&gr_exec_arg_mutex); -+ memset(grarg, 0, sizeof(gr_exec_arg_buf)); -+ -+ for (i = 0; i < bprm->argc && execlen < 128; i++) { -+ const char __user *p; -+ unsigned int len; -+ -+ p = get_user_arg_ptr(argv, i); -+ if (IS_ERR(p)) -+ goto log; -+ -+ len = strnlen_user(p, 128 - execlen); -+ if (len > 128 - execlen) -+ len = 128 - execlen; -+ else if (len > 0) -+ len--; -+ if (copy_from_user(grarg + execlen, p, len)) -+ goto log; -+ -+ /* rewrite unprintable characters */ -+ for (x = 0; x < len; x++) { -+ c = *(grarg + execlen + x); -+ if (c < 32 || c > 126) -+ *(grarg + execlen + x) = ' '; -+ } -+ -+ execlen += len; -+ *(grarg + execlen) = ' '; -+ *(grarg + execlen + 1) = '\0'; -+ execlen++; -+ } -+ -+ log: -+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry, -+ bprm->file->f_path.mnt, grarg); -+ mutex_unlock(&gr_exec_arg_mutex); -+#endif -+ return; -+} -+ -+#ifdef CONFIG_GRKERNSEC -+extern int gr_acl_is_capable(const int cap); -+extern int gr_acl_is_capable_nolog(const int cap); -+extern int gr_chroot_is_capable(const int cap); -+extern int gr_chroot_is_capable_nolog(const int cap); -+#endif -+ -+const char *captab_log[] = { -+ "CAP_CHOWN", -+ "CAP_DAC_OVERRIDE", -+ "CAP_DAC_READ_SEARCH", -+ "CAP_FOWNER", -+ "CAP_FSETID", -+ "CAP_KILL", -+ "CAP_SETGID", -+ "CAP_SETUID", -+ "CAP_SETPCAP", -+ "CAP_LINUX_IMMUTABLE", -+ "CAP_NET_BIND_SERVICE", -+ "CAP_NET_BROADCAST", -+ "CAP_NET_ADMIN", -+ "CAP_NET_RAW", -+ "CAP_IPC_LOCK", -+ "CAP_IPC_OWNER", -+ "CAP_SYS_MODULE", -+ "CAP_SYS_RAWIO", -+ "CAP_SYS_CHROOT", -+ "CAP_SYS_PTRACE", -+ "CAP_SYS_PACCT", -+ "CAP_SYS_ADMIN", -+ "CAP_SYS_BOOT", -+ "CAP_SYS_NICE", -+ "CAP_SYS_RESOURCE", -+ "CAP_SYS_TIME", -+ "CAP_SYS_TTY_CONFIG", -+ "CAP_MKNOD", -+ "CAP_LEASE", -+ "CAP_AUDIT_WRITE", -+ "CAP_AUDIT_CONTROL", -+ "CAP_SETFCAP", -+ "CAP_MAC_OVERRIDE", -+ "CAP_MAC_ADMIN", -+ "CAP_SYSLOG", -+ "CAP_WAKE_ALARM" -+}; -+ -+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]); -+ -+int gr_is_capable(const int cap) -+{ -+#ifdef CONFIG_GRKERNSEC -+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap)) -+ return 1; -+ return 0; -+#else -+ return 1; -+#endif -+} -+ -+int gr_is_capable_nolog(const int cap) -+{ -+#ifdef CONFIG_GRKERNSEC -+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap)) -+ return 1; -+ return 0; -+#else -+ return 1; -+#endif -+} -+ -+EXPORT_SYMBOL(gr_is_capable); -+EXPORT_SYMBOL(gr_is_capable_nolog); -diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c -new file mode 100644 -index 0000000..d3ee748 ---- /dev/null -+++ b/grsecurity/grsec_fifo.c -@@ -0,0 +1,24 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/fs.h> -+#include <linux/file.h> -+#include <linux/grinternal.h> -+ -+int 
-+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, -+ const struct dentry *dir, const int flag, const int acc_mode) -+{ -+#ifdef CONFIG_GRKERNSEC_FIFO -+ const struct cred *cred = current_cred(); -+ -+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) && -+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) && -+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) && -+ (cred->fsuid != dentry->d_inode->i_uid)) { -+ if (!inode_permission(dentry->d_inode, acc_mode)) -+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c -new file mode 100644 -index 0000000..8ca18bf ---- /dev/null -+++ b/grsecurity/grsec_fork.c -@@ -0,0 +1,23 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/errno.h> -+ -+void -+gr_log_forkfail(const int retval) -+{ -+#ifdef CONFIG_GRKERNSEC_FORKFAIL -+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) { -+ switch (retval) { -+ case -EAGAIN: -+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN"); -+ break; -+ case -ENOMEM: -+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM"); -+ break; -+ } -+ } -+#endif -+ return; -+} -diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c -new file mode 100644 -index 0000000..356ef00 ---- /dev/null -+++ b/grsecurity/grsec_init.c -@@ -0,0 +1,269 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/gracl.h> -+#include <linux/slab.h> -+#include <linux/vmalloc.h> -+#include <linux/percpu.h> -+#include <linux/module.h> -+ -+int grsec_enable_brute; -+int grsec_enable_link; -+int grsec_enable_dmesg; -+int grsec_enable_harden_ptrace; -+int grsec_enable_fifo; -+int grsec_enable_execlog; -+int grsec_enable_signal; -+int grsec_enable_forkfail; -+int grsec_enable_audit_ptrace; -+int grsec_enable_time; -+int grsec_enable_audit_textrel; -+int grsec_enable_group; -+int grsec_audit_gid; -+int grsec_enable_chdir; -+int grsec_enable_mount; -+int grsec_enable_rofs; -+int grsec_enable_chroot_findtask; -+int grsec_enable_chroot_mount; -+int grsec_enable_chroot_shmat; -+int grsec_enable_chroot_fchdir; -+int grsec_enable_chroot_double; -+int grsec_enable_chroot_pivot; -+int grsec_enable_chroot_chdir; -+int grsec_enable_chroot_chmod; -+int grsec_enable_chroot_mknod; -+int grsec_enable_chroot_nice; -+int grsec_enable_chroot_execlog; -+int grsec_enable_chroot_caps; -+int grsec_enable_chroot_sysctl; -+int grsec_enable_chroot_unix; -+int grsec_enable_tpe; -+int grsec_tpe_gid; -+int grsec_enable_blackhole; -+#ifdef CONFIG_IPV6_MODULE -+EXPORT_SYMBOL(grsec_enable_blackhole); -+#endif -+int grsec_lastack_retries; -+int grsec_enable_tpe_all; -+int grsec_enable_tpe_invert; -+int grsec_enable_socket_all; -+int grsec_socket_all_gid; -+int grsec_enable_socket_client; -+int grsec_socket_client_gid; -+int grsec_enable_socket_server; -+int grsec_socket_server_gid; -+int grsec_resource_logging; -+int grsec_disable_privio; -+int grsec_enable_log_rwxmaps; -+int grsec_lock; -+ -+DEFINE_SPINLOCK(grsec_alert_lock); -+unsigned long grsec_alert_wtime = 0; -+unsigned long grsec_alert_fyet = 0; -+ -+DEFINE_SPINLOCK(grsec_audit_lock); -+ -+DEFINE_RWLOCK(grsec_exec_file_lock); -+ -+char *gr_shared_page[4]; -+ -+char *gr_alert_log_fmt; -+char *gr_audit_log_fmt; -+char *gr_alert_log_buf; -+char *gr_audit_log_buf; -+ -+extern struct 
gr_arg *gr_usermode; -+extern unsigned char *gr_system_salt; -+extern unsigned char *gr_system_sum; -+ -+void __init -+grsecurity_init(void) -+{ -+ int j; -+ /* create the per-cpu shared pages */ -+ -+#ifdef CONFIG_X86 -+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36); -+#endif -+ -+ for (j = 0; j < 4; j++) { -+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long)); -+ if (gr_shared_page[j] == NULL) { -+ panic("Unable to allocate grsecurity shared page"); -+ return; -+ } -+ } -+ -+ /* allocate log buffers */ -+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); -+ if (!gr_alert_log_fmt) { -+ panic("Unable to allocate grsecurity alert log format buffer"); -+ return; -+ } -+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); -+ if (!gr_audit_log_fmt) { -+ panic("Unable to allocate grsecurity audit log format buffer"); -+ return; -+ } -+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); -+ if (!gr_alert_log_buf) { -+ panic("Unable to allocate grsecurity alert log buffer"); -+ return; -+ } -+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); -+ if (!gr_audit_log_buf) { -+ panic("Unable to allocate grsecurity audit log buffer"); -+ return; -+ } -+ -+ /* allocate memory for authentication structure */ -+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); -+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); -+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); -+ -+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) { -+ panic("Unable to allocate grsecurity authentication structure"); -+ return; -+ } -+ -+ -+#ifdef CONFIG_GRKERNSEC_IO -+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO) -+ grsec_disable_privio = 1; -+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON) -+ grsec_disable_privio = 1; -+#else -+ grsec_disable_privio = 0; -+#endif -+#endif -+ -+#ifdef CONFIG_GRKERNSEC_TPE_INVERT -+ /* for backward compatibility, tpe_invert always defaults to on if -+ enabled in the kernel -+ */ -+ grsec_enable_tpe_invert = 1; -+#endif -+ -+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) -+#ifndef CONFIG_GRKERNSEC_SYSCTL -+ grsec_lock = 1; -+#endif -+ -+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL -+ grsec_enable_audit_textrel = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG -+ grsec_enable_log_rwxmaps = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP -+ grsec_enable_group = 1; -+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID; -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR -+ grsec_enable_chdir = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ grsec_enable_harden_ptrace = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT -+ grsec_enable_mount = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_LINK -+ grsec_enable_link = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_BRUTE -+ grsec_enable_brute = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_DMESG -+ grsec_enable_dmesg = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ grsec_enable_blackhole = 1; -+ grsec_lastack_retries = 4; -+#endif -+#ifdef CONFIG_GRKERNSEC_FIFO -+ grsec_enable_fifo = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_EXECLOG -+ grsec_enable_execlog = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_SIGNAL -+ grsec_enable_signal = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_FORKFAIL -+ grsec_enable_forkfail = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_TIME -+ grsec_enable_time = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_RESLOG -+ grsec_resource_logging = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+ grsec_enable_chroot_findtask = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX -+ grsec_enable_chroot_unix = 1; -+#endif -+#ifdef 
CONFIG_GRKERNSEC_CHROOT_MOUNT -+ grsec_enable_chroot_mount = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR -+ grsec_enable_chroot_fchdir = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT -+ grsec_enable_chroot_shmat = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE -+ grsec_enable_audit_ptrace = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE -+ grsec_enable_chroot_double = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT -+ grsec_enable_chroot_pivot = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR -+ grsec_enable_chroot_chdir = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD -+ grsec_enable_chroot_chmod = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD -+ grsec_enable_chroot_mknod = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE -+ grsec_enable_chroot_nice = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG -+ grsec_enable_chroot_execlog = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ grsec_enable_chroot_caps = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL -+ grsec_enable_chroot_sysctl = 1; -+#endif -+#ifdef CONFIG_GRKERNSEC_TPE -+ grsec_enable_tpe = 1; -+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID; -+#ifdef CONFIG_GRKERNSEC_TPE_ALL -+ grsec_enable_tpe_all = 1; -+#endif -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL -+ grsec_enable_socket_all = 1; -+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID; -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT -+ grsec_enable_socket_client = 1; -+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID; -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER -+ grsec_enable_socket_server = 1; -+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID; -+#endif -+#endif -+ -+ return; -+} -diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c -new file mode 100644 -index 0000000..3efe141 ---- /dev/null -+++ b/grsecurity/grsec_link.c -@@ -0,0 +1,43 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/fs.h> -+#include <linux/file.h> -+#include <linux/grinternal.h> -+ -+int -+gr_handle_follow_link(const struct inode *parent, -+ const struct inode *inode, -+ const struct dentry *dentry, const struct vfsmount *mnt) -+{ -+#ifdef CONFIG_GRKERNSEC_LINK -+ const struct cred *cred = current_cred(); -+ -+ if (grsec_enable_link && S_ISLNK(inode->i_mode) && -+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) && -+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) { -+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_hardlink(const struct dentry *dentry, -+ const struct vfsmount *mnt, -+ struct inode *inode, const int mode, const char *to) -+{ -+#ifdef CONFIG_GRKERNSEC_LINK -+ const struct cred *cred = current_cred(); -+ -+ if (grsec_enable_link && cred->fsuid != inode->i_uid && -+ (!S_ISREG(mode) || (mode & S_ISUID) || -+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) || -+ (inode_permission(inode, MAY_READ | MAY_WRITE))) && -+ !capable(CAP_FOWNER) && cred->uid) { -+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to); -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c -new file mode 100644 -index 0000000..a45d2e9 ---- /dev/null -+++ b/grsecurity/grsec_log.c -@@ -0,0 +1,322 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/tty.h> -+#include <linux/fs.h> -+#include <linux/grinternal.h> -+ 
-+#ifdef CONFIG_TREE_PREEMPT_RCU -+#define DISABLE_PREEMPT() preempt_disable() -+#define ENABLE_PREEMPT() preempt_enable() -+#else -+#define DISABLE_PREEMPT() -+#define ENABLE_PREEMPT() -+#endif -+ -+#define BEGIN_LOCKS(x) \ -+ DISABLE_PREEMPT(); \ -+ rcu_read_lock(); \ -+ read_lock(&tasklist_lock); \ -+ read_lock(&grsec_exec_file_lock); \ -+ if (x != GR_DO_AUDIT) \ -+ spin_lock(&grsec_alert_lock); \ -+ else \ -+ spin_lock(&grsec_audit_lock) -+ -+#define END_LOCKS(x) \ -+ if (x != GR_DO_AUDIT) \ -+ spin_unlock(&grsec_alert_lock); \ -+ else \ -+ spin_unlock(&grsec_audit_lock); \ -+ read_unlock(&grsec_exec_file_lock); \ -+ read_unlock(&tasklist_lock); \ -+ rcu_read_unlock(); \ -+ ENABLE_PREEMPT(); \ -+ if (x == GR_DONT_AUDIT) \ -+ gr_handle_alertkill(current) -+ -+enum { -+ FLOODING, -+ NO_FLOODING -+}; -+ -+extern char *gr_alert_log_fmt; -+extern char *gr_audit_log_fmt; -+extern char *gr_alert_log_buf; -+extern char *gr_audit_log_buf; -+ -+static int gr_log_start(int audit) -+{ -+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; -+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; -+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; -+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0) -+ unsigned long curr_secs = get_seconds(); -+ -+ if (audit == GR_DO_AUDIT) -+ goto set_fmt; -+ -+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) { -+ grsec_alert_wtime = curr_secs; -+ grsec_alert_fyet = 0; -+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME) -+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { -+ grsec_alert_fyet++; -+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { -+ grsec_alert_wtime = curr_secs; -+ grsec_alert_fyet++; -+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); -+ return FLOODING; -+ } -+ else return FLOODING; -+ -+set_fmt: -+#endif -+ memset(buf, 0, PAGE_SIZE); -+ if (current->signal->curr_ip && gr_acl_is_enabled()) { -+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) "); -+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename); -+ } else if (current->signal->curr_ip) { -+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: "); -+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip); -+ } else if (gr_acl_is_enabled()) { -+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); -+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); -+ } else { -+ sprintf(fmt, "%s%s", loglevel, "grsec: "); -+ strcpy(buf, fmt); -+ } -+ -+ return NO_FLOODING; -+} -+ -+static void gr_log_middle(int audit, const char *msg, va_list ap) -+ __attribute__ ((format (printf, 2, 0))); -+ -+static void gr_log_middle(int audit, const char *msg, va_list ap) -+{ -+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; -+ unsigned int len = strlen(buf); -+ -+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); -+ -+ return; -+} -+ -+static void gr_log_middle_varargs(int audit, const char *msg, ...) -+ __attribute__ ((format (printf, 2, 3))); -+ -+static void gr_log_middle_varargs(int audit, const char *msg, ...) -+{ -+ char *buf = (audit == GR_DO_AUDIT) ?
gr_audit_log_buf : gr_alert_log_buf; -+ unsigned int len = strlen(buf); -+ va_list ap; -+ -+ va_start(ap, msg); -+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); -+ va_end(ap); -+ -+ return; -+} -+ -+static void gr_log_end(int audit, int append_default) -+{ -+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; -+ -+ if (append_default) { -+ unsigned int len = strlen(buf); -+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent))); -+ } -+ -+ printk("%s\n", buf); -+ -+ return; -+} -+ -+void gr_log_varargs(int audit, const char *msg, int argtypes, ...) -+{ -+ int logtype; -+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied"; -+ char *str1 = NULL, *str2 = NULL, *str3 = NULL; -+ void *voidptr = NULL; -+ int num1 = 0, num2 = 0; -+ unsigned long ulong1 = 0, ulong2 = 0; -+ struct dentry *dentry = NULL; -+ struct vfsmount *mnt = NULL; -+ struct file *file = NULL; -+ struct task_struct *task = NULL; -+ const struct cred *cred, *pcred; -+ va_list ap; -+ -+ BEGIN_LOCKS(audit); -+ logtype = gr_log_start(audit); -+ if (logtype == FLOODING) { -+ END_LOCKS(audit); -+ return; -+ } -+ va_start(ap, argtypes); -+ switch (argtypes) { -+ case GR_TTYSNIFF: -+ task = va_arg(ap, struct task_struct *); -+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid); -+ break; -+ case GR_SYSCTL_HIDDEN: -+ str1 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, result, str1); -+ break; -+ case GR_RBAC: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); -+ break; -+ case GR_RBAC_STR: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ str1 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); -+ break; -+ case GR_STR_RBAC: -+ str1 = va_arg(ap, char *); -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); -+ break; -+ case GR_RBAC_MODE2: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ str1 = va_arg(ap, char *); -+ str2 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); -+ break; -+ case GR_RBAC_MODE3: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ str1 = va_arg(ap, char *); -+ str2 = va_arg(ap, char *); -+ str3 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); -+ break; -+ case GR_FILENAME: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); -+ break; -+ case GR_STR_FILENAME: -+ str1 = va_arg(ap, char *); -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); -+ break; -+ case GR_FILENAME_STR: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ str1 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); -+ break; -+ case GR_FILENAME_TWO_INT: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ 
num1 = va_arg(ap, int); -+ num2 = va_arg(ap, int); -+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); -+ break; -+ case GR_FILENAME_TWO_INT_STR: -+ dentry = va_arg(ap, struct dentry *); -+ mnt = va_arg(ap, struct vfsmount *); -+ num1 = va_arg(ap, int); -+ num2 = va_arg(ap, int); -+ str1 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); -+ break; -+ case GR_TEXTREL: -+ file = va_arg(ap, struct file *); -+ ulong1 = va_arg(ap, unsigned long); -+ ulong2 = va_arg(ap, unsigned long); -+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2); -+ break; -+ case GR_PTRACE: -+ task = va_arg(ap, struct task_struct *); -+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid); -+ break; -+ case GR_RESOURCE: -+ task = va_arg(ap, struct task_struct *); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ ulong1 = va_arg(ap, unsigned long); -+ str1 = va_arg(ap, char *); -+ ulong2 = va_arg(ap, unsigned long); -+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); -+ break; -+ case GR_CAP: -+ task = va_arg(ap, struct task_struct *); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ str1 = va_arg(ap, char *); -+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); -+ break; -+ case GR_SIG: -+ str1 = va_arg(ap, char *); -+ voidptr = va_arg(ap, void *); -+ gr_log_middle_varargs(audit, msg, str1, voidptr); -+ break; -+ case GR_SIG2: -+ task = va_arg(ap, struct task_struct *); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ num1 = va_arg(ap, int); -+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); -+ break; -+ case GR_CRASH1: -+ task = va_arg(ap, struct task_struct *); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ ulong1 = va_arg(ap, unsigned long); -+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1); -+ break; -+ case GR_CRASH2: -+ task = va_arg(ap, struct task_struct *); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ ulong1 = va_arg(ap, unsigned long); -+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1); -+ break; -+ case GR_RWXMAP: -+ file = va_arg(ap, struct file *); -+ gr_log_middle_varargs(audit, msg, file ? 
gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>"); -+ break; -+ case GR_PSACCT: -+ { -+ unsigned int wday, cday; -+ __u8 whr, chr; -+ __u8 wmin, cmin; -+ __u8 wsec, csec; -+ char cur_tty[64] = { 0 }; -+ char parent_tty[64] = { 0 }; -+ -+ task = va_arg(ap, struct task_struct *); -+ wday = va_arg(ap, unsigned int); -+ cday = va_arg(ap, unsigned int); -+ whr = va_arg(ap, int); -+ chr = va_arg(ap, int); -+ wmin = va_arg(ap, int); -+ cmin = va_arg(ap, int); -+ wsec = va_arg(ap, int); -+ csec = va_arg(ap, int); -+ ulong1 = va_arg(ap, unsigned long); -+ cred = __task_cred(task); -+ pcred = __task_cred(task->real_parent); -+ -+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid); -+ } -+ break; -+ default: -+ gr_log_middle(audit, msg, ap); -+ } -+ va_end(ap); -+ // these don't need DEFAULTSECARGS printed on the end -+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2) -+ gr_log_end(audit, 0); -+ else -+ gr_log_end(audit, 1); -+ END_LOCKS(audit); -+} -diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c -new file mode 100644 -index 0000000..6c0416b ---- /dev/null -+++ b/grsecurity/grsec_mem.c -@@ -0,0 +1,33 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/mman.h> -+#include <linux/grinternal.h> -+ -+void -+gr_handle_ioperm(void) -+{ -+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); -+ return; -+} -+ -+void -+gr_handle_iopl(void) -+{ -+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); -+ return; -+} -+ -+void -+gr_handle_mem_readwrite(u64 from, u64 to) -+{ -+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to); -+ return; -+} -+ -+void -+gr_handle_vm86(void) -+{ -+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG); -+ return; -+} -diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c -new file mode 100644 -index 0000000..2131422 ---- /dev/null -+++ b/grsecurity/grsec_mount.c -@@ -0,0 +1,62 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/mount.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+void -+gr_log_remount(const char *devname, const int retval) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT -+ if (grsec_enable_mount && (retval >= 0)) -+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none"); -+#endif -+ return; -+} -+ -+void -+gr_log_unmount(const char *devname, const int retval) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT -+ if (grsec_enable_mount && (retval >= 0)) -+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none"); -+#endif -+ return; -+} -+ -+void -+gr_log_mount(const char *from, const char *to, const int retval) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT -+ if (grsec_enable_mount && (retval >= 0)) -+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? 
from : "none", to); -+#endif -+ return; -+} -+ -+int -+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) -+{ -+#ifdef CONFIG_GRKERNSEC_ROFS -+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { -+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); -+ return -EPERM; -+ } else -+ return 0; -+#endif -+ return 0; -+} -+ -+int -+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) -+{ -+#ifdef CONFIG_GRKERNSEC_ROFS -+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && -+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) { -+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); -+ return -EPERM; -+ } else -+ return 0; -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c -new file mode 100644 -index 0000000..a3b12a0 ---- /dev/null -+++ b/grsecurity/grsec_pax.c -@@ -0,0 +1,36 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/file.h> -+#include <linux/grinternal.h> -+#include <linux/grsecurity.h> -+ -+void -+gr_log_textrel(struct vm_area_struct * vma) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL -+ if (grsec_enable_audit_textrel) -+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); -+#endif -+ return; -+} -+ -+void -+gr_log_rwxmmap(struct file *file) -+{ -+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG -+ if (grsec_enable_log_rwxmaps) -+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file); -+#endif -+ return; -+} -+ -+void -+gr_log_rwxmprotect(struct file *file) -+{ -+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG -+ if (grsec_enable_log_rwxmaps) -+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file); -+#endif -+ return; -+} -diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c -new file mode 100644 -index 0000000..472c1d6 ---- /dev/null -+++ b/grsecurity/grsec_ptrace.c -@@ -0,0 +1,14 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/grinternal.h> -+#include <linux/grsecurity.h> -+ -+void -+gr_audit_ptrace(struct task_struct *task) -+{ -+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE -+ if (grsec_enable_audit_ptrace) -+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); -+#endif -+ return; -+} -diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c -new file mode 100644 -index 0000000..cf090b3 ---- /dev/null -+++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,206 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/delay.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/hardirq.h> -+ -+char *signames[] = { -+ [SIGSEGV] = "Segmentation fault", -+ [SIGILL] = "Illegal instruction", -+ [SIGABRT] = "Abort", -+ [SIGBUS] = "Invalid alignment/Bus error" -+}; -+ -+void -+gr_log_signal(const int sig, const void *addr, const struct task_struct *t) -+{ -+#ifdef CONFIG_GRKERNSEC_SIGNAL -+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || -+ (sig == SIGABRT) || (sig == SIGBUS))) { -+ if (t->pid == current->pid) { -+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr); -+ } else { -+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); -+ } -+ } -+#endif -+ return; -+} -+ -+int -+gr_handle_signal(const struct task_struct *p, const int sig) -+{ -+#ifdef CONFIG_GRKERNSEC -+ if (current->pid > 1 && gr_check_protected_task(p)) { -+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); -+ return -EPERM; -+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) { 
-+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+#ifdef CONFIG_GRKERNSEC -+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); -+ -+int gr_fake_force_sig(int sig, struct task_struct *t) -+{ -+ unsigned long int flags; -+ int ret, blocked, ignored; -+ struct k_sigaction *action; -+ -+ spin_lock_irqsave(&t->sighand->siglock, flags); -+ action = &t->sighand->action[sig-1]; -+ ignored = action->sa.sa_handler == SIG_IGN; -+ blocked = sigismember(&t->blocked, sig); -+ if (blocked || ignored) { -+ action->sa.sa_handler = SIG_DFL; -+ if (blocked) { -+ sigdelset(&t->blocked, sig); -+ recalc_sigpending_and_wake(t); -+ } -+ } -+ if (action->sa.sa_handler == SIG_DFL) -+ t->signal->flags &= ~SIGNAL_UNKILLABLE; -+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t); -+ -+ spin_unlock_irqrestore(&t->sighand->siglock, flags); -+ -+ return ret; -+} -+#endif -+ -+#ifdef CONFIG_GRKERNSEC_BRUTE -+#define GR_USER_BAN_TIME (15 * 60) -+ -+static int __get_dumpable(unsigned long mm_flags) -+{ -+ int ret; -+ -+ ret = mm_flags & MMF_DUMPABLE_MASK; -+ return (ret >= 2) ? 2 : ret; -+} -+#endif -+ -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags) -+{ -+#ifdef CONFIG_GRKERNSEC_BRUTE -+ uid_t uid = 0; -+ -+ if (!grsec_enable_brute) -+ return; -+ -+ rcu_read_lock(); -+ read_lock(&tasklist_lock); -+ read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) -+ p->real_parent->brute = 1; -+ else { -+ const struct cred *cred = __task_cred(p), *cred2; -+ struct task_struct *tsk, *tsk2; -+ -+ if (!__get_dumpable(mm_flags) && cred->uid) { -+ struct user_struct *user; -+ -+ uid = cred->uid; -+ -+ /* this is put upon execution past expiration */ -+ user = find_user(uid); -+ if (user == NULL) -+ goto unlock; -+ user->banned = 1; -+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME; -+ if (user->ban_expires == ~0UL) -+ user->ban_expires--; -+ -+ do_each_thread(tsk2, tsk) { -+ cred2 = __task_cred(tsk); -+ if (tsk != p && cred2->uid == uid) -+ gr_fake_force_sig(SIGKILL, tsk); -+ } while_each_thread(tsk2, tsk); -+ } -+ } -+unlock: -+ read_unlock(&grsec_exec_file_lock); -+ read_unlock(&tasklist_lock); -+ rcu_read_unlock(); -+ -+ if (uid) -+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); -+ -+#endif -+ return; -+} -+ -+void gr_handle_brute_check(void) -+{ -+#ifdef CONFIG_GRKERNSEC_BRUTE -+ if (current->brute) -+ msleep(30 * 1000); -+#endif -+ return; -+} -+ -+void gr_handle_kernel_exploit(void) -+{ -+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT -+ const struct cred *cred; -+ struct task_struct *tsk, *tsk2; -+ struct user_struct *user; -+ uid_t uid; -+ -+ if (in_irq() || in_serving_softirq() || in_nmi()) -+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context"); -+ -+ uid = current_uid(); -+ -+ if (uid == 0) -+ panic("grsec: halting the system due to suspicious kernel crash caused by root"); -+ else { -+ /* kill all the processes of this user, hold a reference -+ to their creds struct, and prevent them from creating -+ another process until system reset -+ */ -+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid); -+ /* we intentionally leak this ref */ -+ user = get_uid(current->cred->user); -+ if (user) { -+ user->banned = 1; -+ user->ban_expires = ~0UL; -+ } -+ -+ read_lock(&tasklist_lock); -+ do_each_thread(tsk2, tsk) { -+ cred = __task_cred(tsk); -+ 
if (cred->uid == uid) -+ gr_fake_force_sig(SIGKILL, tsk); -+ } while_each_thread(tsk2, tsk); -+ read_unlock(&tasklist_lock); -+ } -+#endif -+} -+ -+int __gr_process_user_ban(struct user_struct *user) -+{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ if (unlikely(user->banned)) { -+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) { -+ user->banned = 0; -+ user->ban_expires = 0; -+ free_uid(user); -+ } else -+ return -EPERM; -+ } -+#endif -+ return 0; -+} -+ -+int gr_process_user_ban(void) -+{ -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ return __gr_process_user_ban(current->cred->user); -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c -new file mode 100644 -index 0000000..4030d57 ---- /dev/null -+++ b/grsecurity/grsec_sock.c -@@ -0,0 +1,244 @@ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/net.h> -+#include <linux/in.h> -+#include <linux/ip.h> -+#include <net/sock.h> -+#include <net/inet_sock.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+#include <linux/gracl.h> -+ -+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb); -+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr); -+ -+EXPORT_SYMBOL(gr_search_udp_recvmsg); -+EXPORT_SYMBOL(gr_search_udp_sendmsg); -+ -+#ifdef CONFIG_UNIX_MODULE -+EXPORT_SYMBOL(gr_acl_handle_unix); -+EXPORT_SYMBOL(gr_acl_handle_mknod); -+EXPORT_SYMBOL(gr_handle_chroot_unix); -+EXPORT_SYMBOL(gr_handle_create); -+#endif -+ -+#ifdef CONFIG_GRKERNSEC -+#define gr_conn_table_size 32749 -+struct conn_table_entry { -+ struct conn_table_entry *next; -+ struct signal_struct *sig; -+}; -+ -+struct conn_table_entry *gr_conn_table[gr_conn_table_size]; -+DEFINE_SPINLOCK(gr_conn_table_lock); -+ -+extern const char * gr_socktype_to_name(unsigned char type); -+extern const char * gr_proto_to_name(unsigned char proto); -+extern const char * gr_sockfamily_to_name(unsigned char family); -+ -+static __inline__ int -+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) -+{ -+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); -+} -+ -+static __inline__ int -+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, -+ __u16 sport, __u16 dport) -+{ -+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && -+ sig->gr_sport == sport && sig->gr_dport == dport)) -+ return 1; -+ else -+ return 0; -+} -+ -+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) -+{ -+ struct conn_table_entry **match; -+ unsigned int index; -+ -+ index = conn_hash(sig->gr_saddr, sig->gr_daddr, -+ sig->gr_sport, sig->gr_dport, -+ gr_conn_table_size); -+ -+ newent->sig = sig; -+ -+ match = &gr_conn_table[index]; -+ newent->next = *match; -+ *match = newent; -+ -+ return; -+} -+ -+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) -+{ -+ struct conn_table_entry *match, *last = NULL; -+ unsigned int index; -+ -+ index = conn_hash(sig->gr_saddr, sig->gr_daddr, -+ sig->gr_sport, sig->gr_dport, -+ gr_conn_table_size); -+ -+ match = gr_conn_table[index]; -+ while (match && !conn_match(match->sig, -+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport, -+ sig->gr_dport)) { -+ last = match; -+ match = match->next; -+ } -+ -+ if (match) { -+ if (last) -+ last->next = match->next; -+ else 
-+ gr_conn_table[index] = NULL; -+ kfree(match); -+ } -+ -+ return; -+} -+ -+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, -+ __u16 sport, __u16 dport) -+{ -+ struct conn_table_entry *match; -+ unsigned int index; -+ -+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); -+ -+ match = gr_conn_table[index]; -+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport)) -+ match = match->next; -+ -+ if (match) -+ return match->sig; -+ else -+ return NULL; -+} -+ -+#endif -+ -+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet) -+{ -+#ifdef CONFIG_GRKERNSEC -+ struct signal_struct *sig = task->signal; -+ struct conn_table_entry *newent; -+ -+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); -+ if (newent == NULL) -+ return; -+ /* no bh lock needed since we are called with bh disabled */ -+ spin_lock(&gr_conn_table_lock); -+ gr_del_task_from_ip_table_nolock(sig); -+ sig->gr_saddr = inet->inet_rcv_saddr; -+ sig->gr_daddr = inet->inet_daddr; -+ sig->gr_sport = inet->inet_sport; -+ sig->gr_dport = inet->inet_dport; -+ gr_add_to_task_ip_table_nolock(sig, newent); -+ spin_unlock(&gr_conn_table_lock); -+#endif -+ return; -+} -+ -+void gr_del_task_from_ip_table(struct task_struct *task) -+{ -+#ifdef CONFIG_GRKERNSEC -+ spin_lock_bh(&gr_conn_table_lock); -+ gr_del_task_from_ip_table_nolock(task->signal); -+ spin_unlock_bh(&gr_conn_table_lock); -+#endif -+ return; -+} -+ -+void -+gr_attach_curr_ip(const struct sock *sk) -+{ -+#ifdef CONFIG_GRKERNSEC -+ struct signal_struct *p, *set; -+ const struct inet_sock *inet = inet_sk(sk); -+ -+ if (unlikely(sk->sk_protocol != IPPROTO_TCP)) -+ return; -+ -+ set = current->signal; -+ -+ spin_lock_bh(&gr_conn_table_lock); -+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr, -+ inet->inet_dport, inet->inet_sport); -+ if (unlikely(p != NULL)) { -+ set->curr_ip = p->curr_ip; -+ set->used_accept = 1; -+ gr_del_task_from_ip_table_nolock(p); -+ spin_unlock_bh(&gr_conn_table_lock); -+ return; -+ } -+ spin_unlock_bh(&gr_conn_table_lock); -+ -+ set->curr_ip = inet->inet_daddr; -+ set->used_accept = 1; -+#endif -+ return; -+} -+ -+int -+gr_handle_sock_all(const int family, const int type, const int protocol) -+{ -+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL -+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && -+ (family != AF_UNIX)) { -+ if (family == AF_INET) -+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol)); -+ else -+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_sock_server(const struct sockaddr *sck) -+{ -+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER -+ if (grsec_enable_socket_server && -+ in_group_p(grsec_socket_server_gid) && -+ sck && (sck->sa_family != AF_UNIX) && -+ (sck->sa_family != AF_LOCAL)) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_sock_server_other(const struct sock *sck) -+{ -+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER -+ if (grsec_enable_socket_server && -+ in_group_p(grsec_socket_server_gid) && -+ sck && (sck->sk_family != AF_UNIX) && -+ (sck->sk_family != AF_LOCAL)) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+int -+gr_handle_sock_client(const struct sockaddr *sck) -+{ -+#ifdef 
CONFIG_GRKERNSEC_SOCKET_CLIENT -+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && -+ sck && (sck->sa_family != AF_UNIX) && -+ (sck->sa_family != AF_LOCAL)) { -+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c -new file mode 100644 -index 0000000..174668f ---- /dev/null -+++ b/grsecurity/grsec_sysctl.c -@@ -0,0 +1,433 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/sysctl.h> -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+int -+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) -+{ -+#ifdef CONFIG_GRKERNSEC_SYSCTL -+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) { -+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); -+ return -EACCES; -+ } -+#endif -+ return 0; -+} -+ -+#ifdef CONFIG_GRKERNSEC_ROFS -+static int __maybe_unused one = 1; -+#endif -+ -+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) -+struct ctl_table grsecurity_table[] = { -+#ifdef CONFIG_GRKERNSEC_SYSCTL -+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO -+#ifdef CONFIG_GRKERNSEC_IO -+ { -+ .procname = "disable_priv_io", -+ .data = &grsec_disable_privio, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#endif -+#ifdef CONFIG_GRKERNSEC_LINK -+ { -+ .procname = "linking_restrictions", -+ .data = &grsec_enable_link, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_BRUTE -+ { -+ .procname = "deter_bruteforce", -+ .data = &grsec_enable_brute, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_FIFO -+ { -+ .procname = "fifo_restrictions", -+ .data = &grsec_enable_fifo, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ { -+ .procname = "ip_blackhole", -+ .data = &grsec_enable_blackhole, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "lastack_retries", -+ .data = &grsec_lastack_retries, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_EXECLOG -+ { -+ .procname = "exec_logging", -+ .data = &grsec_enable_execlog, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG -+ { -+ .procname = "rwxmap_logging", -+ .data = &grsec_enable_log_rwxmaps, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_SIGNAL -+ { -+ .procname = "signal_logging", -+ .data = &grsec_enable_signal, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_FORKFAIL -+ { -+ .procname = "forkfail_logging", -+ .data = &grsec_enable_forkfail, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_TIME -+ { -+ .procname = "timechange_logging", -+ .data = &grsec_enable_time, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT -+ { -+ .procname = "chroot_deny_shmat", -+ .data = &grsec_enable_chroot_shmat, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef 
CONFIG_GRKERNSEC_CHROOT_UNIX -+ { -+ .procname = "chroot_deny_unix", -+ .data = &grsec_enable_chroot_unix, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT -+ { -+ .procname = "chroot_deny_mount", -+ .data = &grsec_enable_chroot_mount, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR -+ { -+ .procname = "chroot_deny_fchdir", -+ .data = &grsec_enable_chroot_fchdir, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE -+ { -+ .procname = "chroot_deny_chroot", -+ .data = &grsec_enable_chroot_double, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT -+ { -+ .procname = "chroot_deny_pivot", -+ .data = &grsec_enable_chroot_pivot, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR -+ { -+ .procname = "chroot_enforce_chdir", -+ .data = &grsec_enable_chroot_chdir, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD -+ { -+ .procname = "chroot_deny_chmod", -+ .data = &grsec_enable_chroot_chmod, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD -+ { -+ .procname = "chroot_deny_mknod", -+ .data = &grsec_enable_chroot_mknod, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE -+ { -+ .procname = "chroot_restrict_nice", -+ .data = &grsec_enable_chroot_nice, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG -+ { -+ .procname = "chroot_execlog", -+ .data = &grsec_enable_chroot_execlog, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS -+ { -+ .procname = "chroot_caps", -+ .data = &grsec_enable_chroot_caps, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL -+ { -+ .procname = "chroot_deny_sysctl", -+ .data = &grsec_enable_chroot_sysctl, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_TPE -+ { -+ .procname = "tpe", -+ .data = &grsec_enable_tpe, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "tpe_gid", -+ .data = &grsec_tpe_gid, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_TPE_INVERT -+ { -+ .procname = "tpe_invert", -+ .data = &grsec_enable_tpe_invert, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_TPE_ALL -+ { -+ .procname = "tpe_restrict_all", -+ .data = &grsec_enable_tpe_all, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL -+ { -+ .procname = "socket_all", -+ .data = &grsec_enable_socket_all, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "socket_all_gid", -+ .data = &grsec_socket_all_gid, -+ .maxlen = sizeof(int), -+ 
.mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT -+ { -+ .procname = "socket_client", -+ .data = &grsec_enable_socket_client, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "socket_client_gid", -+ .data = &grsec_socket_client_gid, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER -+ { -+ .procname = "socket_server", -+ .data = &grsec_enable_socket_server, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "socket_server_gid", -+ .data = &grsec_socket_server_gid, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP -+ { -+ .procname = "audit_group", -+ .data = &grsec_enable_group, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ { -+ .procname = "audit_gid", -+ .data = &grsec_audit_gid, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR -+ { -+ .procname = "audit_chdir", -+ .data = &grsec_enable_chdir, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT -+ { -+ .procname = "audit_mount", -+ .data = &grsec_enable_mount, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL -+ { -+ .procname = "audit_textrel", -+ .data = &grsec_enable_audit_textrel, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_DMESG -+ { -+ .procname = "dmesg", -+ .data = &grsec_enable_dmesg, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+ { -+ .procname = "chroot_findtask", -+ .data = &grsec_enable_chroot_findtask, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_RESLOG -+ { -+ .procname = "resource_logging", -+ .data = &grsec_resource_logging, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE -+ { -+ .procname = "audit_ptrace", -+ .data = &grsec_enable_audit_ptrace, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE -+ { -+ .procname = "harden_ptrace", -+ .data = &grsec_enable_harden_ptrace, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+ { -+ .procname = "grsec_lock", -+ .data = &grsec_lock, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+#endif -+#ifdef CONFIG_GRKERNSEC_ROFS -+ { -+ .procname = "romount_protect", -+ .data = &grsec_enable_rofs, -+ .maxlen = sizeof(int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec_minmax, -+ .extra1 = &one, -+ .extra2 = &one, -+ }, -+#endif -+ { } -+}; -+#endif -diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c -new file mode 100644 -index 0000000..0dc13c3 ---- /dev/null -+++ b/grsecurity/grsec_time.c -@@ -0,0 +1,16 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/grinternal.h> -+#include <linux/module.h> -+ -+void -+gr_log_timechange(void) -+{ -+#ifdef CONFIG_GRKERNSEC_TIME -+ 
if (grsec_enable_time) -+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); -+#endif -+ return; -+} -+ -+EXPORT_SYMBOL(gr_log_timechange); -diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c -new file mode 100644 -index 0000000..4a78774 ---- /dev/null -+++ b/grsecurity/grsec_tpe.c -@@ -0,0 +1,39 @@ -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/file.h> -+#include <linux/fs.h> -+#include <linux/grinternal.h> -+ -+extern int gr_acl_tpe_check(void); -+ -+int -+gr_tpe_allow(const struct file *file) -+{ -+#ifdef CONFIG_GRKERNSEC -+ struct inode *inode = file->f_path.dentry->d_parent->d_inode; -+ const struct cred *cred = current_cred(); -+ -+ if (cred->uid && ((grsec_enable_tpe && -+#ifdef CONFIG_GRKERNSEC_TPE_INVERT -+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) || -+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))) -+#else -+ in_group_p(grsec_tpe_gid) -+#endif -+ ) || gr_acl_tpe_check()) && -+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) || -+ (inode->i_mode & S_IWOTH))))) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 0; -+ } -+#ifdef CONFIG_GRKERNSEC_TPE_ALL -+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all && -+ ((inode->i_uid && (inode->i_uid != cred->uid)) || -+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) { -+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt); -+ return 0; -+ } -+#endif -+#endif -+ return 1; -+} -diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c -new file mode 100644 -index 0000000..9f7b1ac ---- /dev/null -+++ b/grsecurity/grsum.c -@@ -0,0 +1,61 @@ -+#include <linux/err.h> -+#include <linux/kernel.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/scatterlist.h> -+#include <linux/crypto.h> -+#include <linux/gracl.h> -+ -+ -+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) -+#error "crypto and sha256 must be built into the kernel" -+#endif -+ -+int -+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) -+{ -+ char *p; -+ struct crypto_hash *tfm; -+ struct hash_desc desc; -+ struct scatterlist sg; -+ unsigned char temp_sum[GR_SHA_LEN]; -+ volatile int retval = 0; -+ volatile int dummy = 0; -+ unsigned int i; -+ -+ sg_init_table(&sg, 1); -+ -+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); -+ if (IS_ERR(tfm)) { -+ /* should never happen, since sha256 should be built in */ -+ return 1; -+ } -+ -+ desc.tfm = tfm; -+ desc.flags = 0; -+ -+ crypto_hash_init(&desc); -+ -+ p = salt; -+ sg_set_buf(&sg, p, GR_SALT_LEN); -+ crypto_hash_update(&desc, &sg, sg.length); -+ -+ p = entry->pw; -+ sg_set_buf(&sg, p, strlen(p)); -+ -+ crypto_hash_update(&desc, &sg, sg.length); -+ -+ crypto_hash_final(&desc, temp_sum); -+ -+ memset(entry->pw, 0, GR_PW_LEN); -+ -+ for (i = 0; i < GR_SHA_LEN; i++) -+ if (sum[i] != temp_sum[i]) -+ retval = 1; -+ else -+ dummy = 1; // waste a cycle -+ -+ crypto_free_hash(tfm); -+ -+ return retval; -+} -diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h -index 6cd5b64..f620d2d 100644 ---- a/include/acpi/acpi_bus.h -+++ b/include/acpi/acpi_bus.h -@@ -107,7 +107,7 @@ struct acpi_device_ops { - acpi_op_bind bind; - acpi_op_unbind unbind; - acpi_op_notify notify; --}; -+} __no_const; - - #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ - -diff --git a/include/asm-generic/atomic-long.h 
b/include/asm-generic/atomic-long.h -index b7babf0..71e4e74 100644 ---- a/include/asm-generic/atomic-long.h -+++ b/include/asm-generic/atomic-long.h -@@ -22,6 +22,12 @@ - - typedef atomic64_t atomic_long_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef atomic64_unchecked_t atomic_long_unchecked_t; -+#else -+typedef atomic64_t atomic_long_unchecked_t; -+#endif -+ - #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) - - static inline long atomic_long_read(atomic_long_t *l) -@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l) - return (long)atomic64_read(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ return (long)atomic64_read_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_set(atomic_long_t *l, long i) - { - atomic64_t *v = (atomic64_t *)l; -@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) - atomic64_set(v, i); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ atomic64_set_unchecked(v, i); -+} -+#endif -+ - static inline void atomic_long_inc(atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l) - atomic64_inc(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ atomic64_inc_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_dec(atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l) - atomic64_dec(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ atomic64_dec_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_add(long i, atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) - atomic64_add(i, v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ atomic64_add_unchecked(i, v); -+} -+#endif -+ - static inline void atomic_long_sub(long i, atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) - atomic64_sub(i, v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ atomic64_sub_unchecked(i, v); -+} -+#endif -+ - static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) - return (long)atomic64_inc_return(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; -+ -+ return (long)atomic64_inc_return_unchecked(v); -+} -+#endif -+ - static inline long atomic_long_dec_return(atomic_long_t *l) - { - atomic64_t *v = (atomic64_t *)l; -@@ -140,6 +209,12 @@ static inline long 
atomic_long_add_unless(atomic_long_t *l, long a, long u) - - typedef atomic_t atomic_long_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef atomic_unchecked_t atomic_long_unchecked_t; -+#else -+typedef atomic_t atomic_long_unchecked_t; -+#endif -+ - #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) - static inline long atomic_long_read(atomic_long_t *l) - { -@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l) - return (long)atomic_read(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ return (long)atomic_read_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_set(atomic_long_t *l, long i) - { - atomic_t *v = (atomic_t *)l; -@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) - atomic_set(v, i); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ atomic_set_unchecked(v, i); -+} -+#endif -+ - static inline void atomic_long_inc(atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l) - atomic_inc(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ atomic_inc_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_dec(atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l) - atomic_dec(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ atomic_dec_unchecked(v); -+} -+#endif -+ - static inline void atomic_long_add(long i, atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) - atomic_add(i, v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ atomic_add_unchecked(i, v); -+} -+#endif -+ - static inline void atomic_long_sub(long i, atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) - atomic_sub(i, v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ atomic_sub_unchecked(i, v); -+} -+#endif -+ - static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) - return (long)atomic_inc_return(v); - } - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) -+{ -+ atomic_unchecked_t *v = (atomic_unchecked_t *)l; -+ -+ return (long)atomic_inc_return_unchecked(v); -+} -+#endif -+ - static inline long atomic_long_dec_return(atomic_long_t *l) - { - atomic_t *v = (atomic_t *)l; -@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) - - #endif /* BITS_PER_LONG == 64 */ - -+#ifdef CONFIG_PAX_REFCOUNT -+static inline void pax_refcount_needs_these_functions(void) -+{ -+ 
atomic_read_unchecked((atomic_unchecked_t *)NULL); -+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); -+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); -+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); -+ atomic_inc_unchecked((atomic_unchecked_t *)NULL); -+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); -+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); -+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); -+ atomic_dec_unchecked((atomic_unchecked_t *)NULL); -+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); -+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); -+ -+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); -+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); -+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); -+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL); -+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); -+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); -+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); -+} -+#else -+#define atomic_read_unchecked(v) atomic_read(v) -+#define atomic_set_unchecked(v, i) atomic_set((v), (i)) -+#define atomic_add_unchecked(i, v) atomic_add((i), (v)) -+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) -+#define atomic_inc_unchecked(v) atomic_inc(v) -+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v) -+#define atomic_inc_return_unchecked(v) atomic_inc_return(v) -+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) -+#define atomic_dec_unchecked(v) atomic_dec(v) -+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) -+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) -+ -+#define atomic_long_read_unchecked(v) atomic_long_read(v) -+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) -+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) -+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v)) -+#define atomic_long_inc_unchecked(v) atomic_long_inc(v) -+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) -+#define atomic_long_dec_unchecked(v) atomic_long_dec(v) -+#endif -+ - #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h -index b18ce4f..2ee2843 100644 ---- a/include/asm-generic/atomic64.h -+++ b/include/asm-generic/atomic64.h -@@ -16,6 +16,8 @@ typedef struct { - long long counter; - } atomic64_t; - -+typedef atomic64_t atomic64_unchecked_t; -+ - #define ATOMIC64_INIT(i) { (i) } - - extern long long atomic64_read(const atomic64_t *v); -@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); - #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - #endif /* _ASM_GENERIC_ATOMIC64_H */ -diff --git 
a/include/asm-generic/cache.h b/include/asm-generic/cache.h -index 1bfcfe5..e04c5c9 100644 ---- a/include/asm-generic/cache.h -+++ b/include/asm-generic/cache.h -@@ -6,7 +6,7 @@ - * cache lines need to provide their own cache.h. - */ - --#define L1_CACHE_SHIFT 5 --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_SHIFT 5UL -+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT) - - #endif /* __ASM_GENERIC_CACHE_H */ -diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h -index 1ca3efc..e3dc852 100644 ---- a/include/asm-generic/int-l64.h -+++ b/include/asm-generic/int-l64.h -@@ -46,6 +46,8 @@ typedef unsigned int u32; - typedef signed long s64; - typedef unsigned long u64; - -+typedef unsigned int intoverflow_t __attribute__ ((mode(TI))); -+ - #define S8_C(x) x - #define U8_C(x) x ## U - #define S16_C(x) x -diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h -index f394147..b6152b9 100644 ---- a/include/asm-generic/int-ll64.h -+++ b/include/asm-generic/int-ll64.h -@@ -51,6 +51,8 @@ typedef unsigned int u32; - typedef signed long long s64; - typedef unsigned long long u64; - -+typedef unsigned long long intoverflow_t; -+ - #define S8_C(x) x - #define U8_C(x) x ## U - #define S16_C(x) x -diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h -index 0232ccb..13d9165 100644 ---- a/include/asm-generic/kmap_types.h -+++ b/include/asm-generic/kmap_types.h -@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE, - KMAP_D(17) KM_NMI, - KMAP_D(18) KM_NMI_PTE, - KMAP_D(19) KM_KDB, -+KMAP_D(20) KM_CLEARPAGE, - /* - * Remember to update debug_kmap_atomic() when adding new kmap types! - */ --KMAP_D(20) KM_TYPE_NR -+KMAP_D(21) KM_TYPE_NR - }; - - #undef KMAP_D -diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h -index 725612b..9cc513a 100644 ---- a/include/asm-generic/pgtable-nopmd.h -+++ b/include/asm-generic/pgtable-nopmd.h -@@ -1,14 +1,19 @@ - #ifndef _PGTABLE_NOPMD_H - #define _PGTABLE_NOPMD_H - --#ifndef __ASSEMBLY__ -- - #include <asm-generic/pgtable-nopud.h> - --struct mm_struct; -- - #define __PAGETABLE_PMD_FOLDED - -+#define PMD_SHIFT PUD_SHIFT -+#define PTRS_PER_PMD 1 -+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) -+#define PMD_MASK (~(PMD_SIZE-1)) -+ -+#ifndef __ASSEMBLY__ -+ -+struct mm_struct; -+ - /* - * Having the pmd type consist of a pud gets the size right, and allows - * us to conceptually access the pud entry that this pmd is folded into -@@ -16,11 +21,6 @@ struct mm_struct; - */ - typedef struct { pud_t pud; } pmd_t; - --#define PMD_SHIFT PUD_SHIFT --#define PTRS_PER_PMD 1 --#define PMD_SIZE (1UL << PMD_SHIFT) --#define PMD_MASK (~(PMD_SIZE-1)) -- - /* - * The "pud_xxx()" functions here are trivial for a folded two-level - * setup: the pmd is never bad, and a pmd always exists (as it's folded -diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h -index 810431d..ccc3638 100644 ---- a/include/asm-generic/pgtable-nopud.h -+++ b/include/asm-generic/pgtable-nopud.h -@@ -1,10 +1,15 @@ - #ifndef _PGTABLE_NOPUD_H - #define _PGTABLE_NOPUD_H - --#ifndef __ASSEMBLY__ -- - #define __PAGETABLE_PUD_FOLDED - -+#define PUD_SHIFT PGDIR_SHIFT -+#define PTRS_PER_PUD 1 -+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) -+#define PUD_MASK (~(PUD_SIZE-1)) -+ -+#ifndef __ASSEMBLY__ -+ - /* - * Having the pud type consist of a pgd gets the size right, and allows - * us to conceptually access the pgd entry that this pud is folded into -@@ -12,11 +17,6 @@ - */ - typedef struct { pgd_t 
pgd; } pud_t; - --#define PUD_SHIFT PGDIR_SHIFT --#define PTRS_PER_PUD 1 --#define PUD_SIZE (1UL << PUD_SHIFT) --#define PUD_MASK (~(PUD_SIZE-1)) -- - /* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pud is never bad, and a pud always exists (as it's folded -diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h -index 76bff2b..c7a14e2 100644 ---- a/include/asm-generic/pgtable.h -+++ b/include/asm-generic/pgtable.h -@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd) - #endif /* __HAVE_ARCH_PMD_WRITE */ - #endif - -+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL -+static inline unsigned long pax_open_kernel(void) { return 0; } -+#endif -+ -+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL -+static inline unsigned long pax_close_kernel(void) { return 0; } -+#endif -+ - #endif /* !__ASSEMBLY__ */ - - #endif /* _ASM_GENERIC_PGTABLE_H */ -diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h -index db22d13..1f2e3e1 100644 ---- a/include/asm-generic/vmlinux.lds.h -+++ b/include/asm-generic/vmlinux.lds.h -@@ -217,6 +217,7 @@ - .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start_rodata) = .; \ - *(.rodata) *(.rodata.*) \ -+ *(.data..read_only) \ - *(__vermagic) /* Kernel version magic */ \ - . = ALIGN(8); \ - VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ -@@ -723,17 +724,18 @@ - * section in the linker script will go there too. @phdr should have - * a leading colon. - * -- * Note that this macros defines __per_cpu_load as an absolute symbol. -+ * Note that this macros defines per_cpu_load as an absolute symbol. - * If there is no need to put the percpu section at a predetermined - * address, use PERCPU_SECTION. - */ - #define PERCPU_VADDR(cacheline, vaddr, phdr) \ -- VMLINUX_SYMBOL(__per_cpu_load) = .; \ -- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ -+ per_cpu_load = .; \ -+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ - - LOAD_OFFSET) { \ -+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \ - PERCPU_INPUT(cacheline) \ - } phdr \ -- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); -+ . 
= VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu); - - /** - * PERCPU_SECTION - define output section for percpu area, simple version -diff --git a/include/drm/drmP.h b/include/drm/drmP.h -index 9b7c2bb..76b7d1e 100644 ---- a/include/drm/drmP.h -+++ b/include/drm/drmP.h -@@ -73,6 +73,7 @@ - #include <linux/workqueue.h> - #include <linux/poll.h> - #include <asm/pgalloc.h> -+#include <asm/local.h> - #include "drm.h" - - #include <linux/idr.h> -@@ -1035,7 +1036,7 @@ struct drm_device { - - /** \name Usage Counters */ - /*@{ */ -- int open_count; /**< Outstanding files open */ -+ local_t open_count; /**< Outstanding files open */ - atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ - atomic_t vma_count; /**< Outstanding vma areas open */ - int buf_use; /**< Buffers in use -- cannot alloc */ -@@ -1046,7 +1047,7 @@ struct drm_device { - /*@{ */ - unsigned long counters; - enum drm_stat_type types[15]; -- atomic_t counts[15]; -+ atomic_unchecked_t counts[15]; - /*@} */ - - struct list_head filelist; -diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h -index 73b0712..0b7ef2f 100644 ---- a/include/drm/drm_crtc_helper.h -+++ b/include/drm/drm_crtc_helper.h -@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs { - - /* disable crtc when not in use - more explicit than dpms off */ - void (*disable)(struct drm_crtc *crtc); --}; -+} __no_const; - - struct drm_encoder_helper_funcs { - void (*dpms)(struct drm_encoder *encoder, int mode); -@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs { - struct drm_connector *connector); - /* disable encoder when not in use - more explicit than dpms off */ - void (*disable)(struct drm_encoder *encoder); --}; -+} __no_const; - - struct drm_connector_helper_funcs { - int (*get_modes)(struct drm_connector *connector); -diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h -index 26c1f78..6722682 100644 ---- a/include/drm/ttm/ttm_memory.h -+++ b/include/drm/ttm/ttm_memory.h -@@ -47,7 +47,7 @@ - - struct ttm_mem_shrink { - int (*do_shrink) (struct ttm_mem_shrink *); --}; -+} __no_const; - - /** - * struct ttm_mem_global - Global memory accounting structure. 
-diff --git a/include/linux/a.out.h b/include/linux/a.out.h -index e86dfca..40cc55f 100644 ---- a/include/linux/a.out.h -+++ b/include/linux/a.out.h -@@ -39,6 +39,14 @@ enum machine_type { - M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ - }; - -+/* Constants for the N_FLAGS field */ -+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ -+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ -+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ -+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ -+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ -+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ -+ - #if !defined (N_MAGIC) - #define N_MAGIC(exec) ((exec).a_info & 0xffff) - #endif -diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h -index 49a83ca..df96b54 100644 ---- a/include/linux/atmdev.h -+++ b/include/linux/atmdev.h -@@ -237,7 +237,7 @@ struct compat_atm_iobuf { - #endif - - struct k_atm_aal_stats { --#define __HANDLE_ITEM(i) atomic_t i -+#define __HANDLE_ITEM(i) atomic_unchecked_t i - __AAL_STAT_ITEMS - #undef __HANDLE_ITEM - }; -diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h -index fd88a39..f4d0bad 100644 ---- a/include/linux/binfmts.h -+++ b/include/linux/binfmts.h -@@ -88,6 +88,7 @@ struct linux_binfmt { - int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); - int (*load_shlib)(struct file *); - int (*core_dump)(struct coredump_params *cprm); -+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags); - unsigned long min_coredump; /* minimal dump size */ - }; - -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 7fbaa91..5e6a460 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -1321,7 +1321,7 @@ struct block_device_operations { - /* this callback is with swap_lock and sometimes page table lock held */ - void (*swap_slot_free_notify) (struct block_device *, unsigned long); - struct module *owner; --}; -+} __do_const; - - extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, - unsigned long); -diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h -index 8e9e4bc..88bd457 100644 ---- a/include/linux/blktrace_api.h -+++ b/include/linux/blktrace_api.h -@@ -162,7 +162,7 @@ struct blk_trace { - struct dentry *dir; - struct dentry *dropped_file; - struct dentry *msg_file; -- atomic_t dropped; -+ atomic_unchecked_t dropped; - }; - - extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); -diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h -index 83195fb..0b0f77d 100644 ---- a/include/linux/byteorder/little_endian.h -+++ b/include/linux/byteorder/little_endian.h -@@ -42,51 +42,51 @@ - - static inline __le64 __cpu_to_le64p(const __u64 *p) - { -- return (__force __le64)*p; -+ return (__force const __le64)*p; - } - static inline __u64 __le64_to_cpup(const __le64 *p) - { -- return (__force __u64)*p; -+ return (__force const __u64)*p; - } - static inline __le32 __cpu_to_le32p(const __u32 *p) - { -- return (__force __le32)*p; -+ return (__force const __le32)*p; - } - static inline __u32 __le32_to_cpup(const __le32 *p) - { -- return (__force __u32)*p; -+ return (__force const __u32)*p; - } - static inline __le16 __cpu_to_le16p(const __u16 *p) - { -- return (__force __le16)*p; -+ return (__force const __le16)*p; - } - static inline __u16 __le16_to_cpup(const __le16 *p) - { -- return (__force __u16)*p; -+ return (__force const __u16)*p; - } - static inline 
__be64 __cpu_to_be64p(const __u64 *p) - { -- return (__force __be64)__swab64p(p); -+ return (__force const __be64)__swab64p(p); - } - static inline __u64 __be64_to_cpup(const __be64 *p) - { -- return __swab64p((__u64 *)p); -+ return __swab64p((const __u64 *)p); - } - static inline __be32 __cpu_to_be32p(const __u32 *p) - { -- return (__force __be32)__swab32p(p); -+ return (__force const __be32)__swab32p(p); - } - static inline __u32 __be32_to_cpup(const __be32 *p) - { -- return __swab32p((__u32 *)p); -+ return __swab32p((const __u32 *)p); - } - static inline __be16 __cpu_to_be16p(const __u16 *p) - { -- return (__force __be16)__swab16p(p); -+ return (__force const __be16)__swab16p(p); - } - static inline __u16 __be16_to_cpup(const __be16 *p) - { -- return __swab16p((__u16 *)p); -+ return __swab16p((const __u16 *)p); - } - #define __cpu_to_le64s(x) do { (void)(x); } while (0) - #define __le64_to_cpus(x) do { (void)(x); } while (0) -diff --git a/include/linux/cache.h b/include/linux/cache.h -index 4c57065..4307975 100644 ---- a/include/linux/cache.h -+++ b/include/linux/cache.h -@@ -16,6 +16,10 @@ - #define __read_mostly - #endif - -+#ifndef __read_only -+#define __read_only __read_mostly -+#endif -+ - #ifndef ____cacheline_aligned - #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) - #endif -diff --git a/include/linux/capability.h b/include/linux/capability.h -index c421123..e343179 100644 ---- a/include/linux/capability.h -+++ b/include/linux/capability.h -@@ -547,6 +547,9 @@ extern bool capable(int cap); - extern bool ns_capable(struct user_namespace *ns, int cap); - extern bool task_ns_capable(struct task_struct *t, int cap); - extern bool nsown_capable(int cap); -+extern bool task_ns_capable_nolog(struct task_struct *t, int cap); -+extern bool ns_capable_nolog(struct user_namespace *ns, int cap); -+extern bool capable_nolog(int cap); - - /* audit system wants to get cap info from files as well */ - extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); -diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h -index 04ffb2e..6799180 100644 ---- a/include/linux/cleancache.h -+++ b/include/linux/cleancache.h -@@ -31,7 +31,7 @@ struct cleancache_ops { - void (*flush_page)(int, struct cleancache_filekey, pgoff_t); - void (*flush_inode)(int, struct cleancache_filekey); - void (*flush_fs)(int); --}; -+} __no_const; - - extern struct cleancache_ops - cleancache_register_ops(struct cleancache_ops *ops); -diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h -index dfadc96..c0e70c1 100644 ---- a/include/linux/compiler-gcc4.h -+++ b/include/linux/compiler-gcc4.h -@@ -31,6 +31,12 @@ - - - #if __GNUC_MINOR__ >= 5 -+ -+#ifdef CONSTIFY_PLUGIN -+#define __no_const __attribute__((no_const)) -+#define __do_const __attribute__((do_const)) -+#endif -+ - /* - * Mark a position in code as unreachable. This can be used to - * suppress control flow warnings after asm blocks that transfer -@@ -46,6 +52,11 @@ - #define __noclone __attribute__((__noclone__)) - - #endif -+ -+#define __alloc_size(...) 
__attribute((alloc_size(__VA_ARGS__))) -+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg)) -+#define __bos0(ptr) __bos((ptr), 0) -+#define __bos1(ptr) __bos((ptr), 1) - #endif - - #if __GNUC_MINOR__ > 0 -diff --git a/include/linux/compiler.h b/include/linux/compiler.h -index 320d6c9..8573a1c 100644 ---- a/include/linux/compiler.h -+++ b/include/linux/compiler.h -@@ -5,31 +5,62 @@ - - #ifdef __CHECKER__ - # define __user __attribute__((noderef, address_space(1))) -+# define __force_user __force __user - # define __kernel __attribute__((address_space(0))) -+# define __force_kernel __force __kernel - # define __safe __attribute__((safe)) - # define __force __attribute__((force)) - # define __nocast __attribute__((nocast)) - # define __iomem __attribute__((noderef, address_space(2))) -+# define __force_iomem __force __iomem - # define __acquires(x) __attribute__((context(x,0,1))) - # define __releases(x) __attribute__((context(x,1,0))) - # define __acquire(x) __context__(x,1) - # define __release(x) __context__(x,-1) - # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) - # define __percpu __attribute__((noderef, address_space(3))) -+# define __force_percpu __force __percpu - #ifdef CONFIG_SPARSE_RCU_POINTER - # define __rcu __attribute__((noderef, address_space(4))) -+# define __force_rcu __force __rcu - #else - # define __rcu -+# define __force_rcu - #endif - extern void __chk_user_ptr(const volatile void __user *); - extern void __chk_io_ptr(const volatile void __iomem *); -+#elif defined(CHECKER_PLUGIN) -+//# define __user -+//# define __force_user -+//# define __kernel -+//# define __force_kernel -+# define __safe -+# define __force -+# define __nocast -+# define __iomem -+# define __force_iomem -+# define __chk_user_ptr(x) (void)0 -+# define __chk_io_ptr(x) (void)0 -+# define __builtin_warning(x, y...) (1) -+# define __acquires(x) -+# define __releases(x) -+# define __acquire(x) (void)0 -+# define __release(x) (void)0 -+# define __cond_lock(x,c) (c) -+# define __percpu -+# define __force_percpu -+# define __rcu -+# define __force_rcu - #else - # define __user -+# define __force_user - # define __kernel -+# define __force_kernel - # define __safe - # define __force - # define __nocast - # define __iomem -+# define __force_iomem - # define __chk_user_ptr(x) (void)0 - # define __chk_io_ptr(x) (void)0 - # define __builtin_warning(x, y...) (1) -@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); - # define __release(x) (void)0 - # define __cond_lock(x,c) (c) - # define __percpu -+# define __force_percpu - # define __rcu -+# define __force_rcu - #endif - - #ifdef __KERNEL__ -@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); - # define __attribute_const__ /* unimplemented */ - #endif - -+#ifndef __no_const -+# define __no_const -+#endif -+ -+#ifndef __do_const -+# define __do_const -+#endif -+ - /* - * Tell gcc if a function is cold. The compiler will assume any path - * directly leading to the call is unlikely. -@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); - #define __cold - #endif - -+#ifndef __alloc_size -+#define __alloc_size(...) 
-+#endif -+ -+#ifndef __bos -+#define __bos(ptr, arg) -+#endif -+ -+#ifndef __bos0 -+#define __bos0(ptr) -+#endif -+ -+#ifndef __bos1 -+#define __bos1(ptr) -+#endif -+ - /* Simple shorthand for a section definition */ - #ifndef __section - # define __section(S) __attribute__ ((__section__(#S))) -@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); - * use is to mediate communication between process-level code and irq/NMI - * handlers, all running on the same CPU. - */ --#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) -+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x)) -+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x)) - - #endif /* __LINUX_COMPILER_H */ -diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h -index e9eaec5..bfeb9bb 100644 ---- a/include/linux/cpuset.h -+++ b/include/linux/cpuset.h -@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void) - * nodemask. - */ - smp_mb(); -- --ACCESS_ONCE(current->mems_allowed_change_disable); -+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable); - } - - static inline void set_mems_allowed(nodemask_t nodemask) -diff --git a/include/linux/crypto.h b/include/linux/crypto.h -index e5e468e..f079672 100644 ---- a/include/linux/crypto.h -+++ b/include/linux/crypto.h -@@ -361,7 +361,7 @@ struct cipher_tfm { - const u8 *key, unsigned int keylen); - void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); --}; -+} __no_const; - - struct hash_tfm { - int (*init)(struct hash_desc *desc); -@@ -382,13 +382,13 @@ struct compress_tfm { - int (*cot_decompress)(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); --}; -+} __no_const; - - struct rng_tfm { - int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen); - int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); --}; -+} __no_const; - - #define crt_ablkcipher crt_u.ablkcipher - #define crt_aead crt_u.aead -diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h -index 7925bf0..d5143d2 100644 ---- a/include/linux/decompress/mm.h -+++ b/include/linux/decompress/mm.h -@@ -77,7 +77,7 @@ static void free(void *where) - * warnings when not needed (indeed large_malloc / large_free are not - * needed by inflate */ - --#define malloc(a) kmalloc(a, GFP_KERNEL) -+#define malloc(a) kmalloc((a), GFP_KERNEL) - #define free(a) kfree(a) - - #define large_malloc(a) vmalloc(a) -diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h -index 347fdc3..cd01657 100644 ---- a/include/linux/dma-mapping.h -+++ b/include/linux/dma-mapping.h -@@ -42,7 +42,7 @@ struct dma_map_ops { - int (*dma_supported)(struct device *dev, u64 mask); - int (*set_dma_mask)(struct device *dev, u64 mask); - int is_phys; --}; -+} __do_const; - - #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) - -diff --git a/include/linux/efi.h b/include/linux/efi.h -index 2362a0b..cfaf8fcc 100644 ---- a/include/linux/efi.h -+++ b/include/linux/efi.h -@@ -446,7 +446,7 @@ struct efivar_operations { - efi_get_variable_t *get_variable; - efi_get_next_variable_t *get_next_variable; - efi_set_variable_t *set_variable; --}; -+} __no_const; - - struct efivars { - /* -diff --git a/include/linux/elf.h b/include/linux/elf.h -index 110821c..cb14c08 100644 ---- a/include/linux/elf.h -+++ b/include/linux/elf.h -@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword; - #define PT_GNU_EH_FRAME 0x6474e550 - - #define PT_GNU_STACK (PT_LOOS + 0x474e551) -+#define PT_GNU_RELRO (PT_LOOS + 0x474e552) -+ -+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) -+ -+/* Constants for the e_flags field */ -+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ -+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ -+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ -+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ -+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ -+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ - - /* - * Extended Numbering -@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword; - #define DT_DEBUG 21 - #define DT_TEXTREL 22 - #define DT_JMPREL 23 -+#define DT_FLAGS 30 -+ #define DF_TEXTREL 0x00000004 - #define DT_ENCODING 32 - #define OLD_DT_LOOS 0x60000000 - #define DT_LOOS 0x6000000d -@@ -252,6 +265,19 @@ typedef struct elf64_hdr { - #define PF_W 0x2 - #define PF_X 0x1 - -+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ -+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ -+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ -+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ -+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ -+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ -+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ -+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ -+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ -+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */ -+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ -+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ -+ - typedef struct elf32_phdr{ - Elf32_Word p_type; - Elf32_Off p_offset; -@@ -344,6 +370,8 @@ typedef struct elf64_shdr { - #define EI_OSABI 7 - #define EI_PAD 8 - -+#define EI_PAX 14 -+ - #define ELFMAG0 0x7f /* EI_MAG */ - #define ELFMAG1 'E' - #define ELFMAG2 'L' -@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC []; - #define elf_note elf32_note - #define elf_addr_t Elf32_Off - #define Elf_Half Elf32_Half -+#define elf_dyn Elf32_Dyn - - #else - -@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC []; - #define elf_note elf64_note - #define elf_addr_t Elf64_Off - #define Elf_Half Elf64_Half -+#define elf_dyn Elf64_Dyn - - #endif - -diff --git a/include/linux/filter.h b/include/linux/filter.h -index 741956f..f02f482 100644 ---- a/include/linux/filter.h -+++ b/include/linux/filter.h -@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. 
*/ - - struct sk_buff; - struct sock; -+struct bpf_jit_work; - - struct sk_filter - { -@@ -141,6 +142,9 @@ struct sk_filter - unsigned int len; /* Number of filter blocks */ - unsigned int (*bpf_func)(const struct sk_buff *skb, - const struct sock_filter *filter); -+#ifdef CONFIG_BPF_JIT -+ struct bpf_jit_work *work; -+#endif - struct rcu_head rcu; - struct sock_filter insns[0]; - }; -diff --git a/include/linux/firewire.h b/include/linux/firewire.h -index 84ccf8e..2e9b14c 100644 ---- a/include/linux/firewire.h -+++ b/include/linux/firewire.h -@@ -428,7 +428,7 @@ struct fw_iso_context { - union { - fw_iso_callback_t sc; - fw_iso_mc_callback_t mc; -- } callback; -+ } __no_const callback; - void *callback_data; - }; - -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 277f497..9be66a4 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1588,7 +1588,8 @@ struct file_operations { - int (*setlease)(struct file *, long, struct file_lock **); - long (*fallocate)(struct file *file, int mode, loff_t offset, - loff_t len); --}; -+} __do_const; -+typedef struct file_operations __no_const file_operations_no_const; - - struct inode_operations { - struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); -diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h -index 003dc0f..3c4ea97 100644 ---- a/include/linux/fs_struct.h -+++ b/include/linux/fs_struct.h -@@ -6,7 +6,7 @@ - #include <linux/seqlock.h> - - struct fs_struct { -- int users; -+ atomic_t users; - spinlock_t lock; - seqcount_t seq; - int umask; -diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h -index af095b5..cf1220c 100644 ---- a/include/linux/fscache-cache.h -+++ b/include/linux/fscache-cache.h -@@ -102,7 +102,7 @@ struct fscache_operation { - fscache_operation_release_t release; - }; - --extern atomic_t fscache_op_debug_id; -+extern atomic_unchecked_t fscache_op_debug_id; - extern void fscache_op_work_func(struct work_struct *work); - - extern void fscache_enqueue_operation(struct fscache_operation *); -@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, - { - INIT_WORK(&op->work, fscache_op_work_func); - atomic_set(&op->usage, 1); -- op->debug_id = atomic_inc_return(&fscache_op_debug_id); -+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); - op->processor = processor; - op->release = release; - INIT_LIST_HEAD(&op->pend_link); -diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h -index 2a53f10..0187fdf 100644 ---- a/include/linux/fsnotify.h -+++ b/include/linux/fsnotify.h -@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) - */ - static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) - { -- return kstrdup(name, GFP_KERNEL); -+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL); - } - - /* -diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h -index 96efa67..1261547 100644 ---- a/include/linux/ftrace_event.h -+++ b/include/linux/ftrace_event.h -@@ -97,7 +97,7 @@ struct trace_event_functions { - trace_print_func raw; - trace_print_func hex; - trace_print_func binary; --}; -+} __no_const; - - struct trace_event { - struct hlist_node node; -@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, - extern int trace_add_event_call(struct ftrace_event_call *call); - extern void trace_remove_event_call(struct ftrace_event_call *call); - 
--#define is_signed_type(type) (((type)(-1)) < 0) -+#define is_signed_type(type) (((type)(-1)) < (type)1) - - int trace_set_clr_event(const char *system, const char *event, int set); - -diff --git a/include/linux/genhd.h b/include/linux/genhd.h -index 02fa469..a15f279 100644 ---- a/include/linux/genhd.h -+++ b/include/linux/genhd.h -@@ -184,7 +184,7 @@ struct gendisk { - struct kobject *slave_dir; - - struct timer_rand_state *random; -- atomic_t sync_io; /* RAID */ -+ atomic_unchecked_t sync_io; /* RAID */ - struct disk_events *ev; - #ifdef CONFIG_BLK_DEV_INTEGRITY - struct blk_integrity *integrity; -diff --git a/include/linux/gracl.h b/include/linux/gracl.h -new file mode 100644 -index 0000000..0dc3943 ---- /dev/null -+++ b/include/linux/gracl.h -@@ -0,0 +1,317 @@ -+#ifndef GR_ACL_H -+#define GR_ACL_H -+ -+#include <linux/grdefs.h> -+#include <linux/resource.h> -+#include <linux/capability.h> -+#include <linux/dcache.h> -+#include <asm/resource.h> -+ -+/* Major status information */ -+ -+#define GR_VERSION "grsecurity 2.2.2" -+#define GRSECURITY_VERSION 0x2202 -+ -+enum { -+ GR_SHUTDOWN = 0, -+ GR_ENABLE = 1, -+ GR_SPROLE = 2, -+ GR_RELOAD = 3, -+ GR_SEGVMOD = 4, -+ GR_STATUS = 5, -+ GR_UNSPROLE = 6, -+ GR_PASSSET = 7, -+ GR_SPROLEPAM = 8, -+}; -+ -+/* Password setup definitions -+ * kernel/grhash.c */ -+enum { -+ GR_PW_LEN = 128, -+ GR_SALT_LEN = 16, -+ GR_SHA_LEN = 32, -+}; -+ -+enum { -+ GR_SPROLE_LEN = 64, -+}; -+ -+enum { -+ GR_NO_GLOB = 0, -+ GR_REG_GLOB, -+ GR_CREATE_GLOB -+}; -+ -+#define GR_NLIMITS 32 -+ -+/* Begin Data Structures */ -+ -+struct sprole_pw { -+ unsigned char *rolename; -+ unsigned char salt[GR_SALT_LEN]; -+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ -+}; -+ -+struct name_entry { -+ __u32 key; -+ ino_t inode; -+ dev_t device; -+ char *name; -+ __u16 len; -+ __u8 deleted; -+ struct name_entry *prev; -+ struct name_entry *next; -+}; -+ -+struct inodev_entry { -+ struct name_entry *nentry; -+ struct inodev_entry *prev; -+ struct inodev_entry *next; -+}; -+ -+struct acl_role_db { -+ struct acl_role_label **r_hash; -+ __u32 r_size; -+}; -+ -+struct inodev_db { -+ struct inodev_entry **i_hash; -+ __u32 i_size; -+}; -+ -+struct name_db { -+ struct name_entry **n_hash; -+ __u32 n_size; -+}; -+ -+struct crash_uid { -+ uid_t uid; -+ unsigned long expires; -+}; -+ -+struct gr_hash_struct { -+ void **table; -+ void **nametable; -+ void *first; -+ __u32 table_size; -+ __u32 used_size; -+ int type; -+}; -+ -+/* Userspace Grsecurity ACL data structures */ -+ -+struct acl_subject_label { -+ char *filename; -+ ino_t inode; -+ dev_t device; -+ __u32 mode; -+ kernel_cap_t cap_mask; -+ kernel_cap_t cap_lower; -+ kernel_cap_t cap_invert_audit; -+ -+ struct rlimit res[GR_NLIMITS]; -+ __u32 resmask; -+ -+ __u8 user_trans_type; -+ __u8 group_trans_type; -+ uid_t *user_transitions; -+ gid_t *group_transitions; -+ __u16 user_trans_num; -+ __u16 group_trans_num; -+ -+ __u32 sock_families[2]; -+ __u32 ip_proto[8]; -+ __u32 ip_type; -+ struct acl_ip_label **ips; -+ __u32 ip_num; -+ __u32 inaddr_any_override; -+ -+ __u32 crashes; -+ unsigned long expires; -+ -+ struct acl_subject_label *parent_subject; -+ struct gr_hash_struct *hash; -+ struct acl_subject_label *prev; -+ struct acl_subject_label *next; -+ -+ struct acl_object_label **obj_hash; -+ __u32 obj_hash_size; -+ __u16 pax_flags; -+}; -+ -+struct role_allowed_ip { -+ __u32 addr; -+ __u32 netmask; -+ -+ struct role_allowed_ip *prev; -+ struct role_allowed_ip *next; -+}; -+ -+struct role_transition { -+ char 
*rolename; -+ -+ struct role_transition *prev; -+ struct role_transition *next; -+}; -+ -+struct acl_role_label { -+ char *rolename; -+ uid_t uidgid; -+ __u16 roletype; -+ -+ __u16 auth_attempts; -+ unsigned long expires; -+ -+ struct acl_subject_label *root_label; -+ struct gr_hash_struct *hash; -+ -+ struct acl_role_label *prev; -+ struct acl_role_label *next; -+ -+ struct role_transition *transitions; -+ struct role_allowed_ip *allowed_ips; -+ uid_t *domain_children; -+ __u16 domain_child_num; -+ -+ struct acl_subject_label **subj_hash; -+ __u32 subj_hash_size; -+}; -+ -+struct user_acl_role_db { -+ struct acl_role_label **r_table; -+ __u32 num_pointers; /* Number of allocations to track */ -+ __u32 num_roles; /* Number of roles */ -+ __u32 num_domain_children; /* Number of domain children */ -+ __u32 num_subjects; /* Number of subjects */ -+ __u32 num_objects; /* Number of objects */ -+}; -+ -+struct acl_object_label { -+ char *filename; -+ ino_t inode; -+ dev_t device; -+ __u32 mode; -+ -+ struct acl_subject_label *nested; -+ struct acl_object_label *globbed; -+ -+ /* next two structures not used */ -+ -+ struct acl_object_label *prev; -+ struct acl_object_label *next; -+}; -+ -+struct acl_ip_label { -+ char *iface; -+ __u32 addr; -+ __u32 netmask; -+ __u16 low, high; -+ __u8 mode; -+ __u32 type; -+ __u32 proto[8]; -+ -+ /* next two structures not used */ -+ -+ struct acl_ip_label *prev; -+ struct acl_ip_label *next; -+}; -+ -+struct gr_arg { -+ struct user_acl_role_db role_db; -+ unsigned char pw[GR_PW_LEN]; -+ unsigned char salt[GR_SALT_LEN]; -+ unsigned char sum[GR_SHA_LEN]; -+ unsigned char sp_role[GR_SPROLE_LEN]; -+ struct sprole_pw *sprole_pws; -+ dev_t segv_device; -+ ino_t segv_inode; -+ uid_t segv_uid; -+ __u16 num_sprole_pws; -+ __u16 mode; -+}; -+ -+struct gr_arg_wrapper { -+ struct gr_arg *arg; -+ __u32 version; -+ __u32 size; -+}; -+ -+struct subject_map { -+ struct acl_subject_label *user; -+ struct acl_subject_label *kernel; -+ struct subject_map *prev; -+ struct subject_map *next; -+}; -+ -+struct acl_subj_map_db { -+ struct subject_map **s_hash; -+ __u32 s_size; -+}; -+ -+/* End Data Structures Section */ -+ -+/* Hash functions generated by empirical testing by Brad Spengler -+ Makes good use of the low bits of the inode. Generally 0-1 times -+ in loop for successful match. 0-3 for unsuccessful match. 
-+ Shift/add algorithm with modulus of table size and an XOR*/ -+ -+static __inline__ unsigned int -+rhash(const uid_t uid, const __u16 type, const unsigned int sz) -+{ -+ return ((((uid + type) << (16 + type)) ^ uid) % sz); -+} -+ -+ static __inline__ unsigned int -+shash(const struct acl_subject_label *userp, const unsigned int sz) -+{ -+ return ((const unsigned long)userp % sz); -+} -+ -+static __inline__ unsigned int -+fhash(const ino_t ino, const dev_t dev, const unsigned int sz) -+{ -+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); -+} -+ -+static __inline__ unsigned int -+nhash(const char *name, const __u16 len, const unsigned int sz) -+{ -+ return full_name_hash((const unsigned char *)name, len) % sz; -+} -+ -+#define FOR_EACH_ROLE_START(role) \ -+ role = role_list; \ -+ while (role) { -+ -+#define FOR_EACH_ROLE_END(role) \ -+ role = role->prev; \ -+ } -+ -+#define FOR_EACH_SUBJECT_START(role,subj,iter) \ -+ subj = NULL; \ -+ iter = 0; \ -+ while (iter < role->subj_hash_size) { \ -+ if (subj == NULL) \ -+ subj = role->subj_hash[iter]; \ -+ if (subj == NULL) { \ -+ iter++; \ -+ continue; \ -+ } -+ -+#define FOR_EACH_SUBJECT_END(subj,iter) \ -+ subj = subj->next; \ -+ if (subj == NULL) \ -+ iter++; \ -+ } -+ -+ -+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ -+ subj = role->hash->first; \ -+ while (subj != NULL) { -+ -+#define FOR_EACH_NESTED_SUBJECT_END(subj) \ -+ subj = subj->next; \ -+ } -+ -+#endif -+ -diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h -new file mode 100644 -index 0000000..323ecf2 ---- /dev/null -+++ b/include/linux/gralloc.h -@@ -0,0 +1,9 @@ -+#ifndef __GRALLOC_H -+#define __GRALLOC_H -+ -+void acl_free_all(void); -+int acl_alloc_stack_init(unsigned long size); -+void *acl_alloc(unsigned long len); -+void *acl_alloc_num(unsigned long num, unsigned long len); -+ -+#endif -diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h -new file mode 100644 -index 0000000..b30e9bc ---- /dev/null -+++ b/include/linux/grdefs.h -@@ -0,0 +1,140 @@ -+#ifndef GRDEFS_H -+#define GRDEFS_H -+ -+/* Begin grsecurity status declarations */ -+ -+enum { -+ GR_READY = 0x01, -+ GR_STATUS_INIT = 0x00 // disabled state -+}; -+ -+/* Begin ACL declarations */ -+ -+/* Role flags */ -+ -+enum { -+ GR_ROLE_USER = 0x0001, -+ GR_ROLE_GROUP = 0x0002, -+ GR_ROLE_DEFAULT = 0x0004, -+ GR_ROLE_SPECIAL = 0x0008, -+ GR_ROLE_AUTH = 0x0010, -+ GR_ROLE_NOPW = 0x0020, -+ GR_ROLE_GOD = 0x0040, -+ GR_ROLE_LEARN = 0x0080, -+ GR_ROLE_TPE = 0x0100, -+ GR_ROLE_DOMAIN = 0x0200, -+ GR_ROLE_PAM = 0x0400, -+ GR_ROLE_PERSIST = 0x0800 -+}; -+ -+/* ACL Subject and Object mode flags */ -+enum { -+ GR_DELETED = 0x80000000 -+}; -+ -+/* ACL Object-only mode flags */ -+enum { -+ GR_READ = 0x00000001, -+ GR_APPEND = 0x00000002, -+ GR_WRITE = 0x00000004, -+ GR_EXEC = 0x00000008, -+ GR_FIND = 0x00000010, -+ GR_INHERIT = 0x00000020, -+ GR_SETID = 0x00000040, -+ GR_CREATE = 0x00000080, -+ GR_DELETE = 0x00000100, -+ GR_LINK = 0x00000200, -+ GR_AUDIT_READ = 0x00000400, -+ GR_AUDIT_APPEND = 0x00000800, -+ GR_AUDIT_WRITE = 0x00001000, -+ GR_AUDIT_EXEC = 0x00002000, -+ GR_AUDIT_FIND = 0x00004000, -+ GR_AUDIT_INHERIT= 0x00008000, -+ GR_AUDIT_SETID = 0x00010000, -+ GR_AUDIT_CREATE = 0x00020000, -+ GR_AUDIT_DELETE = 0x00040000, -+ GR_AUDIT_LINK = 0x00080000, -+ GR_PTRACERD = 0x00100000, -+ GR_NOPTRACE = 0x00200000, -+ GR_SUPPRESS = 0x00400000, -+ GR_NOLEARN = 0x00800000, -+ GR_INIT_TRANSFER= 0x01000000 -+}; -+ -+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | 
GR_AUDIT_EXEC | \ -+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ -+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) -+ -+/* ACL subject-only mode flags */ -+enum { -+ GR_KILL = 0x00000001, -+ GR_VIEW = 0x00000002, -+ GR_PROTECTED = 0x00000004, -+ GR_LEARN = 0x00000008, -+ GR_OVERRIDE = 0x00000010, -+ /* just a placeholder, this mode is only used in userspace */ -+ GR_DUMMY = 0x00000020, -+ GR_PROTSHM = 0x00000040, -+ GR_KILLPROC = 0x00000080, -+ GR_KILLIPPROC = 0x00000100, -+ /* just a placeholder, this mode is only used in userspace */ -+ GR_NOTROJAN = 0x00000200, -+ GR_PROTPROCFD = 0x00000400, -+ GR_PROCACCT = 0x00000800, -+ GR_RELAXPTRACE = 0x00001000, -+ GR_NESTED = 0x00002000, -+ GR_INHERITLEARN = 0x00004000, -+ GR_PROCFIND = 0x00008000, -+ GR_POVERRIDE = 0x00010000, -+ GR_KERNELAUTH = 0x00020000, -+ GR_ATSECURE = 0x00040000, -+ GR_SHMEXEC = 0x00080000 -+}; -+ -+enum { -+ GR_PAX_ENABLE_SEGMEXEC = 0x0001, -+ GR_PAX_ENABLE_PAGEEXEC = 0x0002, -+ GR_PAX_ENABLE_MPROTECT = 0x0004, -+ GR_PAX_ENABLE_RANDMMAP = 0x0008, -+ GR_PAX_ENABLE_EMUTRAMP = 0x0010, -+ GR_PAX_DISABLE_SEGMEXEC = 0x0100, -+ GR_PAX_DISABLE_PAGEEXEC = 0x0200, -+ GR_PAX_DISABLE_MPROTECT = 0x0400, -+ GR_PAX_DISABLE_RANDMMAP = 0x0800, -+ GR_PAX_DISABLE_EMUTRAMP = 0x1000, -+}; -+ -+enum { -+ GR_ID_USER = 0x01, -+ GR_ID_GROUP = 0x02, -+}; -+ -+enum { -+ GR_ID_ALLOW = 0x01, -+ GR_ID_DENY = 0x02, -+}; -+ -+#define GR_CRASH_RES 31 -+#define GR_UIDTABLE_MAX 500 -+ -+/* begin resource learning section */ -+enum { -+ GR_RLIM_CPU_BUMP = 60, -+ GR_RLIM_FSIZE_BUMP = 50000, -+ GR_RLIM_DATA_BUMP = 10000, -+ GR_RLIM_STACK_BUMP = 1000, -+ GR_RLIM_CORE_BUMP = 10000, -+ GR_RLIM_RSS_BUMP = 500000, -+ GR_RLIM_NPROC_BUMP = 1, -+ GR_RLIM_NOFILE_BUMP = 5, -+ GR_RLIM_MEMLOCK_BUMP = 50000, -+ GR_RLIM_AS_BUMP = 500000, -+ GR_RLIM_LOCKS_BUMP = 2, -+ GR_RLIM_SIGPENDING_BUMP = 5, -+ GR_RLIM_MSGQUEUE_BUMP = 10000, -+ GR_RLIM_NICE_BUMP = 1, -+ GR_RLIM_RTPRIO_BUMP = 1, -+ GR_RLIM_RTTIME_BUMP = 1000000 -+}; -+ -+#endif -diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h -new file mode 100644 -index 0000000..60cda84 ---- /dev/null -+++ b/include/linux/grinternal.h -@@ -0,0 +1,220 @@ -+#ifndef __GRINTERNAL_H -+#define __GRINTERNAL_H -+ -+#ifdef CONFIG_GRKERNSEC -+ -+#include <linux/fs.h> -+#include <linux/mnt_namespace.h> -+#include <linux/nsproxy.h> -+#include <linux/gracl.h> -+#include <linux/grdefs.h> -+#include <linux/grmsg.h> -+ -+void gr_add_learn_entry(const char *fmt, ...) 
-+ __attribute__ ((format (printf, 1, 2))); -+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, -+ const struct vfsmount *mnt); -+__u32 gr_check_create(const struct dentry *new_dentry, -+ const struct dentry *parent, -+ const struct vfsmount *mnt, const __u32 mode); -+int gr_check_protected_task(const struct task_struct *task); -+__u32 to_gr_audit(const __u32 reqmode); -+int gr_set_acls(const int type); -+int gr_apply_subject_to_task(struct task_struct *task); -+int gr_acl_is_enabled(void); -+char gr_roletype_to_char(void); -+ -+void gr_handle_alertkill(struct task_struct *task); -+char *gr_to_filename(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+char *gr_to_filename1(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+char *gr_to_filename2(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+char *gr_to_filename3(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+ -+extern int grsec_enable_harden_ptrace; -+extern int grsec_enable_link; -+extern int grsec_enable_fifo; -+extern int grsec_enable_execve; -+extern int grsec_enable_shm; -+extern int grsec_enable_execlog; -+extern int grsec_enable_signal; -+extern int grsec_enable_audit_ptrace; -+extern int grsec_enable_forkfail; -+extern int grsec_enable_time; -+extern int grsec_enable_rofs; -+extern int grsec_enable_chroot_shmat; -+extern int grsec_enable_chroot_mount; -+extern int grsec_enable_chroot_double; -+extern int grsec_enable_chroot_pivot; -+extern int grsec_enable_chroot_chdir; -+extern int grsec_enable_chroot_chmod; -+extern int grsec_enable_chroot_mknod; -+extern int grsec_enable_chroot_fchdir; -+extern int grsec_enable_chroot_nice; -+extern int grsec_enable_chroot_execlog; -+extern int grsec_enable_chroot_caps; -+extern int grsec_enable_chroot_sysctl; -+extern int grsec_enable_chroot_unix; -+extern int grsec_enable_tpe; -+extern int grsec_tpe_gid; -+extern int grsec_enable_tpe_all; -+extern int grsec_enable_tpe_invert; -+extern int grsec_enable_socket_all; -+extern int grsec_socket_all_gid; -+extern int grsec_enable_socket_client; -+extern int grsec_socket_client_gid; -+extern int grsec_enable_socket_server; -+extern int grsec_socket_server_gid; -+extern int grsec_audit_gid; -+extern int grsec_enable_group; -+extern int grsec_enable_audit_textrel; -+extern int grsec_enable_log_rwxmaps; -+extern int grsec_enable_mount; -+extern int grsec_enable_chdir; -+extern int grsec_resource_logging; -+extern int grsec_enable_blackhole; -+extern int grsec_lastack_retries; -+extern int grsec_enable_brute; -+extern int grsec_lock; -+ -+extern spinlock_t grsec_alert_lock; -+extern unsigned long grsec_alert_wtime; -+extern unsigned long grsec_alert_fyet; -+ -+extern spinlock_t grsec_audit_lock; -+ -+extern rwlock_t grsec_exec_file_lock; -+ -+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \ -+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \ -+ (tsk)->exec_file->f_vfsmnt) : "/") -+ -+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \ -+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \ -+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") -+ -+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \ -+ gr_to_filename((tsk)->exec_file->f_path.dentry, \ -+ (tsk)->exec_file->f_vfsmnt) : "/") -+ -+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? 
\ -+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \ -+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") -+ -+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted) -+ -+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) -+ -+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \ -+ (task)->pid, (cred)->uid, \ -+ (cred)->euid, (cred)->gid, (cred)->egid, \ -+ gr_parent_task_fullpath(task), \ -+ (task)->real_parent->comm, (task)->real_parent->pid, \ -+ (pcred)->uid, (pcred)->euid, \ -+ (pcred)->gid, (pcred)->egid -+ -+#define GR_CHROOT_CAPS {{ \ -+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ -+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ -+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ -+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ -+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ -+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \ -+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }} -+ -+#define security_learn(normal_msg,args...) \ -+({ \ -+ read_lock(&grsec_exec_file_lock); \ -+ gr_add_learn_entry(normal_msg "\n", ## args); \ -+ read_unlock(&grsec_exec_file_lock); \ -+}) -+ -+enum { -+ GR_DO_AUDIT, -+ GR_DONT_AUDIT, -+ /* used for non-audit messages that we shouldn't kill the task on */ -+ GR_DONT_AUDIT_GOOD -+}; -+ -+enum { -+ GR_TTYSNIFF, -+ GR_RBAC, -+ GR_RBAC_STR, -+ GR_STR_RBAC, -+ GR_RBAC_MODE2, -+ GR_RBAC_MODE3, -+ GR_FILENAME, -+ GR_SYSCTL_HIDDEN, -+ GR_NOARGS, -+ GR_ONE_INT, -+ GR_ONE_INT_TWO_STR, -+ GR_ONE_STR, -+ GR_STR_INT, -+ GR_TWO_STR_INT, -+ GR_TWO_INT, -+ GR_TWO_U64, -+ GR_THREE_INT, -+ GR_FIVE_INT_TWO_STR, -+ GR_TWO_STR, -+ GR_THREE_STR, -+ GR_FOUR_STR, -+ GR_STR_FILENAME, -+ GR_FILENAME_STR, -+ GR_FILENAME_TWO_INT, -+ GR_FILENAME_TWO_INT_STR, -+ GR_TEXTREL, -+ GR_PTRACE, -+ GR_RESOURCE, -+ GR_CAP, -+ GR_SIG, -+ GR_SIG2, -+ GR_CRASH1, -+ GR_CRASH2, -+ GR_PSACCT, -+ GR_RWXMAP -+}; -+ -+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) -+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) -+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) -+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) -+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) -+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) -+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) -+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) -+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) -+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) -+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) -+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) -+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) -+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) -+#define gr_log_two_u64(audit, msg, num1, num2) 
gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2) -+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) -+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) -+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) -+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num) -+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) -+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) -+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) -+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) -+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) -+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) -+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) -+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) -+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) -+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) -+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) -+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) -+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) -+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) -+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) -+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str) -+ -+void gr_log_varargs(int audit, const char *msg, int argtypes, ...); -+ -+#endif -+ -+#endif -diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h -new file mode 100644 -index 0000000..9d5fd4a ---- /dev/null -+++ b/include/linux/grmsg.h -@@ -0,0 +1,108 @@ -+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" -+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" -+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " -+#define GR_STOPMOD_MSG "denied modification of module state by " -+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " -+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " -+#define GR_IOPERM_MSG "denied use of ioperm() by " -+#define GR_IOPL_MSG "denied use of iopl() by " -+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " -+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract 
AF_UNIX socket outside of chroot by " -+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " -+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by " -+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " -+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" -+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" -+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " -+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " -+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " -+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " -+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " -+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " -+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " -+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " -+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " -+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " -+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " -+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " -+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " -+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " -+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " -+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " -+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " -+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " -+#define GR_EXEC_ACL_MSG "%s execution of %.950s by " -+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " -+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" -+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" -+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " -+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " -+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " -+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " -+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " -+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " -+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " -+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " -+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " -+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " -+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " -+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by " -+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " -+#define GR_INITF_ACL_MSG "init_variables() failed %s by " -+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use <kernel image name> gracl=off from your boot loader" -+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by " -+#define GR_SHUTS_ACL_MSG "shutdown auth success for " -+#define GR_SHUTF_ACL_MSG "shutdown auth failure for " -+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " -+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " -+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " -+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " -+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " -+#define GR_ENABLEF_ACL_MSG "unable to load %s for " -+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" -+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " -+#define GR_RELOADF_ACL_MSG "failed reload of %s for " -+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " -+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " -+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " -+#define GR_SPROLEF_ACL_MSG "special role %s failure for " -+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " -+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " -+#define GR_INVMODE_ACL_MSG "invalid mode %d by " -+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " -+#define GR_FAILFORK_MSG "failed fork with errno %s by " -+#define GR_NICE_CHROOT_MSG "denied priority change by " -+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " -+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " -+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " -+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " -+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " -+#define GR_TIME_MSG "time set by " -+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " -+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " -+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " -+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " -+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by " -+#define GR_BIND_MSG "denied bind() by " -+#define GR_CONNECT_MSG "denied connect() by " -+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " -+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " -+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" -+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " -+#define GR_CAP_ACL_MSG "use of %s denied for " -+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for " -+#define GR_CAP_ACL_MSG2 "use of %s permitted for " -+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " -+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " -+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " -+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " -+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " -+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " -+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " -+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " -+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by " -+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of 
%.950s by " -+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " -+#define GR_VM86_MSG "denied use of vm86 by " -+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " -+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " -diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h -new file mode 100644 -index 0000000..bd25f72 ---- /dev/null -+++ b/include/linux/grsecurity.h -@@ -0,0 +1,228 @@ -+#ifndef GR_SECURITY_H -+#define GR_SECURITY_H -+#include <linux/fs.h> -+#include <linux/fs_struct.h> -+#include <linux/binfmts.h> -+#include <linux/gracl.h> -+ -+/* notify of brain-dead configs */ -+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled." -+#endif -+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) -+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." -+#endif -+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS) -+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled." -+#endif -+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS) -+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled." -+#endif -+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) -+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." -+#endif -+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) -+#error "CONFIG_PAX enabled, but no PaX options are enabled." 
-+#endif -+ -+#include <linux/compat.h> -+ -+struct user_arg_ptr { -+#ifdef CONFIG_COMPAT -+ bool is_compat; -+#endif -+ union { -+ const char __user *const __user *native; -+#ifdef CONFIG_COMPAT -+ compat_uptr_t __user *compat; -+#endif -+ } ptr; -+}; -+ -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); -+void gr_handle_brute_check(void); -+void gr_handle_kernel_exploit(void); -+int gr_process_user_ban(void); -+ -+char gr_roletype_to_char(void); -+ -+int gr_acl_enable_at_secure(void); -+ -+int gr_check_user_change(int real, int effective, int fs); -+int gr_check_group_change(int real, int effective, int fs); -+ -+void gr_del_task_from_ip_table(struct task_struct *p); -+ -+int gr_pid_is_chrooted(struct task_struct *p); -+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); -+int gr_handle_chroot_nice(void); -+int gr_handle_chroot_sysctl(const int op); -+int gr_handle_chroot_setpriority(struct task_struct *p, -+ const int niceval); -+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); -+int gr_handle_chroot_chroot(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_handle_chroot_chdir(struct path *path); -+int gr_handle_chroot_chmod(const struct dentry *dentry, -+ const struct vfsmount *mnt, const int mode); -+int gr_handle_chroot_mknod(const struct dentry *dentry, -+ const struct vfsmount *mnt, const int mode); -+int gr_handle_chroot_mount(const struct dentry *dentry, -+ const struct vfsmount *mnt, -+ const char *dev_name); -+int gr_handle_chroot_pivot(void); -+int gr_handle_chroot_unix(const pid_t pid); -+ -+int gr_handle_rawio(const struct inode *inode); -+ -+void gr_handle_ioperm(void); -+void gr_handle_iopl(void); -+ -+int gr_tpe_allow(const struct file *file); -+ -+void gr_set_chroot_entries(struct task_struct *task, struct path *path); -+void gr_clear_chroot_entries(struct task_struct *task); -+ -+void gr_log_forkfail(const int retval); -+void gr_log_timechange(void); -+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t); -+void gr_log_chdir(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_log_chroot_exec(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv); -+void gr_log_remount(const char *devname, const int retval); -+void gr_log_unmount(const char *devname, const int retval); -+void gr_log_mount(const char *from, const char *to, const int retval); -+void gr_log_textrel(struct vm_area_struct *vma); -+void gr_log_rwxmmap(struct file *file); -+void gr_log_rwxmprotect(struct file *file); -+ -+int gr_handle_follow_link(const struct inode *parent, -+ const struct inode *inode, -+ const struct dentry *dentry, -+ const struct vfsmount *mnt); -+int gr_handle_fifo(const struct dentry *dentry, -+ const struct vfsmount *mnt, -+ const struct dentry *dir, const int flag, -+ const int acc_mode); -+int gr_handle_hardlink(const struct dentry *dentry, -+ const struct vfsmount *mnt, -+ struct inode *inode, -+ const int mode, const char *to); -+ -+int gr_is_capable(const int cap); -+int gr_is_capable_nolog(const int cap); -+void gr_learn_resource(const struct task_struct *task, const int limit, -+ const unsigned long wanted, const int gt); -+void gr_copy_label(struct task_struct *tsk); -+void gr_handle_crash(struct task_struct *task, const int sig); -+int gr_handle_signal(const struct task_struct *p, const int sig); -+int gr_check_crash_uid(const uid_t uid); -+int 
gr_check_protected_task(const struct task_struct *task); -+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); -+int gr_acl_handle_mmap(const struct file *file, -+ const unsigned long prot); -+int gr_acl_handle_mprotect(const struct file *file, -+ const unsigned long prot); -+int gr_check_hidden_task(const struct task_struct *tsk); -+__u32 gr_acl_handle_truncate(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+__u32 gr_acl_handle_utime(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+__u32 gr_acl_handle_access(const struct dentry *dentry, -+ const struct vfsmount *mnt, const int fmode); -+__u32 gr_acl_handle_fchmod(const struct dentry *dentry, -+ const struct vfsmount *mnt, mode_t mode); -+__u32 gr_acl_handle_chmod(const struct dentry *dentry, -+ const struct vfsmount *mnt, mode_t mode); -+__u32 gr_acl_handle_chown(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+__u32 gr_acl_handle_setxattr(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+int gr_handle_ptrace(struct task_struct *task, const long request); -+int gr_handle_proc_ptrace(struct task_struct *task); -+__u32 gr_acl_handle_execve(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+int gr_check_crash_exec(const struct file *filp); -+int gr_acl_is_enabled(void); -+void gr_set_kernel_label(struct task_struct *task); -+void gr_set_role_label(struct task_struct *task, const uid_t uid, -+ const gid_t gid); -+int gr_set_proc_label(const struct dentry *dentry, -+ const struct vfsmount *mnt, -+ const int unsafe_share); -+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+__u32 gr_acl_handle_open(const struct dentry *dentry, -+ const struct vfsmount *mnt, int acc_mode); -+__u32 gr_acl_handle_creat(const struct dentry *dentry, -+ const struct dentry *p_dentry, -+ const struct vfsmount *p_mnt, -+ int open_flags, int acc_mode, const int imode); -+void gr_handle_create(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_handle_proc_create(const struct dentry *dentry, -+ const struct inode *inode); -+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ const int mode); -+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt); -+__u32 gr_acl_handle_rmdir(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_handle_delete(const ino_t ino, const dev_t dev); -+__u32 gr_acl_handle_unlink(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ const char *from); -+__u32 gr_acl_handle_link(const struct dentry *new_dentry, -+ const struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ const struct dentry *old_dentry, -+ const struct vfsmount *old_mnt, const char *to); -+int gr_acl_handle_rename(struct dentry *new_dentry, -+ struct dentry *parent_dentry, -+ const struct vfsmount *parent_mnt, -+ struct dentry *old_dentry, -+ struct inode *old_parent_inode, -+ struct vfsmount *old_mnt, const char *newname); -+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, -+ struct dentry *old_dentry, -+ struct dentry *new_dentry, -+ struct vfsmount *mnt, const __u8 replace); -+__u32 gr_check_link(const struct dentry *new_dentry, -+ const struct dentry 
*parent_dentry, -+ const struct vfsmount *parent_mnt, -+ const struct dentry *old_dentry, -+ const struct vfsmount *old_mnt); -+int gr_acl_handle_filldir(const struct file *file, const char *name, -+ const unsigned int namelen, const ino_t ino); -+ -+__u32 gr_acl_handle_unix(const struct dentry *dentry, -+ const struct vfsmount *mnt); -+void gr_acl_handle_exit(void); -+void gr_acl_handle_psacct(struct task_struct *task, const long code); -+int gr_acl_handle_procpidmem(const struct task_struct *task); -+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); -+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); -+void gr_audit_ptrace(struct task_struct *task); -+dev_t gr_get_dev_from_dentry(struct dentry *dentry); -+ -+#ifdef CONFIG_GRKERNSEC -+void task_grsec_rbac(struct seq_file *m, struct task_struct *p); -+void gr_handle_vm86(void); -+void gr_handle_mem_readwrite(u64 from, u64 to); -+ -+extern int grsec_enable_dmesg; -+extern int grsec_disable_privio; -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+extern int grsec_enable_chroot_findtask; -+#endif -+#endif -+ -+#endif -diff --git a/include/linux/grsock.h b/include/linux/grsock.h -new file mode 100644 -index 0000000..e7ffaaf ---- /dev/null -+++ b/include/linux/grsock.h -@@ -0,0 +1,19 @@ -+#ifndef __GRSOCK_H -+#define __GRSOCK_H -+ -+extern void gr_attach_curr_ip(const struct sock *sk); -+extern int gr_handle_sock_all(const int family, const int type, -+ const int protocol); -+extern int gr_handle_sock_server(const struct sockaddr *sck); -+extern int gr_handle_sock_server_other(const struct sock *sck); -+extern int gr_handle_sock_client(const struct sockaddr *sck); -+extern int gr_search_connect(struct socket * sock, -+ struct sockaddr_in * addr); -+extern int gr_search_bind(struct socket * sock, -+ struct sockaddr_in * addr); -+extern int gr_search_listen(struct socket * sock); -+extern int gr_search_accept(struct socket * sock); -+extern int gr_search_socket(const int domain, const int type, -+ const int protocol); -+ -+#endif -diff --git a/include/linux/hid.h b/include/linux/hid.h -index 9cf8e7a..5ec94d0 100644 ---- a/include/linux/hid.h -+++ b/include/linux/hid.h -@@ -676,7 +676,7 @@ struct hid_ll_driver { - unsigned int code, int value); - - int (*parse)(struct hid_device *hdev); --}; -+} __no_const; - - #define PM_HINT_FULLON 1<<5 - #define PM_HINT_NORMAL 1<<1 -diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 3a93f73..b19d0b3 100644 ---- a/include/linux/highmem.h -+++ b/include/linux/highmem.h -@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page) - kunmap_atomic(kaddr, KM_USER0); - } - -+static inline void sanitize_highpage(struct page *page) -+{ -+ void *kaddr; -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ kaddr = kmap_atomic(page, KM_CLEARPAGE); -+ clear_page(kaddr); -+ kunmap_atomic(kaddr, KM_CLEARPAGE); -+ local_irq_restore(flags); -+} -+ - static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, - unsigned start2, unsigned end2) -diff --git a/include/linux/i2c.h b/include/linux/i2c.h -index a6c652e..1f5878f 100644 ---- a/include/linux/i2c.h -+++ b/include/linux/i2c.h -@@ -346,6 +346,7 @@ struct i2c_algorithm { - /* To determine what the adapter supports */ - u32 (*functionality) (struct i2c_adapter *); - }; -+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const; - - /* - * i2c_adapter is the structure used to identify a physical i2c bus along -diff --git 
a/include/linux/i2o.h b/include/linux/i2o.h -index a6deef4..c56a7f2 100644 ---- a/include/linux/i2o.h -+++ b/include/linux/i2o.h -@@ -564,7 +564,7 @@ struct i2o_controller { - struct i2o_device *exec; /* Executive */ - #if BITS_PER_LONG == 64 - spinlock_t context_list_lock; /* lock for context_list */ -- atomic_t context_list_counter; /* needed for unique contexts */ -+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */ - struct list_head context_list; /* list of context id's - and pointers */ - #endif -diff --git a/include/linux/init.h b/include/linux/init.h -index 9146f39..885354d 100644 ---- a/include/linux/init.h -+++ b/include/linux/init.h -@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline); - - /* Each module must use one module_init(). */ - #define module_init(initfn) \ -- static inline initcall_t __inittest(void) \ -+ static inline __used initcall_t __inittest(void) \ - { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); - - /* This is only required if you want to be unloadable. */ - #define module_exit(exitfn) \ -- static inline exitcall_t __exittest(void) \ -+ static inline __used exitcall_t __exittest(void) \ - { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); - -diff --git a/include/linux/init_task.h b/include/linux/init_task.h -index d14e058..4162929 100644 ---- a/include/linux/init_task.h -+++ b/include/linux/init_task.h -@@ -126,6 +126,12 @@ extern struct cred init_cred; - # define INIT_PERF_EVENTS(tsk) - #endif - -+#ifdef CONFIG_X86 -+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO, -+#else -+#define INIT_TASK_THREAD_INFO -+#endif -+ - /* - * INIT_TASK is used to set up the first task table, touch at - * your own risk!. Base=0, limit=0x1fffff (=2MB) -@@ -164,6 +170,7 @@ extern struct cred init_cred; - RCU_INIT_POINTER(.cred, &init_cred), \ - .comm = "swapper", \ - .thread = INIT_THREAD, \ -+ INIT_TASK_THREAD_INFO \ - .fs = &init_fs, \ - .files = &init_files, \ - .signal = &init_signals, \ -diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h -index 9310c69..6ebb244 100644 ---- a/include/linux/intel-iommu.h -+++ b/include/linux/intel-iommu.h -@@ -296,7 +296,7 @@ struct iommu_flush { - u8 fm, u64 type); - void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type); --}; -+} __no_const; - - enum { - SR_DMAR_FECTL_REG, -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index f51a81b..adfcb44 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -425,7 +425,7 @@ enum - /* map softirq index to softirq name. update 'softirq_to_name' in - * kernel/softirq.c when adding a new softirq. - */ --extern char *softirq_to_name[NR_SOFTIRQS]; -+extern const char * const softirq_to_name[NR_SOFTIRQS]; - - /* softirq mask and active fields moved to irq_cpustat_t in - * asm/hardirq.h to get better cache usage. 
KAO -@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS]; - - struct softirq_action - { -- void (*action)(struct softirq_action *); -+ void (*action)(void); - }; - - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); --extern void open_softirq(int nr, void (*action)(struct softirq_action *)); -+extern void open_softirq(int nr, void (*action)(void)); - extern void softirq_init(void); - static inline void __raise_softirq_irqoff(unsigned int nr) - { -diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h -index 0df513b..fe901a2 100644 ---- a/include/linux/kallsyms.h -+++ b/include/linux/kallsyms.h -@@ -15,7 +15,8 @@ - - struct module; - --#ifdef CONFIG_KALLSYMS -+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS) -+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) - /* Lookup the address for a symbol. Returns 0 if not found. */ - unsigned long kallsyms_lookup_name(const char *name); - -@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u - /* Stupid that this does nothing, but I didn't create this mess. */ - #define __print_symbol(fmt, addr) - #endif /*CONFIG_KALLSYMS*/ -+#else /* when included by kallsyms.c, vsnprintf.c, or -+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */ -+extern void __print_symbol(const char *fmt, unsigned long address); -+extern int sprint_backtrace(char *buffer, unsigned long address); -+extern int sprint_symbol(char *buffer, unsigned long address); -+const char *kallsyms_lookup(unsigned long addr, -+ unsigned long *symbolsize, -+ unsigned long *offset, -+ char **modname, char *namebuf); -+#endif - - /* This macro allows us to keep printk typechecking */ - static void __check_printsym_format(const char *fmt, ...) -diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h -index fa39183..40160be 100644 ---- a/include/linux/kgdb.h -+++ b/include/linux/kgdb.h -@@ -53,7 +53,7 @@ extern int kgdb_connected; - extern int kgdb_io_module_registered; - - extern atomic_t kgdb_setting_breakpoint; --extern atomic_t kgdb_cpu_doing_single_step; -+extern atomic_unchecked_t kgdb_cpu_doing_single_step; - - extern struct task_struct *kgdb_usethread; - extern struct task_struct *kgdb_contthread; -@@ -251,7 +251,7 @@ struct kgdb_arch { - void (*disable_hw_break)(struct pt_regs *regs); - void (*remove_all_hw_break)(void); - void (*correct_hw_break)(void); --}; -+} __do_const; - - /** - * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. -@@ -276,7 +276,7 @@ struct kgdb_io { - void (*pre_exception) (void); - void (*post_exception) (void); - int is_console; --}; -+} __do_const; - - extern struct kgdb_arch arch_kgdb_ops; - -diff --git a/include/linux/kmod.h b/include/linux/kmod.h -index 0da38cf..d23f05f 100644 ---- a/include/linux/kmod.h -+++ b/include/linux/kmod.h -@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */ - * usually useless though. */ - extern int __request_module(bool wait, const char *name, ...) \ - __attribute__((format(printf, 2, 3))); -+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \ -+ __attribute__((format(printf, 3, 4))); - #define request_module(mod...) __request_module(true, mod) - #define request_module_nowait(mod...) __request_module(false, mod) - #define try_then_request_module(x, mod...) 
\ -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h -index eabb21a..3f030f4 100644 ---- a/include/linux/kvm_host.h -+++ b/include/linux/kvm_host.h -@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); - void vcpu_load(struct kvm_vcpu *vcpu); - void vcpu_put(struct kvm_vcpu *vcpu); - --int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, -+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, - struct module *module); - void kvm_exit(void); - -@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg); - int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); - --int kvm_arch_init(void *opaque); -+int kvm_arch_init(const void *opaque); - void kvm_arch_exit(void); - - int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); -diff --git a/include/linux/libata.h b/include/linux/libata.h -index efd6f98..5f5fd37 100644 ---- a/include/linux/libata.h -+++ b/include/linux/libata.h -@@ -909,7 +909,7 @@ struct ata_port_operations { - * fields must be pointers. - */ - const struct ata_port_operations *inherits; --}; -+} __do_const; - - struct ata_port_info { - unsigned long flags; -diff --git a/include/linux/mca.h b/include/linux/mca.h -index 3797270..7765ede 100644 ---- a/include/linux/mca.h -+++ b/include/linux/mca.h -@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions { - int region); - void * (*mca_transform_memory)(struct mca_device *, - void *memory); --}; -+} __no_const; - - struct mca_bus { - u64 default_dma_mask; -diff --git a/include/linux/memory.h b/include/linux/memory.h -index 935699b..11042cc 100644 ---- a/include/linux/memory.h -+++ b/include/linux/memory.h -@@ -144,7 +144,7 @@ struct memory_accessor { - size_t count); - ssize_t (*write)(struct memory_accessor *, const char *buf, - off_t offset, size_t count); --}; -+} __no_const; - - /* - * Kernel text modification mutex, used for code patching. 
Users of this lock -diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h -index 896b5e4..1159ad0 100644 ---- a/include/linux/mfd/abx500.h -+++ b/include/linux/mfd/abx500.h -@@ -234,6 +234,7 @@ struct abx500_ops { - int (*event_registers_startup_state_get) (struct device *, u8 *); - int (*startup_irq_enabled) (struct device *, unsigned int); - }; -+typedef struct abx500_ops __no_const abx500_ops_no_const; - - int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops); - void abx500_remove_ops(struct device *dev); -diff --git a/include/linux/mm.h b/include/linux/mm.h -index fedc5f0..7cedb6d 100644 ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp); - - #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ - #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) -+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */ -+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */ -+#else - #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ -+#endif -+ - #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ - #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ - -@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page); - int set_page_dirty_lock(struct page *page); - int clear_page_dirty_for_io(struct page *page); - --/* Is the vma a continuation of the stack vma above it? */ --static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) --{ -- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); --} -- --static inline int stack_guard_page_start(struct vm_area_struct *vma, -- unsigned long addr) --{ -- return (vma->vm_flags & VM_GROWSDOWN) && -- (vma->vm_start == addr) && -- !vma_growsdown(vma->vm_prev, addr); --} -- --/* Is the vma a continuation of the stack vma below it? 
*/ --static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) --{ -- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); --} -- --static inline int stack_guard_page_end(struct vm_area_struct *vma, -- unsigned long addr) --{ -- return (vma->vm_flags & VM_GROWSUP) && -- (vma->vm_end == addr) && -- !vma_growsup(vma->vm_next, addr); --} -- - extern unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len); -@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) - } - #endif - -+#ifdef CONFIG_MMU -+pgprot_t vm_get_page_prot(vm_flags_t vm_flags); -+#else -+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) -+{ -+ return __pgprot(0); -+} -+#endif -+ - int vma_wants_writenotify(struct vm_area_struct *vma); - - extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, -@@ -1417,6 +1405,7 @@ out: - } - - extern int do_munmap(struct mm_struct *, unsigned long, size_t); -+extern int __do_munmap(struct mm_struct *, unsigned long, size_t); - - extern unsigned long do_brk(unsigned long, unsigned long); - -@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add - extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, - struct vm_area_struct **pprev); - -+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); -+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); -+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); -+ - /* Look up the first VMA which intersects the interval start_addr..end_addr-1, - NULL if none. Assume start_addr < end_addr. 
*/ - static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) -@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma) - return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - } - --#ifdef CONFIG_MMU --pgprot_t vm_get_page_prot(unsigned long vm_flags); --#else --static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) --{ -- return __pgprot(0); --} --#endif -- - struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); - int remap_pfn_range(struct vm_area_struct *, unsigned long addr, - unsigned long pfn, unsigned long size, pgprot_t); -@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn); - extern int sysctl_memory_failure_early_kill; - extern int sysctl_memory_failure_recovery; - extern void shake_page(struct page *p, int access); --extern atomic_long_t mce_bad_pages; -+extern atomic_long_unchecked_t mce_bad_pages; - extern int soft_offline_page(struct page *page, int flags); - - extern void dump_page(struct page *page); -@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, - unsigned int pages_per_huge_page); - #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ - -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); -+#else -+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} -+#endif -+ - #endif /* __KERNEL__ */ - #endif /* _LINUX_MM_H */ -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 10a2f62..c8fa287 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -230,6 +230,8 @@ struct vm_area_struct { - #ifdef CONFIG_NUMA - struct mempolicy *vm_policy; /* NUMA policy for the VMA */ - #endif -+ -+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ - }; - - struct core_thread { -@@ -362,6 +364,24 @@ struct mm_struct { - #ifdef CONFIG_CPUMASK_OFFSTACK - struct cpumask cpumask_allocation; - #endif -+ -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+ unsigned long pax_flags; -+#endif -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ unsigned long call_dl_resolve; -+#endif -+ -+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) -+ unsigned long call_syscall; -+#endif -+ -+#ifdef CONFIG_PAX_ASLR -+ unsigned long delta_mmap; /* randomized offset */ -+ unsigned long delta_stack; /* randomized offset */ -+#endif -+ - }; - - static inline void mm_init_cpumask(struct mm_struct *mm) -diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h -index 1d1b1e1..2a13c78 100644 ---- a/include/linux/mmu_notifier.h -+++ b/include/linux/mmu_notifier.h -@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) - */ - #define ptep_clear_flush_notify(__vma, __address, __ptep) \ - ({ \ -- pte_t __pte; \ -+ pte_t ___pte; \ - struct vm_area_struct *___vma = __vma; \ - unsigned long ___address = __address; \ -- __pte = ptep_clear_flush(___vma, ___address, __ptep); \ -+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \ - mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ -- __pte; \ -+ ___pte; \ - }) - - #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \ -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index be1ac8d..26868ce 100644 ---- 
a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -356,7 +356,7 @@ struct zone { - unsigned long flags; /* zone flags, see below */ - - /* Zone statistics */ -- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; -+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; - - /* - * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on -diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h -index ae28e93..1ac2233 100644 ---- a/include/linux/mod_devicetable.h -+++ b/include/linux/mod_devicetable.h -@@ -12,7 +12,7 @@ - typedef unsigned long kernel_ulong_t; - #endif - --#define PCI_ANY_ID (~0) -+#define PCI_ANY_ID ((__u16)~0) - - struct pci_device_id { - __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ -@@ -131,7 +131,7 @@ struct usb_device_id { - #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 - #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 - --#define HID_ANY_ID (~0) -+#define HID_ANY_ID (~0U) - - struct hid_device_id { - __u16 bus; -diff --git a/include/linux/module.h b/include/linux/module.h -index 1c30087..fc2a442 100644 ---- a/include/linux/module.h -+++ b/include/linux/module.h -@@ -16,6 +16,7 @@ - #include <linux/kobject.h> - #include <linux/moduleparam.h> - #include <linux/tracepoint.h> -+#include <linux/fs.h> - - #include <linux/percpu.h> - #include <asm/module.h> -@@ -327,19 +328,16 @@ struct module - int (*init)(void); - - /* If this is non-NULL, vfree after init() returns */ -- void *module_init; -+ void *module_init_rx, *module_init_rw; - - /* Here is the actual code + data, vfree'd on unload. */ -- void *module_core; -+ void *module_core_rx, *module_core_rw; - - /* Here are the sizes of the init and core sections */ -- unsigned int init_size, core_size; -+ unsigned int init_size_rw, core_size_rw; - - /* The size of the executable code in each section. 
*/ -- unsigned int init_text_size, core_text_size; -- -- /* Size of RO sections of the module (text+rodata) */ -- unsigned int init_ro_size, core_ro_size; -+ unsigned int init_size_rx, core_size_rx; - - /* Arch-specific module values */ - struct mod_arch_specific arch; -@@ -395,6 +393,10 @@ struct module - #ifdef CONFIG_EVENT_TRACING - struct ftrace_event_call **trace_events; - unsigned int num_trace_events; -+ struct file_operations trace_id; -+ struct file_operations trace_enable; -+ struct file_operations trace_format; -+ struct file_operations trace_filter; - #endif - #ifdef CONFIG_FTRACE_MCOUNT_RECORD - unsigned int num_ftrace_callsites; -@@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr); - bool is_module_percpu_address(unsigned long addr); - bool is_module_text_address(unsigned long addr); - -+static inline int within_module_range(unsigned long addr, void *start, unsigned long size) -+{ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ if (ktla_ktva(addr) >= (unsigned long)start && -+ ktla_ktva(addr) < (unsigned long)start + size) -+ return 1; -+#endif -+ -+ return ((void *)addr >= start && (void *)addr < start + size); -+} -+ -+static inline int within_module_core_rx(unsigned long addr, struct module *mod) -+{ -+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx); -+} -+ -+static inline int within_module_core_rw(unsigned long addr, struct module *mod) -+{ -+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw); -+} -+ -+static inline int within_module_init_rx(unsigned long addr, struct module *mod) -+{ -+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx); -+} -+ -+static inline int within_module_init_rw(unsigned long addr, struct module *mod) -+{ -+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw); -+} -+ - static inline int within_module_core(unsigned long addr, struct module *mod) - { -- return (unsigned long)mod->module_core <= addr && -- addr < (unsigned long)mod->module_core + mod->core_size; -+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod); - } - - static inline int within_module_init(unsigned long addr, struct module *mod) - { -- return (unsigned long)mod->module_init <= addr && -- addr < (unsigned long)mod->module_init + mod->init_size; -+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod); - } - - /* Search for module by name: must hold module_mutex. */ -diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h -index b2be02e..6a9fdb1 100644 ---- a/include/linux/moduleloader.h -+++ b/include/linux/moduleloader.h -@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); - sections. Returns NULL on failure. */ - void *module_alloc(unsigned long size); - -+#ifdef CONFIG_PAX_KERNEXEC -+void *module_alloc_exec(unsigned long size); -+#else -+#define module_alloc_exec(x) module_alloc(x) -+#endif -+ - /* Free memory returned from module_alloc. */ - void module_free(struct module *mod, void *module_region); - -+#ifdef CONFIG_PAX_KERNEXEC -+void module_free_exec(struct module *mod, void *module_region); -+#else -+#define module_free_exec(x, y) module_free((x), (y)) -+#endif -+ - /* Apply the given relocation to the (simplified) ELF. Return -error - or 0. 
*/ - int apply_relocate(Elf_Shdr *sechdrs, -diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h -index ddaae98..3c70938 100644 ---- a/include/linux/moduleparam.h -+++ b/include/linux/moduleparam.h -@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void) - * @len is usually just sizeof(string). - */ - #define module_param_string(name, string, len, perm) \ -- static const struct kparam_string __param_string_##name \ -+ static const struct kparam_string __param_string_##name __used \ - = { len, string }; \ - __module_param_call(MODULE_PARAM_PREFIX, name, \ - &param_ops_string, \ -@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); - * module_param_named() for why this might be necessary. - */ - #define module_param_array_named(name, array, type, nump, perm) \ -- static const struct kparam_array __param_arr_##name \ -+ static const struct kparam_array __param_arr_##name __used \ - = { .max = ARRAY_SIZE(array), .num = nump, \ - .ops = &param_ops_##type, \ - .elemsize = sizeof(array[0]), .elem = array }; \ -diff --git a/include/linux/namei.h b/include/linux/namei.h -index ffc0213..2c1f2cb 100644 ---- a/include/linux/namei.h -+++ b/include/linux/namei.h -@@ -24,7 +24,7 @@ struct nameidata { - unsigned seq; - int last_type; - unsigned depth; -- char *saved_names[MAX_NESTED_LINKS + 1]; -+ const char *saved_names[MAX_NESTED_LINKS + 1]; - - /* Intent data */ - union { -@@ -94,12 +94,12 @@ extern int follow_up(struct path *); - extern struct dentry *lock_rename(struct dentry *, struct dentry *); - extern void unlock_rename(struct dentry *, struct dentry *); - --static inline void nd_set_link(struct nameidata *nd, char *path) -+static inline void nd_set_link(struct nameidata *nd, const char *path) - { - nd->saved_names[nd->depth] = path; - } - --static inline char *nd_get_link(struct nameidata *nd) -+static inline const char *nd_get_link(const struct nameidata *nd) - { - return nd->saved_names[nd->depth]; - } -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index ddee79b..67af106 100644 ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -944,6 +944,7 @@ struct net_device_ops { - int (*ndo_set_features)(struct net_device *dev, - u32 features); - }; -+typedef struct net_device_ops __no_const net_device_ops_no_const; - - /* - * The DEVICE structure. 
-diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h -new file mode 100644 -index 0000000..33f4af8 ---- /dev/null -+++ b/include/linux/netfilter/xt_gradm.h -@@ -0,0 +1,9 @@ -+#ifndef _LINUX_NETFILTER_XT_GRADM_H -+#define _LINUX_NETFILTER_XT_GRADM_H 1 -+ -+struct xt_gradm_mtinfo { -+ __u16 flags; -+ __u16 invflags; -+}; -+ -+#endif -diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h -index c65a18a..0c05f3a 100644 ---- a/include/linux/of_pdt.h -+++ b/include/linux/of_pdt.h -@@ -32,7 +32,7 @@ struct of_pdt_ops { - - /* return 0 on success; fill in 'len' with number of bytes in path */ - int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); --}; -+} __no_const; - - extern void *prom_early_alloc(unsigned long size); - -diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h -index 49c8727..34d2ae1 100644 ---- a/include/linux/oprofile.h -+++ b/include/linux/oprofile.h -@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, - int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, - char const * name, ulong * val); - --/** Create a file for read-only access to an atomic_t. */ -+/** Create a file for read-only access to an atomic_unchecked_t. */ - int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, -- char const * name, atomic_t * val); -+ char const * name, atomic_unchecked_t * val); - - /** create a directory */ - struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, -diff --git a/include/linux/padata.h b/include/linux/padata.h -index 4633b2f..988bc08 100644 ---- a/include/linux/padata.h -+++ b/include/linux/padata.h -@@ -129,7 +129,7 @@ struct parallel_data { - struct padata_instance *pinst; - struct padata_parallel_queue __percpu *pqueue; - struct padata_serial_queue __percpu *squeue; -- atomic_t seq_nr; -+ atomic_unchecked_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; -diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h -index c816075..cd28c4d 100644 ---- a/include/linux/perf_event.h -+++ b/include/linux/perf_event.h -@@ -745,8 +745,8 @@ struct perf_event { - - enum perf_event_active_state state; - unsigned int attach_state; -- local64_t count; -- atomic64_t child_count; -+ local64_t count; /* PaX: fix it one day */ -+ atomic64_unchecked_t child_count; - - /* - * These are the total time in nanoseconds that the event -@@ -797,8 +797,8 @@ struct perf_event { - * These accumulate total time (in nanoseconds) that children - * events have been enabled and running, respectively. 
- */ -- atomic64_t child_total_time_enabled; -- atomic64_t child_total_time_running; -+ atomic64_unchecked_t child_total_time_enabled; -+ atomic64_unchecked_t child_total_time_running; - - /* - * Protect attach/detach and child_list: -diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h -index 77257c9..51d473a 100644 ---- a/include/linux/pipe_fs_i.h -+++ b/include/linux/pipe_fs_i.h -@@ -46,9 +46,9 @@ struct pipe_buffer { - struct pipe_inode_info { - wait_queue_head_t wait; - unsigned int nrbufs, curbuf, buffers; -- unsigned int readers; -- unsigned int writers; -- unsigned int waiting_writers; -+ atomic_t readers; -+ atomic_t writers; -+ atomic_t waiting_writers; - unsigned int r_counter; - unsigned int w_counter; - struct page *tmp_page; -diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h -index daac05d..c6802ce 100644 ---- a/include/linux/pm_runtime.h -+++ b/include/linux/pm_runtime.h -@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) - - static inline void pm_runtime_mark_last_busy(struct device *dev) - { -- ACCESS_ONCE(dev->power.last_busy) = jiffies; -+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies; - } - - #else /* !CONFIG_PM_RUNTIME */ -diff --git a/include/linux/poison.h b/include/linux/poison.h -index 79159de..f1233a9 100644 ---- a/include/linux/poison.h -+++ b/include/linux/poison.h -@@ -19,8 +19,8 @@ - * under normal circumstances, used to verify that nobody uses - * non-initialized list entries. - */ --#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) --#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) -+#define LIST_POISON1 ((void *) (long)0xFFFFFF01) -+#define LIST_POISON2 ((void *) (long)0xFFFFFF02) - - /********** include/linux/timer.h **********/ - /* -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 58969b2..ead129b 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -123,7 +123,7 @@ struct preempt_ops { - void (*sched_in)(struct preempt_notifier *notifier, int cpu); - void (*sched_out)(struct preempt_notifier *notifier, - struct task_struct *next); --}; -+} __no_const; - - /** - * preempt_notifier - key for installing preemption notifiers -diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h -index 643b96c..ef55a9c 100644 ---- a/include/linux/proc_fs.h -+++ b/include/linux/proc_fs.h -@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, - return proc_create_data(name, mode, parent, proc_fops, NULL); - } - -+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode, -+ struct proc_dir_entry *parent, const struct file_operations *proc_fops) -+{ -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); -+#else -+ return proc_create_data(name, mode, parent, proc_fops, NULL); -+#endif -+} -+ -+ - static inline struct proc_dir_entry *create_proc_read_entry(const char *name, - mode_t mode, struct proc_dir_entry *base, - read_proc_t *read_proc, void * data) -@@ -258,7 +271,7 @@ union proc_op { - int (*proc_show)(struct seq_file *m, - struct pid_namespace *ns, struct pid *pid, - struct task_struct *task); --}; -+} __no_const; - - struct ctl_table_header; - struct ctl_table; -diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h -index 800f113..af90cc8 100644 ---- 
a/include/linux/ptrace.h -+++ b/include/linux/ptrace.h -@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child); - extern void exit_ptrace(struct task_struct *tracer); - #define PTRACE_MODE_READ 1 - #define PTRACE_MODE_ATTACH 2 --/* Returns 0 on success, -errno on denial. */ --extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); - /* Returns true on success, false on denial. */ - extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); -+/* Returns true on success, false on denial. */ -+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode); - - static inline int ptrace_reparented(struct task_struct *child) - { -diff --git a/include/linux/random.h b/include/linux/random.h -index d13059f..2eaafaa 100644 ---- a/include/linux/random.h -+++ b/include/linux/random.h -@@ -69,12 +69,17 @@ void srandom32(u32 seed); - - u32 prandom32(struct rnd_state *); - -+static inline unsigned long pax_get_random_long(void) -+{ -+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0); -+} -+ - /* - * Handle minimum values for seeds - */ - static inline u32 __seed(u32 x, u32 m) - { -- return (x < m) ? x + m : x; -+ return (x <= m) ? x + m + 1 : x; - } - - /** -diff --git a/include/linux/reboot.h b/include/linux/reboot.h -index e0879a7..a12f962 100644 ---- a/include/linux/reboot.h -+++ b/include/linux/reboot.h -@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *); - * Architecture-specific implementations of sys_reboot commands. - */ - --extern void machine_restart(char *cmd); --extern void machine_halt(void); --extern void machine_power_off(void); -+extern void machine_restart(char *cmd) __noreturn; -+extern void machine_halt(void) __noreturn; -+extern void machine_power_off(void) __noreturn; - - extern void machine_shutdown(void); - struct pt_regs; -@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *); - */ - - extern void kernel_restart_prepare(char *cmd); --extern void kernel_restart(char *cmd); --extern void kernel_halt(void); --extern void kernel_power_off(void); -+extern void kernel_restart(char *cmd) __noreturn; -+extern void kernel_halt(void) __noreturn; -+extern void kernel_power_off(void) __noreturn; - - extern int C_A_D; /* for sysctl */ - void ctrl_alt_del(void); -@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force); - * Emergency restart, callable from an interrupt handler. - */ - --extern void emergency_restart(void); -+extern void emergency_restart(void) __noreturn; - #include <asm/emergency-restart.h> - - #endif -diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h -index 96d465f..b084e05 100644 ---- a/include/linux/reiserfs_fs.h -+++ b/include/linux/reiserfs_fs.h -@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode) - #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ - - #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) --#define get_generation(s) atomic_read (&fs_generation(s)) -+#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) - #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) - #define __fs_changed(gen,s) (gen != get_generation (s)) - #define fs_changed(gen,s) \ -diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h -index 52c83b6..18ed7eb 100644 ---- a/include/linux/reiserfs_fs_sb.h -+++ b/include/linux/reiserfs_fs_sb.h -@@ -386,7 +386,7 @@ struct reiserfs_sb_info { - /* Comment? 
-Hans */ - wait_queue_head_t s_wait; - /* To be obsoleted soon by per buffer seals.. -Hans */ -- atomic_t s_generation_counter; // increased by one every time the -+ atomic_unchecked_t s_generation_counter; // increased by one every time the - // tree gets re-balanced - unsigned long s_properties; /* File system properties. Currently holds - on-disk FS format */ -diff --git a/include/linux/relay.h b/include/linux/relay.h -index 14a86bc..17d0700 100644 ---- a/include/linux/relay.h -+++ b/include/linux/relay.h -@@ -159,7 +159,7 @@ struct rchan_callbacks - * The callback should return 0 if successful, negative if not. - */ - int (*remove_buf_file)(struct dentry *dentry); --}; -+} __no_const; - - /* - * CONFIG_RELAY kernel API, kernel/relay.c -diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h -index c6c6084..5bf1212 100644 ---- a/include/linux/rfkill.h -+++ b/include/linux/rfkill.h -@@ -147,6 +147,7 @@ struct rfkill_ops { - void (*query)(struct rfkill *rfkill, void *data); - int (*set_block)(void *data, bool blocked); - }; -+typedef struct rfkill_ops __no_const rfkill_ops_no_const; - - #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) - /** -diff --git a/include/linux/rmap.h b/include/linux/rmap.h -index 2148b12..519b820 100644 ---- a/include/linux/rmap.h -+++ b/include/linux/rmap.h -@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma) - void anon_vma_init(void); /* create anon_vma_cachep */ - int anon_vma_prepare(struct vm_area_struct *); - void unlink_anon_vmas(struct vm_area_struct *); --int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); --int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); -+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *); -+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *); - void __anon_vma_link(struct vm_area_struct *); - - static inline void anon_vma_merge(struct vm_area_struct *vma, -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 41d0237..5a64056 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -100,6 +100,7 @@ struct bio_list; - struct fs_struct; - struct perf_event_context; - struct blk_plug; -+struct linux_binprm; - - /* - * List of flags we want to share for kernel threads, -@@ -380,10 +381,13 @@ struct user_namespace; - #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) - - extern int sysctl_max_map_count; -+extern unsigned long sysctl_heap_stack_gap; - - #include <linux/aio.h> - - #ifdef CONFIG_MMU -+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len); -+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len); - extern void arch_pick_mmap_layout(struct mm_struct *mm); - extern unsigned long - arch_get_unmapped_area(struct file *, unsigned long, unsigned long, -@@ -629,6 +633,17 @@ struct signal_struct { - #ifdef CONFIG_TASKSTATS - struct taskstats *stats; - #endif -+ -+#ifdef CONFIG_GRKERNSEC -+ u32 curr_ip; -+ u32 saved_ip; -+ u32 gr_saddr; -+ u32 gr_daddr; -+ u16 gr_sport; -+ u16 gr_dport; -+ u8 used_accept:1; -+#endif -+ - #ifdef CONFIG_AUDIT - unsigned audit_tty; - struct tty_audit_buf *tty_audit_buf; -@@ -710,6 +725,11 @@ struct user_struct { - struct key *session_keyring; /* UID's default session keyring */ - #endif - -+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) -+ unsigned int banned; -+ unsigned long ban_expires; -+#endif -+ - /* Hash 
table maintenance information */ - struct hlist_node uidhash_node; - uid_t uid; -@@ -1340,8 +1360,8 @@ struct task_struct { - struct list_head thread_group; - - struct completion *vfork_done; /* for vfork() */ -- int __user *set_child_tid; /* CLONE_CHILD_SETTID */ -- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ -+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ -+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - - cputime_t utime, stime, utimescaled, stimescaled; - cputime_t gtime; -@@ -1357,13 +1377,6 @@ struct task_struct { - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; - --/* process credentials */ -- const struct cred __rcu *real_cred; /* objective and real subjective task -- * credentials (COW) */ -- const struct cred __rcu *cred; /* effective (overridable) subjective task -- * credentials (COW) */ -- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ -- - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) -@@ -1380,8 +1393,16 @@ struct task_struct { - #endif - /* CPU-specific state of this task */ - struct thread_struct thread; -+/* thread_info moved to task_struct */ -+#ifdef CONFIG_X86 -+ struct thread_info tinfo; -+#endif - /* filesystem information */ - struct fs_struct *fs; -+ -+ const struct cred __rcu *cred; /* effective (overridable) subjective task -+ * credentials (COW) */ -+ - /* open file information */ - struct files_struct *files; - /* namespaces */ -@@ -1428,6 +1449,11 @@ struct task_struct { - struct rt_mutex_waiter *pi_blocked_on; - #endif - -+/* process credentials */ -+ const struct cred __rcu *real_cred; /* objective and real subjective task -+ * credentials (COW) */ -+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ -+ - #ifdef CONFIG_DEBUG_MUTEXES - /* mutex deadlock detection */ - struct mutex_waiter *blocked_on; -@@ -1537,6 +1563,21 @@ struct task_struct { - unsigned long default_timer_slack_ns; - - struct list_head *scm_work_list; -+ -+#ifdef CONFIG_GRKERNSEC -+ /* grsecurity */ -+ struct dentry *gr_chroot_dentry; -+ struct acl_subject_label *acl; -+ struct acl_role_label *role; -+ struct file *exec_file; -+ u16 acl_role_id; -+ /* is this the task that authenticated to the special role */ -+ u8 acl_sp_role; -+ u8 is_writable; -+ u8 brute; -+ u8 gr_is_chrooted; -+#endif -+ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack */ - int curr_ret_stack; -@@ -1571,6 +1612,57 @@ struct task_struct { - #endif - }; - -+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ -+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ -+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ -+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ -+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ -+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ -+ -+#ifdef CONFIG_PAX_SOFTMODE -+extern int pax_softmode; -+#endif -+ -+extern int pax_check_flags(unsigned long *); -+ -+/* if tsk != current then task_lock must be held on it */ -+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) -+static inline unsigned long pax_get_flags(struct task_struct *tsk) -+{ -+ if (likely(tsk->mm)) -+ return tsk->mm->pax_flags; -+ else -+ return 0UL; -+} -+ -+/* if tsk != current then task_lock must be held on it */ -+static inline long pax_set_flags(struct task_struct *tsk, unsigned 
long flags) -+{ -+ if (likely(tsk->mm)) { -+ tsk->mm->pax_flags = flags; -+ return 0; -+ } -+ return -EINVAL; -+} -+#endif -+ -+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS -+extern void pax_set_initial_flags(struct linux_binprm *bprm); -+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) -+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); -+#endif -+ -+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); -+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp); -+extern void pax_report_refcount_overflow(struct pt_regs *regs); -+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET; -+ -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+extern void pax_track_stack(void); -+#else -+static inline void pax_track_stack(void) {} -+#endif -+ - /* Future-safe accessor for struct task_struct's cpus_allowed. */ - #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -@@ -2074,7 +2166,9 @@ void yield(void); - extern struct exec_domain default_exec_domain; - - union thread_union { -+#ifndef CONFIG_X86 - struct thread_info thread_info; -+#endif - unsigned long stack[THREAD_SIZE/sizeof(long)]; - }; - -@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns; - */ - - extern struct task_struct *find_task_by_vpid(pid_t nr); -+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr); - extern struct task_struct *find_task_by_pid_ns(pid_t nr, - struct pid_namespace *ns); - -@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sighand_struct *); - extern void exit_itimers(struct signal_struct *); - extern void flush_itimer_signals(void); - --extern NORET_TYPE void do_group_exit(int); -+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET; - - extern void daemonize(const char *, ...); - extern int allow_signal(int); -@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p) - - #endif - --static inline int object_is_on_stack(void *obj) -+static inline int object_starts_on_stack(void *obj) - { -- void *stack = task_stack_page(current); -+ const void *stack = task_stack_page(current); - - return (obj >= stack) && (obj < (stack + THREAD_SIZE)); - } - -+#ifdef CONFIG_PAX_USERCOPY -+extern int object_is_on_stack(const void *obj, unsigned long len); -+#endif -+ - extern void thread_info_cache_init(void); - - #ifdef CONFIG_DEBUG_STACK_USAGE -diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h -index 899fbb4..1cb4138 100644 ---- a/include/linux/screen_info.h -+++ b/include/linux/screen_info.h -@@ -43,7 +43,8 @@ struct screen_info { - __u16 pages; /* 0x32 */ - __u16 vesa_attributes; /* 0x34 */ - __u32 capabilities; /* 0x36 */ -- __u8 _reserved[6]; /* 0x3a */ -+ __u16 vesapm_size; /* 0x3a */ -+ __u8 _reserved[4]; /* 0x3c */ - } __attribute__((packed)); - - #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ -diff --git a/include/linux/security.h b/include/linux/security.h -index ebd2a53..2d949ae 100644 ---- a/include/linux/security.h -+++ b/include/linux/security.h -@@ -36,6 +36,7 @@ - #include <linux/key.h> - #include <linux/xfrm.h> - #include <linux/slab.h> -+#include <linux/grsecurity.h> - #include <net/flow.h> - - /* Maximum number of letters for an LSM name string */ -diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h -index be720cd..a0e1b94 100644 ---- a/include/linux/seq_file.h -+++ b/include/linux/seq_file.h -@@ -33,6 +33,7 @@ struct seq_operations { - void * (*next) (struct seq_file *m, void *v, loff_t *pos); - int (*show) (struct 
seq_file *m, void *v); - }; -+typedef struct seq_operations __no_const seq_operations_no_const; - - #define SEQ_SKIP 1 - -diff --git a/include/linux/shm.h b/include/linux/shm.h -index 92808b8..c28cac4 100644 ---- a/include/linux/shm.h -+++ b/include/linux/shm.h -@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */ - - /* The task created the shm object. NULL if the task is dead. */ - struct task_struct *shm_creator; -+#ifdef CONFIG_GRKERNSEC -+ time_t shm_createtime; -+ pid_t shm_lapid; -+#endif - }; - - /* shm_mode upper byte flags */ -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 0f96646..cfb757a 100644 ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) - */ - static inline int skb_queue_empty(const struct sk_buff_head *list) - { -- return list->next == (struct sk_buff *)list; -+ return list->next == (const struct sk_buff *)list; - } - - /** -@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) - static inline bool skb_queue_is_last(const struct sk_buff_head *list, - const struct sk_buff *skb) - { -- return skb->next == (struct sk_buff *)list; -+ return skb->next == (const struct sk_buff *)list; - } - - /** -@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, - static inline bool skb_queue_is_first(const struct sk_buff_head *list, - const struct sk_buff *skb) - { -- return skb->prev == (struct sk_buff *)list; -+ return skb->prev == (const struct sk_buff *)list; - } - - /** -@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) - * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) - */ - #ifndef NET_SKB_PAD --#define NET_SKB_PAD max(32, L1_CACHE_BYTES) -+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES) - #endif - - extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); -diff --git a/include/linux/slab.h b/include/linux/slab.h -index 573c809..e84c132 100644 ---- a/include/linux/slab.h -+++ b/include/linux/slab.h -@@ -11,12 +11,20 @@ - - #include <linux/gfp.h> - #include <linux/types.h> -+#include <linux/err.h> - - /* - * Flags to pass to kmem_cache_create(). - * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. - */ - #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ -+ -+#ifdef CONFIG_PAX_USERCOPY -+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */ -+#else -+#define SLAB_USERCOPY 0x00000000UL -+#endif -+ - #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ - #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ - #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ -@@ -87,10 +95,13 @@ - * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. - * Both make kfree a no-op. 
- */ --#define ZERO_SIZE_PTR ((void *)16) -+#define ZERO_SIZE_PTR \ -+({ \ -+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ -+ (void *)(-MAX_ERRNO-1L); \ -+}) - --#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ -- (unsigned long)ZERO_SIZE_PTR) -+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) - - /* - * struct kmem_cache related prototypes -@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t); - void kfree(const void *); - void kzfree(const void *); - size_t ksize(const void *); -+void check_object_size(const void *ptr, unsigned long n, bool to); - - /* - * Allocator specific definitions. These are mainly used to establish optimized -@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) - - void __init kmem_cache_init_late(void); - -+#define kmalloc(x, y) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \ -+ ___retval = NULL; \ -+ else \ -+ ___retval = kmalloc((size_t)___x, (y)); \ -+ ___retval; \ -+}) -+ -+#define kmalloc_node(x, y, z) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = kmalloc_node((size_t)___x, (y), (z));\ -+ ___retval; \ -+}) -+ -+#define kzalloc(x, y) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \ -+ ___retval = NULL; \ -+ else \ -+ ___retval = kzalloc((size_t)___x, (y)); \ -+ ___retval; \ -+}) -+ -+#define __krealloc(x, y, z) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___y = (intoverflow_t)y; \ -+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = __krealloc((x), (size_t)___y, (z)); \ -+ ___retval; \ -+}) -+ -+#define krealloc(x, y, z) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___y = (intoverflow_t)y; \ -+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \ -+ ___retval = NULL; \ -+ else \ -+ ___retval = krealloc((x), (size_t)___y, (z)); \ -+ ___retval; \ -+}) -+ - #endif /* _LINUX_SLAB_H */ -diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h -index d00e0ba..1b3bf7b 100644 ---- a/include/linux/slab_def.h -+++ b/include/linux/slab_def.h -@@ -68,10 +68,10 @@ struct kmem_cache { - unsigned long node_allocs; - unsigned long node_frees; - unsigned long node_overflow; -- atomic_t allochit; -- atomic_t allocmiss; -- atomic_t freehit; -- atomic_t freemiss; -+ atomic_unchecked_t allochit; -+ atomic_unchecked_t allocmiss; -+ atomic_unchecked_t freehit; -+ atomic_unchecked_t freemiss; - - /* - * If debugging is enabled, then the allocator can add additional -diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h -index f58d641..c56bf9c 100644 ---- a/include/linux/slub_def.h -+++ b/include/linux/slub_def.h -@@ -85,7 +85,7 @@ struct kmem_cache { - struct kmem_cache_order_objects max; - struct kmem_cache_order_objects min; - gfp_t allocflags; /* gfp flags to use on each alloc */ -- int refcount; /* Refcount for slab cache destroy */ -+ atomic_t refcount; /* Refcount for slab cache destroy */ - void (*ctor)(void *); - int inuse; /* Offset to metadata */ - int align; /* Alignment */ -@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) - } - - void *kmem_cache_alloc(struct kmem_cache *, gfp_t); --void *__kmalloc(size_t size, gfp_t flags); -+void 
*__kmalloc(size_t size, gfp_t flags) __alloc_size(1); - - static __always_inline void * - kmalloc_order(size_t size, gfp_t flags, unsigned int order) -diff --git a/include/linux/sonet.h b/include/linux/sonet.h -index de8832d..0147b46 100644 ---- a/include/linux/sonet.h -+++ b/include/linux/sonet.h -@@ -61,7 +61,7 @@ struct sonet_stats { - #include <linux/atomic.h> - - struct k_sonet_stats { --#define __HANDLE_ITEM(i) atomic_t i -+#define __HANDLE_ITEM(i) atomic_unchecked_t i - __SONET_ITEMS - #undef __HANDLE_ITEM - }; -diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h -index db7bcaf..1aca77e 100644 ---- a/include/linux/sunrpc/clnt.h -+++ b/include/linux/sunrpc/clnt.h -@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap) - { - switch (sap->sa_family) { - case AF_INET: -- return ntohs(((struct sockaddr_in *)sap)->sin_port); -+ return ntohs(((const struct sockaddr_in *)sap)->sin_port); - case AF_INET6: -- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); -+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port); - } - return 0; - } -@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, - static inline bool __rpc_copy_addr4(struct sockaddr *dst, - const struct sockaddr *src) - { -- const struct sockaddr_in *ssin = (struct sockaddr_in *) src; -+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src; - struct sockaddr_in *dsin = (struct sockaddr_in *) dst; - - dsin->sin_family = ssin->sin_family; -@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) - if (sa->sa_family != AF_INET6) - return 0; - -- return ((struct sockaddr_in6 *) sa)->sin6_scope_id; -+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id; - } - - #endif /* __KERNEL__ */ -diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h -index e775689..9e206d9 100644 ---- a/include/linux/sunrpc/sched.h -+++ b/include/linux/sunrpc/sched.h -@@ -105,6 +105,7 @@ struct rpc_call_ops { - void (*rpc_call_done)(struct rpc_task *, void *); - void (*rpc_release)(void *); - }; -+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const; - - struct rpc_task_setup { - struct rpc_task *task; -diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h -index c14fe86..393245e 100644 ---- a/include/linux/sunrpc/svc_rdma.h -+++ b/include/linux/sunrpc/svc_rdma.h -@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord; - extern unsigned int svcrdma_max_requests; - extern unsigned int svcrdma_max_req_size; - --extern atomic_t rdma_stat_recv; --extern atomic_t rdma_stat_read; --extern atomic_t rdma_stat_write; --extern atomic_t rdma_stat_sq_starve; --extern atomic_t rdma_stat_rq_starve; --extern atomic_t rdma_stat_rq_poll; --extern atomic_t rdma_stat_rq_prod; --extern atomic_t rdma_stat_sq_poll; --extern atomic_t rdma_stat_sq_prod; -+extern atomic_unchecked_t rdma_stat_recv; -+extern atomic_unchecked_t rdma_stat_read; -+extern atomic_unchecked_t rdma_stat_write; -+extern atomic_unchecked_t rdma_stat_sq_starve; -+extern atomic_unchecked_t rdma_stat_rq_starve; -+extern atomic_unchecked_t rdma_stat_rq_poll; -+extern atomic_unchecked_t rdma_stat_rq_prod; -+extern atomic_unchecked_t rdma_stat_sq_poll; -+extern atomic_unchecked_t rdma_stat_sq_prod; - - #define RPCRDMA_VERSION 1 - -diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h -index 11684d9..0d245eb 100644 ---- a/include/linux/sysctl.h -+++ b/include/linux/sysctl.h -@@ -155,7 +155,11 @@ enum - KERN_PANIC_ON_NMI=76, /* int: 
whether we will panic on an unrecovered */ - }; - -- -+#ifdef CONFIG_PAX_SOFTMODE -+enum { -+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */ -+}; -+#endif - - /* CTL_VM names: */ - enum -@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write, - - extern int proc_dostring(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -+extern int proc_dostring_modpriv(struct ctl_table *, int, -+ void __user *, size_t *, loff_t *); - extern int proc_dointvec(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - extern int proc_dointvec_minmax(struct ctl_table *, int, -diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h -index ff7dc08..893e1bd 100644 ---- a/include/linux/tty_ldisc.h -+++ b/include/linux/tty_ldisc.h -@@ -148,7 +148,7 @@ struct tty_ldisc_ops { - - struct module *owner; - -- int refcount; -+ atomic_t refcount; - }; - - struct tty_ldisc { -diff --git a/include/linux/types.h b/include/linux/types.h -index 176da8c..e45e473 100644 ---- a/include/linux/types.h -+++ b/include/linux/types.h -@@ -213,10 +213,26 @@ typedef struct { - int counter; - } atomic_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ int counter; -+} atomic_unchecked_t; -+#else -+typedef atomic_t atomic_unchecked_t; -+#endif -+ - #ifdef CONFIG_64BIT - typedef struct { - long counter; - } atomic64_t; -+ -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ long counter; -+} atomic64_unchecked_t; -+#else -+typedef atomic64_t atomic64_unchecked_t; -+#endif - #endif - - struct list_head { -diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h -index 5ca0951..ab496a5 100644 ---- a/include/linux/uaccess.h -+++ b/include/linux/uaccess.h -@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to, - long ret; \ - mm_segment_t old_fs = get_fs(); \ - \ -- set_fs(KERNEL_DS); \ - pagefault_disable(); \ -- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ -- pagefault_enable(); \ -+ set_fs(KERNEL_DS); \ -+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \ - set_fs(old_fs); \ -+ pagefault_enable(); \ - ret; \ - }) - -diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h -index 99c1b4d..bb94261 100644 ---- a/include/linux/unaligned/access_ok.h -+++ b/include/linux/unaligned/access_ok.h -@@ -6,32 +6,32 @@ - - static inline u16 get_unaligned_le16(const void *p) - { -- return le16_to_cpup((__le16 *)p); -+ return le16_to_cpup((const __le16 *)p); - } - - static inline u32 get_unaligned_le32(const void *p) - { -- return le32_to_cpup((__le32 *)p); -+ return le32_to_cpup((const __le32 *)p); - } - - static inline u64 get_unaligned_le64(const void *p) - { -- return le64_to_cpup((__le64 *)p); -+ return le64_to_cpup((const __le64 *)p); - } - - static inline u16 get_unaligned_be16(const void *p) - { -- return be16_to_cpup((__be16 *)p); -+ return be16_to_cpup((const __be16 *)p); - } - - static inline u32 get_unaligned_be32(const void *p) - { -- return be32_to_cpup((__be32 *)p); -+ return be32_to_cpup((const __be32 *)p); - } - - static inline u64 get_unaligned_be64(const void *p) - { -- return be64_to_cpup((__be64 *)p); -+ return be64_to_cpup((const __be64 *)p); - } - - static inline void put_unaligned_le16(u16 val, void *p) -diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h -index cf97b5b..40ebc87 100644 ---- a/include/linux/vermagic.h -+++ b/include/linux/vermagic.h -@@ -26,9 +26,35 @@ - #define 
MODULE_ARCH_VERMAGIC "" - #endif - -+#ifdef CONFIG_PAX_REFCOUNT -+#define MODULE_PAX_REFCOUNT "REFCOUNT " -+#else -+#define MODULE_PAX_REFCOUNT "" -+#endif -+ -+#ifdef CONSTIFY_PLUGIN -+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " -+#else -+#define MODULE_CONSTIFY_PLUGIN "" -+#endif -+ -+#ifdef STACKLEAK_PLUGIN -+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " -+#else -+#define MODULE_STACKLEAK_PLUGIN "" -+#endif -+ -+#ifdef CONFIG_GRKERNSEC -+#define MODULE_GRSEC "GRSEC " -+#else -+#define MODULE_GRSEC "" -+#endif -+ - #define VERMAGIC_STRING \ - UTS_RELEASE " " \ - MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ - MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ -- MODULE_ARCH_VERMAGIC -+ MODULE_ARCH_VERMAGIC \ -+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \ -+ MODULE_GRSEC - -diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h -index 687fb11..b342358 100644 ---- a/include/linux/vmalloc.h -+++ b/include/linux/vmalloc.h -@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ - #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ - #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ - #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) -+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */ -+#endif -+ - /* bits [20..32] reserved for arch specific ioremap internals */ - - /* -@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) - # endif - #endif - -+#define vmalloc(x) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc((unsigned long)___x); \ -+ ___retval; \ -+}) -+ -+#define vzalloc(x) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vzalloc((unsigned long)___x); \ -+ ___retval; \ -+}) -+ -+#define __vmalloc(x, y, z) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\ -+ ___retval; \ -+}) -+ -+#define vmalloc_user(x) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc_user((unsigned long)___x); \ -+ ___retval; \ -+}) -+ -+#define vmalloc_exec(x) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc_exec((unsigned long)___x); \ -+ ___retval; \ -+}) -+ -+#define vmalloc_node(x, y) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc_node((unsigned long)___x, (y));\ -+ ___retval; \ -+}) -+ -+#define vzalloc_node(x, y) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vzalloc_node((unsigned long)___x, (y));\ -+ ___retval; \ -+}) -+ 
-+#define vmalloc_32(x) \ -+({ \ -+ void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc_32((unsigned long)___x); \ -+ ___retval; \ -+}) -+ -+#define vmalloc_32_user(x) \ -+({ \ -+void *___retval; \ -+ intoverflow_t ___x = (intoverflow_t)x; \ -+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\ -+ ___retval = NULL; \ -+ else \ -+ ___retval = vmalloc_32_user((unsigned long)___x);\ -+ ___retval; \ -+}) -+ - #endif /* _LINUX_VMALLOC_H */ -diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h -index 65efb92..137adbb 100644 ---- a/include/linux/vmstat.h -+++ b/include/linux/vmstat.h -@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu) - /* - * Zone based page accounting with per cpu differentials. - */ --extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; -+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; - - static inline void zone_page_state_add(long x, struct zone *zone, - enum zone_stat_item item) - { -- atomic_long_add(x, &zone->vm_stat[item]); -- atomic_long_add(x, &vm_stat[item]); -+ atomic_long_add_unchecked(x, &zone->vm_stat[item]); -+ atomic_long_add_unchecked(x, &vm_stat[item]); - } - - static inline unsigned long global_page_state(enum zone_stat_item item) - { -- long x = atomic_long_read(&vm_stat[item]); -+ long x = atomic_long_read_unchecked(&vm_stat[item]); - #ifdef CONFIG_SMP - if (x < 0) - x = 0; -@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item) - static inline unsigned long zone_page_state(struct zone *zone, - enum zone_stat_item item) - { -- long x = atomic_long_read(&zone->vm_stat[item]); -+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); - #ifdef CONFIG_SMP - if (x < 0) - x = 0; -@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone, - static inline unsigned long zone_page_state_snapshot(struct zone *zone, - enum zone_stat_item item) - { -- long x = atomic_long_read(&zone->vm_stat[item]); -+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); - - #ifdef CONFIG_SMP - int cpu; -@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone, - - static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) - { -- atomic_long_inc(&zone->vm_stat[item]); -- atomic_long_inc(&vm_stat[item]); -+ atomic_long_inc_unchecked(&zone->vm_stat[item]); -+ atomic_long_inc_unchecked(&vm_stat[item]); - } - - static inline void __inc_zone_page_state(struct page *page, -@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page, - - static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) - { -- atomic_long_dec(&zone->vm_stat[item]); -- atomic_long_dec(&vm_stat[item]); -+ atomic_long_dec_unchecked(&zone->vm_stat[item]); -+ atomic_long_dec_unchecked(&vm_stat[item]); - } - - static inline void __dec_zone_page_state(struct page *page, -diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h -index 4aeff96..b378cdc 100644 ---- a/include/media/saa7146_vv.h -+++ b/include/media/saa7146_vv.h -@@ -163,7 +163,7 @@ struct saa7146_ext_vv - int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *); - - /* the extension can override this */ -- struct v4l2_ioctl_ops ops; -+ v4l2_ioctl_ops_no_const ops; - /* pointer to the saa7146 core ops */ - const struct v4l2_ioctl_ops *core_ops; - -diff --git a/include/media/v4l2-dev.h 
b/include/media/v4l2-dev.h -index c7c40f1..4f01585 100644 ---- a/include/media/v4l2-dev.h -+++ b/include/media/v4l2-dev.h -@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local); - - - struct v4l2_file_operations { -- struct module *owner; -+ struct module * const owner; - ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); - ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); - unsigned int (*poll) (struct file *, struct poll_table_struct *); -@@ -68,6 +68,7 @@ struct v4l2_file_operations { - int (*open) (struct file *); - int (*release) (struct file *); - }; -+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const; - - /* - * Newer version of video_device, handled by videodev2.c -diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h -index dd9f1e7..8c4dd86 100644 ---- a/include/media/v4l2-ioctl.h -+++ b/include/media/v4l2-ioctl.h -@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops { - long (*vidioc_default) (struct file *file, void *fh, - bool valid_prio, int cmd, void *arg); - }; -- -+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const; - - /* v4l debugging and diagnostics */ - -diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h -index c5dedd8..a93b07b 100644 ---- a/include/net/caif/caif_hsi.h -+++ b/include/net/caif/caif_hsi.h -@@ -94,7 +94,7 @@ struct cfhsi_drv { - void (*rx_done_cb) (struct cfhsi_drv *drv); - void (*wake_up_cb) (struct cfhsi_drv *drv); - void (*wake_down_cb) (struct cfhsi_drv *drv); --}; -+} __no_const; - - /* Structure implemented by HSI device. */ - struct cfhsi_dev { -diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h -index 9e5425b..8136ffc 100644 ---- a/include/net/caif/cfctrl.h -+++ b/include/net/caif/cfctrl.h -@@ -52,7 +52,7 @@ struct cfctrl_rsp { - void (*radioset_rsp)(void); - void (*reject_rsp)(struct cflayer *layer, u8 linkid, - struct cflayer *client_layer); --}; -+} __no_const; - - /* Link Setup Parameters for CAIF-Links. 
*/ - struct cfctrl_link_param { -@@ -101,8 +101,8 @@ struct cfctrl_request_info { - struct cfctrl { - struct cfsrvl serv; - struct cfctrl_rsp res; -- atomic_t req_seq_no; -- atomic_t rsp_seq_no; -+ atomic_unchecked_t req_seq_no; -+ atomic_unchecked_t rsp_seq_no; - struct list_head list; - /* Protects from simultaneous access to first_req list */ - spinlock_t info_list_lock; -diff --git a/include/net/flow.h b/include/net/flow.h -index a094477..bc91db1 100644 ---- a/include/net/flow.h -+++ b/include/net/flow.h -@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup( - u8 dir, flow_resolve_t resolver, void *ctx); - - extern void flow_cache_flush(void); --extern atomic_t flow_cache_genid; -+extern atomic_unchecked_t flow_cache_genid; - - #endif -diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h -index e9ff3fc..9d3e5c7 100644 ---- a/include/net/inetpeer.h -+++ b/include/net/inetpeer.h -@@ -48,8 +48,8 @@ struct inet_peer { - */ - union { - struct { -- atomic_t rid; /* Frag reception counter */ -- atomic_t ip_id_count; /* IP ID for the next packet */ -+ atomic_unchecked_t rid; /* Frag reception counter */ -+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */ - __u32 tcp_ts; - __u32 tcp_ts_stamp; - }; -@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more) - more++; - inet_peer_refcheck(p); - do { -- old = atomic_read(&p->ip_id_count); -+ old = atomic_read_unchecked(&p->ip_id_count); - new = old + more; - if (!new) - new = 1; -- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old); -+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old); - return new; - } - -diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h -index 10422ef..662570f 100644 ---- a/include/net/ip_fib.h -+++ b/include/net/ip_fib.h -@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); - - #define FIB_RES_SADDR(net, res) \ - ((FIB_RES_NH(res).nh_saddr_genid == \ -- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ -+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \ - FIB_RES_NH(res).nh_saddr : \ - fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) - #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) -diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h -index 8fa4430..05dd772 100644 ---- a/include/net/ip_vs.h -+++ b/include/net/ip_vs.h -@@ -509,7 +509,7 @@ struct ip_vs_conn { - struct ip_vs_conn *control; /* Master control connection */ - atomic_t n_control; /* Number of controlled ones */ - struct ip_vs_dest *dest; /* real server */ -- atomic_t in_pkts; /* incoming packet counter */ -+ atomic_unchecked_t in_pkts; /* incoming packet counter */ - - /* packet transmitter for different forwarding methods. 
If it - mangles the packet, it must return NF_DROP or better NF_STOLEN, -@@ -647,7 +647,7 @@ struct ip_vs_dest { - __be16 port; /* port number of the server */ - union nf_inet_addr addr; /* IP address of the server */ - volatile unsigned flags; /* dest status flags */ -- atomic_t conn_flags; /* flags to copy to conn */ -+ atomic_unchecked_t conn_flags; /* flags to copy to conn */ - atomic_t weight; /* server weight */ - - atomic_t refcnt; /* reference counter */ -diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h -index 69b610a..fe3962c 100644 ---- a/include/net/irda/ircomm_core.h -+++ b/include/net/irda/ircomm_core.h -@@ -51,7 +51,7 @@ typedef struct { - int (*connect_response)(struct ircomm_cb *, struct sk_buff *); - int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *, - struct ircomm_info *); --} call_t; -+} __no_const call_t; - - struct ircomm_cb { - irda_queue_t queue; -diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h -index 59ba38bc..d515662 100644 ---- a/include/net/irda/ircomm_tty.h -+++ b/include/net/irda/ircomm_tty.h -@@ -35,6 +35,7 @@ - #include <linux/termios.h> - #include <linux/timer.h> - #include <linux/tty.h> /* struct tty_struct */ -+#include <asm/local.h> - - #include <net/irda/irias_object.h> - #include <net/irda/ircomm_core.h> -@@ -105,8 +106,8 @@ struct ircomm_tty_cb { - unsigned short close_delay; - unsigned short closing_wait; /* time to wait before closing */ - -- int open_count; -- int blocked_open; /* # of blocked opens */ -+ local_t open_count; -+ local_t blocked_open; /* # of blocked opens */ - - /* Protect concurent access to : - * o self->open_count -diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h -index f82a1e8..82d81e8 100644 ---- a/include/net/iucv/af_iucv.h -+++ b/include/net/iucv/af_iucv.h -@@ -87,7 +87,7 @@ struct iucv_sock { - struct iucv_sock_list { - struct hlist_head head; - rwlock_t lock; -- atomic_t autobind_name; -+ atomic_unchecked_t autobind_name; - }; - - unsigned int iucv_sock_poll(struct file *file, struct socket *sock, -diff --git a/include/net/lapb.h b/include/net/lapb.h -index 96cb5dd..25e8d4f 100644 ---- a/include/net/lapb.h -+++ b/include/net/lapb.h -@@ -95,7 +95,7 @@ struct lapb_cb { - struct sk_buff_head write_queue; - struct sk_buff_head ack_queue; - unsigned char window; -- struct lapb_register_struct callbacks; -+ struct lapb_register_struct *callbacks; - - /* FRMR control information */ - struct lapb_frame frmr_data; -diff --git a/include/net/neighbour.h b/include/net/neighbour.h -index 2720884..3aa5c25 100644 ---- a/include/net/neighbour.h -+++ b/include/net/neighbour.h -@@ -122,7 +122,7 @@ struct neigh_ops { - void (*error_report)(struct neighbour *, struct sk_buff *); - int (*output)(struct neighbour *, struct sk_buff *); - int (*connected_output)(struct neighbour *, struct sk_buff *); --}; -+} __do_const; - - struct pneigh_entry { - struct pneigh_entry *next; -diff --git a/include/net/netlink.h b/include/net/netlink.h -index 98c1854..d4add7b 100644 ---- a/include/net/netlink.h -+++ b/include/net/netlink.h -@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) - static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) - { - if (mark) -- skb_trim(skb, (unsigned char *) mark - skb->data); -+ skb_trim(skb, (const unsigned char *) mark - skb->data); - } - - /** -diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h -index d786b4f..4c3dd41 100644 ---- a/include/net/netns/ipv4.h -+++ 
b/include/net/netns/ipv4.h -@@ -56,8 +56,8 @@ struct netns_ipv4 { - - unsigned int sysctl_ping_group_range[2]; - -- atomic_t rt_genid; -- atomic_t dev_addr_genid; -+ atomic_unchecked_t rt_genid; -+ atomic_unchecked_t dev_addr_genid; - - #ifdef CONFIG_IP_MROUTE - #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES -diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h -index 6a72a58..e6a127d 100644 ---- a/include/net/sctp/sctp.h -+++ b/include/net/sctp/sctp.h -@@ -318,9 +318,9 @@ do { \ - - #else /* SCTP_DEBUG */ - --#define SCTP_DEBUG_PRINTK(whatever...) --#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) --#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) -+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0) -+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0) -+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0) - #define SCTP_ENABLE_DEBUG - #define SCTP_DISABLE_DEBUG - #define SCTP_ASSERT(expr, str, func) -diff --git a/include/net/sock.h b/include/net/sock.h -index 8e4062f..77b041e 100644 ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -278,7 +278,7 @@ struct sock { - #ifdef CONFIG_RPS - __u32 sk_rxhash; - #endif -- atomic_t sk_drops; -+ atomic_unchecked_t sk_drops; - int sk_rcvbuf; - - struct sk_filter __rcu *sk_filter; -@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags) - } - - static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, -- char __user *from, char *to, -+ char __user *from, unsigned char *to, - int copy, int offset) - { - if (skb->ip_summed == CHECKSUM_NONE) { -diff --git a/include/net/tcp.h b/include/net/tcp.h -index acc620a..f4d99c6 100644 ---- a/include/net/tcp.h -+++ b/include/net/tcp.h -@@ -1401,8 +1401,8 @@ enum tcp_seq_states { - struct tcp_seq_afinfo { - char *name; - sa_family_t family; -- struct file_operations seq_fops; -- struct seq_operations seq_ops; -+ file_operations_no_const seq_fops; -+ seq_operations_no_const seq_ops; - }; - - struct tcp_iter_state { -diff --git a/include/net/udp.h b/include/net/udp.h -index 67ea6fc..e42aee8 100644 ---- a/include/net/udp.h -+++ b/include/net/udp.h -@@ -234,8 +234,8 @@ struct udp_seq_afinfo { - char *name; - sa_family_t family; - struct udp_table *udp_table; -- struct file_operations seq_fops; -- struct seq_operations seq_ops; -+ file_operations_no_const seq_fops; -+ seq_operations_no_const seq_ops; - }; - - struct udp_iter_state { -diff --git a/include/net/xfrm.h b/include/net/xfrm.h -index b203e14..1df3991 100644 ---- a/include/net/xfrm.h -+++ b/include/net/xfrm.h -@@ -505,7 +505,7 @@ struct xfrm_policy { - struct timer_list timer; - - struct flow_cache_object flo; -- atomic_t genid; -+ atomic_unchecked_t genid; - u32 priority; - u32 index; - struct xfrm_mark mark; -diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h -index 2d0191c..a55797d 100644 ---- a/include/rdma/iw_cm.h -+++ b/include/rdma/iw_cm.h -@@ -120,7 +120,7 @@ struct iw_cm_verbs { - int backlog); - - int (*destroy_listen)(struct iw_cm_id *cm_id); --}; -+} __no_const; - - /** - * iw_create_cm_id - Create an IW CM identifier. 
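The header hunks above show two idioms that recur throughout this grsecurity/PaX update: ops-style structures of function pointers gain a __no_const (or __do_const) marker, which tells the constify GCC plugin whether such a table may be made read-only or must stay writable, and counters whose wrap-around is harmless (statistics, sequence numbers, genid values) are switched from atomic_t to atomic_unchecked_t so they are exempt from the PAX_REFCOUNT overflow checks. The following is only a minimal user-space sketch of how these annotations behave; the fallback no-op definitions are an assumption for a build without the hardening features, since the real definitions live elsewhere in the full patch and are not part of the excerpt above.

#include <stdio.h>

/* Assumed fallbacks: with the PaX constify plugin disabled, the
 * markers compile away entirely. */
#define __no_const
#define __do_const

/* Assumed fallback: without PAX_REFCOUNT, an "unchecked" atomic is
 * just an ordinary counter with no overflow instrumentation. */
typedef struct { volatile int counter; } atomic_unchecked_t;
#define atomic_read_unchecked(v)  ((v)->counter)
#define atomic_inc_unchecked(v)   ((v)->counter++)

/* An ops table that must remain writable at runtime is tagged
 * __no_const, mirroring the v4l2_file_operations and cfhsi_drv
 * changes in the hunks above. */
struct demo_ops {
        int (*handler)(int);
} __no_const;

static int double_it(int x) { return 2 * x; }

int main(void)
{
        struct demo_ops ops = { .handler = double_it };
        atomic_unchecked_t hits = { 0 };   /* wrap-around would be harmless */

        atomic_inc_unchecked(&hits);
        printf("handler(21) = %d, hits = %d\n",
               ops.handler(21), atomic_read_unchecked(&hits));
        return 0;
}

The distinction matters because PAX_REFCOUNT is meant to trap on atomic_t overflow to stop reference-count exploits; the hunks above only convert counters for which an overflow is not a correctness problem, so their legitimate wrap-around does not raise a false positive.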
-diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h -index 7d96829..4ba78d3 100644 ---- a/include/scsi/libfc.h -+++ b/include/scsi/libfc.h -@@ -758,6 +758,7 @@ struct libfc_function_template { - */ - void (*disc_stop_final) (struct fc_lport *); - }; -+typedef struct libfc_function_template __no_const libfc_function_template_no_const; - - /** - * struct fc_disc - Discovery context -@@ -861,7 +862,7 @@ struct fc_lport { - struct fc_vport *vport; - - /* Operational Information */ -- struct libfc_function_template tt; -+ libfc_function_template_no_const tt; - u8 link_up; - u8 qfull; - enum fc_lport_state state; -diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h -index d371c3c..e228a8c 100644 ---- a/include/scsi/scsi_device.h -+++ b/include/scsi/scsi_device.h -@@ -161,9 +161,9 @@ struct scsi_device { - unsigned int max_device_blocked; /* what device_blocked counts down from */ - #define SCSI_DEFAULT_DEVICE_BLOCKED 3 - -- atomic_t iorequest_cnt; -- atomic_t iodone_cnt; -- atomic_t ioerr_cnt; -+ atomic_unchecked_t iorequest_cnt; -+ atomic_unchecked_t iodone_cnt; -+ atomic_unchecked_t ioerr_cnt; - - struct device sdev_gendev, - sdev_dev; -diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h -index 2a65167..91e01f8 100644 ---- a/include/scsi/scsi_transport_fc.h -+++ b/include/scsi/scsi_transport_fc.h -@@ -711,7 +711,7 @@ struct fc_function_template { - unsigned long show_host_system_hostname:1; - - unsigned long disable_target_scan:1; --}; -+} __do_const; - - - /** -diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h -index 030b87c..98a6954 100644 ---- a/include/sound/ak4xxx-adda.h -+++ b/include/sound/ak4xxx-adda.h -@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops { - void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg, - unsigned char val); - void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate); --}; -+} __no_const; - - #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */ - -diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h -index 8c05e47..2b5df97 100644 ---- a/include/sound/hwdep.h -+++ b/include/sound/hwdep.h -@@ -49,7 +49,7 @@ struct snd_hwdep_ops { - struct snd_hwdep_dsp_status *status); - int (*dsp_load)(struct snd_hwdep *hw, - struct snd_hwdep_dsp_image *image); --}; -+} __no_const; - - struct snd_hwdep { - struct snd_card *card; -diff --git a/include/sound/info.h b/include/sound/info.h -index 4e94cf1..76748b1 100644 ---- a/include/sound/info.h -+++ b/include/sound/info.h -@@ -44,7 +44,7 @@ struct snd_info_entry_text { - struct snd_info_buffer *buffer); - void (*write)(struct snd_info_entry *entry, - struct snd_info_buffer *buffer); --}; -+} __no_const; - - struct snd_info_entry_ops { - int (*open)(struct snd_info_entry *entry, -diff --git a/include/sound/pcm.h b/include/sound/pcm.h -index 57e71fa..a2c7534 100644 ---- a/include/sound/pcm.h -+++ b/include/sound/pcm.h -@@ -81,6 +81,7 @@ struct snd_pcm_ops { - int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma); - int (*ack)(struct snd_pcm_substream *substream); - }; -+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const; - - /* - * -diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h -index af1b49e..a5d55a5 100644 ---- a/include/sound/sb16_csp.h -+++ b/include/sound/sb16_csp.h -@@ -146,7 +146,7 @@ struct snd_sb_csp_ops { - int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels); - int (*csp_stop) (struct snd_sb_csp * p); - int (*csp_qsound_transfer) (struct snd_sb_csp * p); 
--}; -+} __no_const; - - /* - * CSP private data -diff --git a/include/sound/soc.h b/include/sound/soc.h -index aa19f5a..a5b8208 100644 ---- a/include/sound/soc.h -+++ b/include/sound/soc.h -@@ -676,7 +676,7 @@ struct snd_soc_platform_driver { - /* platform IO - used for platform DAPM */ - unsigned int (*read)(struct snd_soc_platform *, unsigned int); - int (*write)(struct snd_soc_platform *, unsigned int, unsigned int); --}; -+} __do_const; - - struct snd_soc_platform { - const char *name; -diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h -index 444cd6b..3327cc5 100644 ---- a/include/sound/ymfpci.h -+++ b/include/sound/ymfpci.h -@@ -358,7 +358,7 @@ struct snd_ymfpci { - spinlock_t reg_lock; - spinlock_t voice_lock; - wait_queue_head_t interrupt_sleep; -- atomic_t interrupt_sleep_count; -+ atomic_unchecked_t interrupt_sleep_count; - struct snd_info_entry *proc_entry; - const struct firmware *dsp_microcode; - const struct firmware *controller_microcode; -diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h -index 2704065..e10f3ef 100644 ---- a/include/target/target_core_base.h -+++ b/include/target/target_core_base.h -@@ -356,7 +356,7 @@ struct t10_reservation_ops { - int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); - int (*t10_pr_register)(struct se_cmd *); - int (*t10_pr_clear)(struct se_cmd *); --}; -+} __no_const; - - struct t10_reservation { - /* Reservation effects all target ports */ -@@ -496,8 +496,8 @@ struct se_cmd { - atomic_t t_task_cdbs_left; - atomic_t t_task_cdbs_ex_left; - atomic_t t_task_cdbs_timeout_left; -- atomic_t t_task_cdbs_sent; -- atomic_t t_transport_aborted; -+ atomic_unchecked_t t_task_cdbs_sent; -+ atomic_unchecked_t t_transport_aborted; - atomic_t t_transport_active; - atomic_t t_transport_complete; - atomic_t t_transport_queue_active; -@@ -744,7 +744,7 @@ struct se_device { - atomic_t active_cmds; - atomic_t simple_cmds; - atomic_t depth_left; -- atomic_t dev_ordered_id; -+ atomic_unchecked_t dev_ordered_id; - atomic_t dev_tur_active; - atomic_t execute_tasks; - atomic_t dev_status_thr_count; -diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h -index 1c09820..7f5ec79 100644 ---- a/include/trace/events/irq.h -+++ b/include/trace/events/irq.h -@@ -36,7 +36,7 @@ struct softirq_action; - */ - TRACE_EVENT(irq_handler_entry, - -- TP_PROTO(int irq, struct irqaction *action), -+ TP_PROTO(int irq, const struct irqaction *action), - - TP_ARGS(irq, action), - -@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry, - */ - TRACE_EVENT(irq_handler_exit, - -- TP_PROTO(int irq, struct irqaction *action, int ret), -+ TP_PROTO(int irq, const struct irqaction *action, int ret), - - TP_ARGS(irq, action, ret), - -diff --git a/include/video/udlfb.h b/include/video/udlfb.h -index 69d485a..dd0bee7 100644 ---- a/include/video/udlfb.h -+++ b/include/video/udlfb.h -@@ -51,10 +51,10 @@ struct dlfb_data { - int base8; - u32 pseudo_palette[256]; - /* blit-only rendering path metrics, exposed through sysfs */ -- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */ -- atomic_t bytes_identical; /* saved effort with backbuffer comparison */ -- atomic_t bytes_sent; /* to usb, after compression including overhead */ -- atomic_t cpu_kcycles_used; /* transpired during pixel processing */ -+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */ -+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */ -+ atomic_unchecked_t bytes_sent; /* to usb, after 
compression including overhead */ -+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */ - }; - - #define NR_USB_REQUEST_I2C_SUB_IO 0x02 -diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h -index 0993a22..32ba2fe 100644 ---- a/include/video/uvesafb.h -+++ b/include/video/uvesafb.h -@@ -177,6 +177,7 @@ struct uvesafb_par { - u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */ - u8 pmi_setpal; /* PMI for palette changes */ - u16 *pmi_base; /* protected mode interface location */ -+ u8 *pmi_code; /* protected mode code location */ - void *pmi_start; - void *pmi_pal; - u8 *vbe_state_orig; /* -diff --git a/init/Kconfig b/init/Kconfig -index d627783..693a9f3 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1202,7 +1202,7 @@ config SLUB_DEBUG - - config COMPAT_BRK - bool "Disable heap randomization" -- default y -+ default n - help - Randomizing heap placement makes heap exploits harder, but it - also breaks ancient binaries (including anything libc5 based). -diff --git a/init/do_mounts.c b/init/do_mounts.c -index c0851a8..4f8977d 100644 ---- a/init/do_mounts.c -+++ b/init/do_mounts.c -@@ -287,11 +287,11 @@ static void __init get_fs_names(char *page) - - static int __init do_mount_root(char *name, char *fs, int flags, void *data) - { -- int err = sys_mount(name, "/root", fs, flags, data); -+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data); - if (err) - return err; - -- sys_chdir((const char __user __force *)"/root"); -+ sys_chdir((const char __force_user*)"/root"); - ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev; - printk(KERN_INFO - "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n", -@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...) 
- va_start(args, fmt); - vsprintf(buf, fmt, args); - va_end(args); -- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); -+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); - if (fd >= 0) { - sys_ioctl(fd, FDEJECT, 0); - sys_close(fd); - } - printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); -- fd = sys_open("/dev/console", O_RDWR, 0); -+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0); - if (fd >= 0) { - sys_ioctl(fd, TCGETS, (long)&termios); - termios.c_lflag &= ~ICANON; - sys_ioctl(fd, TCSETSF, (long)&termios); -- sys_read(fd, &c, 1); -+ sys_read(fd, (char __user *)&c, 1); - termios.c_lflag |= ICANON; - sys_ioctl(fd, TCSETSF, (long)&termios); - sys_close(fd); -@@ -488,6 +488,6 @@ void __init prepare_namespace(void) - mount_root(); - out: - devtmpfs_mount("dev"); -- sys_mount(".", "/", NULL, MS_MOVE, NULL); -- sys_chroot((const char __user __force *)"."); -+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); -+ sys_chroot((const char __force_user *)"."); - } -diff --git a/init/do_mounts.h b/init/do_mounts.h -index f5b978a..69dbfe8 100644 ---- a/init/do_mounts.h -+++ b/init/do_mounts.h -@@ -15,15 +15,15 @@ extern int root_mountflags; - - static inline int create_dev(char *name, dev_t dev) - { -- sys_unlink(name); -- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); -+ sys_unlink((char __force_user *)name); -+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev)); - } - - #if BITS_PER_LONG == 32 - static inline u32 bstat(char *name) - { - struct stat64 stat; -- if (sys_stat64(name, &stat) != 0) -+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0) - return 0; - if (!S_ISBLK(stat.st_mode)) - return 0; -@@ -35,7 +35,7 @@ static inline u32 bstat(char *name) - static inline u32 bstat(char *name) - { - struct stat stat; -- if (sys_newstat(name, &stat) != 0) -+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0) - return 0; - if (!S_ISBLK(stat.st_mode)) - return 0; -diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c -index 3098a38..253064e 100644 ---- a/init/do_mounts_initrd.c -+++ b/init/do_mounts_initrd.c -@@ -44,13 +44,13 @@ static void __init handle_initrd(void) - create_dev("/dev/root.old", Root_RAM0); - /* mount initrd on rootfs' /root */ - mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); -- sys_mkdir("/old", 0700); -- root_fd = sys_open("/", 0, 0); -- old_fd = sys_open("/old", 0, 0); -+ sys_mkdir((const char __force_user *)"/old", 0700); -+ root_fd = sys_open((const char __force_user *)"/", 0, 0); -+ old_fd = sys_open((const char __force_user *)"/old", 0, 0); - /* move initrd over / and chdir/chroot in initrd root */ -- sys_chdir("/root"); -- sys_mount(".", "/", NULL, MS_MOVE, NULL); -- sys_chroot("."); -+ sys_chdir((const char __force_user *)"/root"); -+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); -+ sys_chroot((const char __force_user *)"."); - - /* - * In case that a resume from disk is carried out by linuxrc or one of -@@ -67,15 +67,15 @@ static void __init handle_initrd(void) - - /* move initrd to rootfs' /old */ - sys_fchdir(old_fd); -- sys_mount("/", ".", NULL, MS_MOVE, NULL); -+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL); - /* switch root and cwd back to / of rootfs */ - sys_fchdir(root_fd); -- sys_chroot("."); -+ sys_chroot((const char __force_user *)"."); - sys_close(old_fd); - 
sys_close(root_fd); - - if (new_decode_dev(real_root_dev) == Root_RAM0) { -- sys_chdir("/old"); -+ sys_chdir((const char __force_user *)"/old"); - return; - } - -@@ -83,17 +83,17 @@ static void __init handle_initrd(void) - mount_root(); - - printk(KERN_NOTICE "Trying to move old root to /initrd ... "); -- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); -+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL); - if (!error) - printk("okay\n"); - else { -- int fd = sys_open("/dev/root.old", O_RDWR, 0); -+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0); - if (error == -ENOENT) - printk("/initrd does not exist. Ignored.\n"); - else - printk("failed\n"); - printk(KERN_NOTICE "Unmounting old root\n"); -- sys_umount("/old", MNT_DETACH); -+ sys_umount((char __force_user *)"/old", MNT_DETACH); - printk(KERN_NOTICE "Trying to free ramdisk memory ... "); - if (fd < 0) { - error = fd; -@@ -116,11 +116,11 @@ int __init initrd_load(void) - * mounted in the normal path. - */ - if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { -- sys_unlink("/initrd.image"); -+ sys_unlink((const char __force_user *)"/initrd.image"); - handle_initrd(); - return 1; - } - } -- sys_unlink("/initrd.image"); -+ sys_unlink((const char __force_user *)"/initrd.image"); - return 0; - } -diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c -index 32c4799..c27ee74 100644 ---- a/init/do_mounts_md.c -+++ b/init/do_mounts_md.c -@@ -170,7 +170,7 @@ static void __init md_setup_drive(void) - partitioned ? "_d" : "", minor, - md_setup_args[ent].device_names); - -- fd = sys_open(name, 0, 0); -+ fd = sys_open((char __force_user *)name, 0, 0); - if (fd < 0) { - printk(KERN_ERR "md: open failed - cannot start " - "array %s\n", name); -@@ -233,7 +233,7 @@ static void __init md_setup_drive(void) - * array without it - */ - sys_close(fd); -- fd = sys_open(name, 0, 0); -+ fd = sys_open((char __force_user *)name, 0, 0); - sys_ioctl(fd, BLKRRPART, 0); - } - sys_close(fd); -@@ -283,7 +283,7 @@ static void __init autodetect_raid(void) - - wait_for_device_probe(); - -- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0); -+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0); - if (fd >= 0) { - sys_ioctl(fd, RAID_AUTORUN, raid_autopart); - sys_close(fd); -diff --git a/init/initramfs.c b/init/initramfs.c -index 2531811..040d4d4 100644 ---- a/init/initramfs.c -+++ b/init/initramfs.c -@@ -74,7 +74,7 @@ static void __init free_hash(void) - } - } - --static long __init do_utime(char __user *filename, time_t mtime) -+static long __init do_utime(__force char __user *filename, time_t mtime) - { - struct timespec t[2]; - -@@ -109,7 +109,7 @@ static void __init dir_utime(void) - struct dir_entry *de, *tmp; - list_for_each_entry_safe(de, tmp, &dir_list, list) { - list_del(&de->list); -- do_utime(de->name, de->mtime); -+ do_utime((char __force_user *)de->name, de->mtime); - kfree(de->name); - kfree(de); - } -@@ -271,7 +271,7 @@ static int __init maybe_link(void) - if (nlink >= 2) { - char *old = find_link(major, minor, ino, mode, collected); - if (old) -- return (sys_link(old, collected) < 0) ? -1 : 1; -+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? 
-1 : 1; - } - return 0; - } -@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode) - { - struct stat st; - -- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { -+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) { - if (S_ISDIR(st.st_mode)) -- sys_rmdir(path); -+ sys_rmdir((char __force_user *)path); - else -- sys_unlink(path); -+ sys_unlink((char __force_user *)path); - } - } - -@@ -305,7 +305,7 @@ static int __init do_name(void) - int openflags = O_WRONLY|O_CREAT; - if (ml != 1) - openflags |= O_TRUNC; -- wfd = sys_open(collected, openflags, mode); -+ wfd = sys_open((char __force_user *)collected, openflags, mode); - - if (wfd >= 0) { - sys_fchown(wfd, uid, gid); -@@ -317,17 +317,17 @@ static int __init do_name(void) - } - } - } else if (S_ISDIR(mode)) { -- sys_mkdir(collected, mode); -- sys_chown(collected, uid, gid); -- sys_chmod(collected, mode); -+ sys_mkdir((char __force_user *)collected, mode); -+ sys_chown((char __force_user *)collected, uid, gid); -+ sys_chmod((char __force_user *)collected, mode); - dir_add(collected, mtime); - } else if (S_ISBLK(mode) || S_ISCHR(mode) || - S_ISFIFO(mode) || S_ISSOCK(mode)) { - if (maybe_link() == 0) { -- sys_mknod(collected, mode, rdev); -- sys_chown(collected, uid, gid); -- sys_chmod(collected, mode); -- do_utime(collected, mtime); -+ sys_mknod((char __force_user *)collected, mode, rdev); -+ sys_chown((char __force_user *)collected, uid, gid); -+ sys_chmod((char __force_user *)collected, mode); -+ do_utime((char __force_user *)collected, mtime); - } - } - return 0; -@@ -336,15 +336,15 @@ static int __init do_name(void) - static int __init do_copy(void) - { - if (count >= body_len) { -- sys_write(wfd, victim, body_len); -+ sys_write(wfd, (char __force_user *)victim, body_len); - sys_close(wfd); -- do_utime(vcollected, mtime); -+ do_utime((char __force_user *)vcollected, mtime); - kfree(vcollected); - eat(body_len); - state = SkipIt; - return 0; - } else { -- sys_write(wfd, victim, count); -+ sys_write(wfd, (char __force_user *)victim, count); - body_len -= count; - eat(count); - return 1; -@@ -355,9 +355,9 @@ static int __init do_symlink(void) - { - collected[N_ALIGN(name_len) + body_len] = '\0'; - clean_path(collected, 0); -- sys_symlink(collected + N_ALIGN(name_len), collected); -- sys_lchown(collected, uid, gid); -- do_utime(collected, mtime); -+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected); -+ sys_lchown((char __force_user *)collected, uid, gid); -+ do_utime((char __force_user *)collected, mtime); - state = SkipIt; - next_state = Reset; - return 0; -diff --git a/init/main.c b/init/main.c -index 03b408d..5777f59 100644 ---- a/init/main.c -+++ b/init/main.c -@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { } - extern void tc_init(void); - #endif - -+extern void grsecurity_init(void); -+ - /* - * Debug helper: via this flag we know that we are in 'early bootup code' - * where only the boot processor is running with IRQ disabled. 
This means -@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str) - - __setup("reset_devices", set_reset_devices); - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+extern char pax_enter_kernel_user[]; -+extern char pax_exit_kernel_user[]; -+extern pgdval_t clone_pgd_mask; -+#endif -+ -+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF) -+static int __init setup_pax_nouderef(char *str) -+{ -+#ifdef CONFIG_X86_32 -+ unsigned int cpu; -+ struct desc_struct *gdt; -+ -+ for (cpu = 0; cpu < NR_CPUS; cpu++) { -+ gdt = get_cpu_gdt_table(cpu); -+ gdt[GDT_ENTRY_KERNEL_DS].type = 3; -+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; -+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf; -+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf; -+ } -+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); -+#else -+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1); -+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1); -+ clone_pgd_mask = ~(pgdval_t)0UL; -+#endif -+ -+ return 0; -+} -+early_param("pax_nouderef", setup_pax_nouderef); -+#endif -+ -+#ifdef CONFIG_PAX_SOFTMODE -+int pax_softmode; -+ -+static int __init setup_pax_softmode(char *str) -+{ -+ get_option(&str, &pax_softmode); -+ return 1; -+} -+__setup("pax_softmode=", setup_pax_softmode); -+#endif -+ - static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; - const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; - static const char *panic_later, *panic_param; -@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn) - { - int count = preempt_count(); - int ret; -+ const char *msg1 = "", *msg2 = ""; - - if (initcall_debug) - ret = do_one_initcall_debug(fn); -@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn) - sprintf(msgbuf, "error code %d ", ret); - - if (preempt_count() != count) { -- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); -+ msg1 = " preemption imbalance"; - preempt_count() = count; - } - if (irqs_disabled()) { -- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); -+ msg2 = " disabled interrupts"; - local_irq_enable(); - } -- if (msgbuf[0]) { -- printk("initcall %pF returned with %s\n", fn, msgbuf); -+ if (msgbuf[0] || *msg1 || *msg2) { -+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2); - } - - return ret; -@@ -817,7 +863,7 @@ static int __init kernel_init(void * unused) - do_basic_setup(); - - /* Open the /dev/console on the rootfs, this should never fail */ -- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) -+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0) - printk(KERN_WARNING "Warning: unable to open an initial console.\n"); - - (void) sys_dup(0); -@@ -830,11 +876,13 @@ static int __init kernel_init(void * unused) - if (!ramdisk_execute_command) - ramdisk_execute_command = "/init"; - -- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { -+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) { - ramdisk_execute_command = NULL; - prepare_namespace(); - } - -+ grsecurity_init(); -+ - /* - * Ok, we have completed the initial bootup, and - * we're essentially up and running. 
Get rid of the -diff --git a/ipc/mqueue.c b/ipc/mqueue.c -index ed049ea..6442f7f 100644 ---- a/ipc/mqueue.c -+++ b/ipc/mqueue.c -@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, - mq_bytes = (mq_msg_tblsz + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); - -+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1); - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) { -diff --git a/ipc/msg.c b/ipc/msg.c -index 7385de2..a8180e0 100644 ---- a/ipc/msg.c -+++ b/ipc/msg.c -@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) - return security_msg_queue_associate(msq, msgflg); - } - -+static struct ipc_ops msg_ops = { -+ .getnew = newque, -+ .associate = msg_security, -+ .more_checks = NULL -+}; -+ - SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) - { - struct ipc_namespace *ns; -- struct ipc_ops msg_ops; - struct ipc_params msg_params; - - ns = current->nsproxy->ipc_ns; - -- msg_ops.getnew = newque; -- msg_ops.associate = msg_security; -- msg_ops.more_checks = NULL; -- - msg_params.key = key; - msg_params.flg = msgflg; - -diff --git a/ipc/sem.c b/ipc/sem.c -index c8e00f8..1135c4e 100644 ---- a/ipc/sem.c -+++ b/ipc/sem.c -@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, - return 0; - } - -+static struct ipc_ops sem_ops = { -+ .getnew = newary, -+ .associate = sem_security, -+ .more_checks = sem_more_checks -+}; -+ - SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) - { - struct ipc_namespace *ns; -- struct ipc_ops sem_ops; - struct ipc_params sem_params; - - ns = current->nsproxy->ipc_ns; -@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) - if (nsems < 0 || nsems > ns->sc_semmsl) - return -EINVAL; - -- sem_ops.getnew = newary; -- sem_ops.associate = sem_security; -- sem_ops.more_checks = sem_more_checks; -- - sem_params.key = key; - sem_params.flg = semflg; - sem_params.u.nsems = nsems; -@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, - int nsems; - struct list_head tasks; - -+ pax_track_stack(); -+ - sma = sem_lock_check(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); -@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, - struct ipc_namespace *ns; - struct list_head tasks; - -+ pax_track_stack(); -+ - ns = current->nsproxy->ipc_ns; - - if (nsops < 1 || semid < 0) -diff --git a/ipc/shm.c b/ipc/shm.c -index 02ecf2c..c8f5627 100644 ---- a/ipc/shm.c -+++ b/ipc/shm.c -@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); - static int sysvipc_shm_proc_show(struct seq_file *s, void *it); - #endif - -+#ifdef CONFIG_GRKERNSEC -+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, -+ const time_t shm_createtime, const uid_t cuid, -+ const int shmid); -+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, -+ const time_t shm_createtime); -+#endif -+ - void shm_init_ns(struct ipc_namespace *ns) - { - ns->shm_ctlmax = SHMMAX; -@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) - shp->shm_lprid = 0; - shp->shm_atim = shp->shm_dtim = 0; - shp->shm_ctim = get_seconds(); -+#ifdef CONFIG_GRKERNSEC -+ { -+ struct timespec timeval; -+ do_posix_clock_monotonic_gettime(&timeval); -+ -+ shp->shm_createtime = timeval.tv_sec; -+ } -+#endif - shp->shm_segsz = size; - 
shp->shm_nattch = 0; - shp->shm_file = file; -@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, - return 0; - } - -+static struct ipc_ops shm_ops = { -+ .getnew = newseg, -+ .associate = shm_security, -+ .more_checks = shm_more_checks -+}; -+ - SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) - { - struct ipc_namespace *ns; -- struct ipc_ops shm_ops; - struct ipc_params shm_params; - - ns = current->nsproxy->ipc_ns; - -- shm_ops.getnew = newseg; -- shm_ops.associate = shm_security; -- shm_ops.more_checks = shm_more_checks; -- - shm_params.key = key; - shm_params.flg = shmflg; - shm_params.u.size = size; -@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) - case SHM_LOCK: - case SHM_UNLOCK: - { -- struct file *uninitialized_var(shm_file); -- - lru_add_drain_all(); /* drain pagevecs to lru lists */ - - shp = shm_lock_check(ns, shmid); -@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) - if (err) - goto out_unlock; - -+#ifdef CONFIG_GRKERNSEC -+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, -+ shp->shm_perm.cuid, shmid) || -+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { -+ err = -EACCES; -+ goto out_unlock; -+ } -+#endif -+ - path = shp->shm_file->f_path; - path_get(&path); - shp->shm_nattch++; -+#ifdef CONFIG_GRKERNSEC -+ shp->shm_lapid = current->pid; -+#endif - size = i_size_read(path.dentry->d_inode); - shm_unlock(shp); - -diff --git a/kernel/acct.c b/kernel/acct.c -index fa7eb3d..7faf116 100644 ---- a/kernel/acct.c -+++ b/kernel/acct.c -@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, - */ - flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; -- file->f_op->write(file, (char *)&ac, -+ file->f_op->write(file, (char __force_user *)&ac, - sizeof(acct_t), &file->f_pos); - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; - set_fs(fs); -diff --git a/kernel/audit.c b/kernel/audit.c -index 0a1355c..dca420f 100644 ---- a/kernel/audit.c -+++ b/kernel/audit.c -@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0; - 3) suppressed due to audit_rate_limit - 4) suppressed due to audit_backlog_limit - */ --static atomic_t audit_lost = ATOMIC_INIT(0); -+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0); - - /* The netlink socket. 
*/ - static struct sock *audit_sock; -@@ -237,7 +237,7 @@ void audit_log_lost(const char *message) - unsigned long now; - int print; - -- atomic_inc(&audit_lost); -+ atomic_inc_unchecked(&audit_lost); - - print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); - -@@ -256,7 +256,7 @@ void audit_log_lost(const char *message) - printk(KERN_WARNING - "audit: audit_lost=%d audit_rate_limit=%d " - "audit_backlog_limit=%d\n", -- atomic_read(&audit_lost), -+ atomic_read_unchecked(&audit_lost), - audit_rate_limit, - audit_backlog_limit); - audit_panic(message); -@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) - status_set.pid = audit_pid; - status_set.rate_limit = audit_rate_limit; - status_set.backlog_limit = audit_backlog_limit; -- status_set.lost = atomic_read(&audit_lost); -+ status_set.lost = atomic_read_unchecked(&audit_lost); - status_set.backlog = skb_queue_len(&audit_skb_queue); - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, - &status_set, sizeof(status_set)); -diff --git a/kernel/auditsc.c b/kernel/auditsc.c -index ce4b054..8139ed7 100644 ---- a/kernel/auditsc.c -+++ b/kernel/auditsc.c -@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx, - } - - /* global counter which is incremented every time something logs in */ --static atomic_t session_id = ATOMIC_INIT(0); -+static atomic_unchecked_t session_id = ATOMIC_INIT(0); - - /** - * audit_set_loginuid - set a task's audit_context loginuid -@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0); - */ - int audit_set_loginuid(struct task_struct *task, uid_t loginuid) - { -- unsigned int sessionid = atomic_inc_return(&session_id); -+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id); - struct audit_context *context = task->audit_context; - - if (context && context->in_syscall) { -diff --git a/kernel/capability.c b/kernel/capability.c -index 283c529..36ac81e 100644 ---- a/kernel/capability.c -+++ b/kernel/capability.c -@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) - * before modification is attempted and the application - * fails. - */ -+ if (tocopy > ARRAY_SIZE(kdata)) -+ return -EFAULT; -+ - if (copy_to_user(dataptr, kdata, tocopy - * sizeof(struct __user_cap_data_struct))) { - return -EFAULT; -@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap) - BUG(); - } - -- if (security_capable(ns, current_cred(), cap) == 0) { -+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) { - current->flags |= PF_SUPERPRIV; - return true; - } -@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap) - } - EXPORT_SYMBOL(ns_capable); - -+bool ns_capable_nolog(struct user_namespace *ns, int cap) -+{ -+ if (unlikely(!cap_valid(cap))) { -+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); -+ BUG(); -+ } -+ -+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) { -+ current->flags |= PF_SUPERPRIV; -+ return true; -+ } -+ return false; -+} -+EXPORT_SYMBOL(ns_capable_nolog); -+ -+bool capable_nolog(int cap) -+{ -+ return ns_capable_nolog(&init_user_ns, cap); -+} -+EXPORT_SYMBOL(capable_nolog); -+ - /** - * task_ns_capable - Determine whether current task has a superior - * capability targeted at a specific task's user namespace. 
-@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap) - } - EXPORT_SYMBOL(task_ns_capable); - -+bool task_ns_capable_nolog(struct task_struct *t, int cap) -+{ -+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap); -+} -+EXPORT_SYMBOL(task_ns_capable_nolog); -+ - /** - * nsown_capable - Check superior capability to one's own user_ns - * @cap: The capability in question -diff --git a/kernel/cgroup.c b/kernel/cgroup.c -index 1d2b6ce..87bf267 100644 ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -595,6 +595,8 @@ static struct css_set *find_css_set( - struct hlist_head *hhead; - struct cg_cgroup_link *link; - -+ pax_track_stack(); -+ - /* First see if we already have a cgroup group that matches - * the desired set */ - read_lock(&css_set_lock); -diff --git a/kernel/compat.c b/kernel/compat.c -index e2435ee..8e82199 100644 ---- a/kernel/compat.c -+++ b/kernel/compat.c -@@ -13,6 +13,7 @@ - - #include <linux/linkage.h> - #include <linux/compat.h> -+#include <linux/module.h> - #include <linux/errno.h> - #include <linux/time.h> - #include <linux/signal.h> -@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) - mm_segment_t oldfs; - long ret; - -- restart->nanosleep.rmtp = (struct timespec __user *) &rmt; -+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt; - oldfs = get_fs(); - set_fs(KERNEL_DS); - ret = hrtimer_nanosleep_restart(restart); -@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, - oldfs = get_fs(); - set_fs(KERNEL_DS); - ret = hrtimer_nanosleep(&tu, -- rmtp ? (struct timespec __user *)&rmt : NULL, -+ rmtp ? (struct timespec __force_user *)&rmt : NULL, - HRTIMER_MODE_REL, CLOCK_MONOTONIC); - set_fs(oldfs); - -@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); -- ret = sys_sigpending((old_sigset_t __user *) &s); -+ ret = sys_sigpending((old_sigset_t __force_user *) &s); - set_fs(old_fs); - if (ret == 0) - ret = put_user(s, set); -@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_sigprocmask(how, -- set ? (old_sigset_t __user *) &s : NULL, -- oset ? (old_sigset_t __user *) &s : NULL); -+ set ? (old_sigset_t __force_user *) &s : NULL, -+ oset ? (old_sigset_t __force_user *) &s : NULL); - set_fs(old_fs); - if (ret == 0) - if (oset) -@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); -- ret = sys_old_getrlimit(resource, &r); -+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r); - set_fs(old_fs); - - if (!ret) { -@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); -- ret = sys_getrusage(who, (struct rusage __user *) &r); -+ ret = sys_getrusage(who, (struct rusage __force_user *) &r); - set_fs(old_fs); - - if (ret) -@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, - set_fs (KERNEL_DS); - ret = sys_wait4(pid, - (stat_addr ? 
-- (unsigned int __user *) &status : NULL), -- options, (struct rusage __user *) &r); -+ (unsigned int __force_user *) &status : NULL), -+ options, (struct rusage __force_user *) &r); - set_fs (old_fs); - - if (ret > 0) { -@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, - memset(&info, 0, sizeof(info)); - - set_fs(KERNEL_DS); -- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, -- uru ? (struct rusage __user *)&ru : NULL); -+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options, -+ uru ? (struct rusage __force_user *)&ru : NULL); - set_fs(old_fs); - - if ((ret < 0) || (info.si_signo == 0)) -@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_timer_settime(timer_id, flags, -- (struct itimerspec __user *) &newts, -- (struct itimerspec __user *) &oldts); -+ (struct itimerspec __force_user *) &newts, -+ (struct itimerspec __force_user *) &oldts); - set_fs(oldfs); - if (!err && old && put_compat_itimerspec(old, &oldts)) - return -EFAULT; -@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_timer_gettime(timer_id, -- (struct itimerspec __user *) &ts); -+ (struct itimerspec __force_user *) &ts); - set_fs(oldfs); - if (!err && put_compat_itimerspec(setting, &ts)) - return -EFAULT; -@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_clock_settime(which_clock, -- (struct timespec __user *) &ts); -+ (struct timespec __force_user *) &ts); - set_fs(oldfs); - return err; - } -@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_clock_gettime(which_clock, -- (struct timespec __user *) &ts); -+ (struct timespec __force_user *) &ts); - set_fs(oldfs); - if (!err && put_compat_timespec(&ts, tp)) - return -EFAULT; -@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock, - - oldfs = get_fs(); - set_fs(KERNEL_DS); -- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc); -+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc); - set_fs(oldfs); - - err = compat_put_timex(utp, &txc); -@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_clock_getres(which_clock, -- (struct timespec __user *) &ts); -+ (struct timespec __force_user *) &ts); - set_fs(oldfs); - if (!err && tp && put_compat_timespec(&ts, tp)) - return -EFAULT; -@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) - long err; - mm_segment_t oldfs; - struct timespec tu; -- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; -+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; - -- restart->nanosleep.rmtp = (struct timespec __user *) &tu; -+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu; - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = clock_nanosleep_restart(restart); -@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, - oldfs = get_fs(); - set_fs(KERNEL_DS); - err = sys_clock_nanosleep(which_clock, flags, -- (struct timespec __user *) &in, -- (struct timespec __user *) &out); -+ (struct timespec __force_user *) &in, -+ (struct timespec __force_user *) &out); - set_fs(oldfs); - - if ((err == -ERESTART_RESTARTBLOCK) && rmtp && -diff --git 
a/kernel/configs.c b/kernel/configs.c -index 42e8fa0..9e7406b 100644 ---- a/kernel/configs.c -+++ b/kernel/configs.c -@@ -74,8 +74,19 @@ static int __init ikconfig_init(void) - struct proc_dir_entry *entry; - - /* create the current config file */ -+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) -+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM) -+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL, -+ &ikconfig_file_ops); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL, -+ &ikconfig_file_ops); -+#endif -+#else - entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, - &ikconfig_file_ops); -+#endif -+ - if (!entry) - return -ENOMEM; - -diff --git a/kernel/cred.c b/kernel/cred.c -index 8ef31f5..f63d997 100644 ---- a/kernel/cred.c -+++ b/kernel/cred.c -@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu) - */ - void __put_cred(struct cred *cred) - { -+ pax_track_stack(); -+ - kdebug("__put_cred(%p{%d,%d})", cred, - atomic_read(&cred->usage), - read_cred_subscribers(cred)); -@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk) - { - struct cred *cred; - -+ pax_track_stack(); -+ - kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred, - atomic_read(&tsk->cred->usage), - read_cred_subscribers(tsk->cred)); -@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct task_struct *task) - { - const struct cred *cred; - -+ pax_track_stack(); -+ - rcu_read_lock(); - - do { -@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void) - { - struct cred *new; - -+ pax_track_stack(); -+ - new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); - if (!new) - return NULL; -@@ -287,6 +295,8 @@ struct cred *prepare_creds(void) - const struct cred *old; - struct cred *new; - -+ pax_track_stack(); -+ - validate_process_creds(); - - new = kmem_cache_alloc(cred_jar, GFP_KERNEL); -@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void) - struct thread_group_cred *tgcred = NULL; - struct cred *new; - -+ pax_track_stack(); -+ - #ifdef CONFIG_KEYS - tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); - if (!tgcred) -@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) - struct cred *new; - int ret; - -+ pax_track_stack(); -+ - if ( - #ifdef CONFIG_KEYS - !p->cred->thread_keyring && -@@ -475,6 +489,8 @@ int commit_creds(struct cred *new) - struct task_struct *task = current; - const struct cred *old = task->real_cred; - -+ pax_track_stack(); -+ - kdebug("commit_creds(%p{%d,%d})", new, - atomic_read(&new->usage), - read_cred_subscribers(new)); -@@ -489,6 +505,8 @@ int commit_creds(struct cred *new) - - get_cred(new); /* we will require a ref for the subj creds too */ - -+ gr_set_role_label(task, new->uid, new->gid); -+ - /* dumpability changes */ - if (old->euid != new->euid || - old->egid != new->egid || -@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds); - */ - void abort_creds(struct cred *new) - { -+ pax_track_stack(); -+ - kdebug("abort_creds(%p{%d,%d})", new, - atomic_read(&new->usage), - read_cred_subscribers(new)); -@@ -572,6 +592,8 @@ const struct cred *override_creds(const struct cred *new) - { - const struct cred *old = current->cred; - -+ pax_track_stack(); -+ - kdebug("override_creds(%p{%d,%d})", new, - atomic_read(&new->usage), - read_cred_subscribers(new)); -@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old) - { - const struct cred *override = current->cred; - -+ pax_track_stack(); -+ - 
kdebug("revert_creds(%p{%d,%d})", old, - atomic_read(&old->usage), - read_cred_subscribers(old)); -@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) - const struct cred *old; - struct cred *new; - -+ pax_track_stack(); -+ - new = kmem_cache_alloc(cred_jar, GFP_KERNEL); - if (!new) - return NULL; -@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred); - */ - int set_security_override(struct cred *new, u32 secid) - { -+ pax_track_stack(); -+ - return security_kernel_act_as(new, secid); - } - EXPORT_SYMBOL(set_security_override); -@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx) - u32 secid; - int ret; - -+ pax_track_stack(); -+ - ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); - if (ret < 0) - return ret; -diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c -index 0d7c087..01b8cef 100644 ---- a/kernel/debug/debug_core.c -+++ b/kernel/debug/debug_core.c -@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock); - */ - static atomic_t masters_in_kgdb; - static atomic_t slaves_in_kgdb; --static atomic_t kgdb_break_tasklet_var; -+static atomic_unchecked_t kgdb_break_tasklet_var; - atomic_t kgdb_setting_breakpoint; - - struct task_struct *kgdb_usethread; -@@ -129,7 +129,7 @@ int kgdb_single_step; - static pid_t kgdb_sstep_pid; - - /* to keep track of the CPU which is doing the single stepping*/ --atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); -+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); - - /* - * If you are debugging a problem where roundup (the collection of -@@ -542,7 +542,7 @@ return_normal: - * kernel will only try for the value of sstep_tries before - * giving up and continuing on. - */ -- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && -+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 && - (kgdb_info[cpu].task && - kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { - atomic_set(&kgdb_active, -1); -@@ -636,8 +636,8 @@ cpu_master_loop: - } - - kgdb_restore: -- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { -- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); -+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { -+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step); - if (kgdb_info[sstep_cpu].task) - kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; - else -@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void) - static void kgdb_tasklet_bpt(unsigned long ing) - { - kgdb_breakpoint(); -- atomic_set(&kgdb_break_tasklet_var, 0); -+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0); - } - - static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); - - void kgdb_schedule_breakpoint(void) - { -- if (atomic_read(&kgdb_break_tasklet_var) || -+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) || - atomic_read(&kgdb_active) != -1 || - atomic_read(&kgdb_setting_breakpoint)) - return; -- atomic_inc(&kgdb_break_tasklet_var); -+ atomic_inc_unchecked(&kgdb_break_tasklet_var); - tasklet_schedule(&kgdb_tasklet_breakpoint); - } - EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint); -diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c -index 63786e7..0780cac 100644 ---- a/kernel/debug/kdb/kdb_main.c -+++ b/kernel/debug/kdb/kdb_main.c -@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv) - list_for_each_entry(mod, kdb_modules, list) { - - kdb_printf("%-20s%8u 0x%p ", mod->name, -- mod->core_size, (void *)mod); -+ mod->core_size_rx + 
mod->core_size_rw, (void *)mod); - #ifdef CONFIG_MODULE_UNLOAD - kdb_printf("%4d ", module_refcount(mod)); - #endif -@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv) - kdb_printf(" (Loading)"); - else - kdb_printf(" (Live)"); -- kdb_printf(" 0x%p", mod->module_core); -+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); - - #ifdef CONFIG_MODULE_UNLOAD - { -diff --git a/kernel/events/core.c b/kernel/events/core.c -index 0f85778..0d43716 100644 ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write, - return 0; - } - --static atomic64_t perf_event_id; -+static atomic64_unchecked_t perf_event_id; - - static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, - enum event_type_t event_type); -@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info) - - static inline u64 perf_event_count(struct perf_event *event) - { -- return local64_read(&event->count) + atomic64_read(&event->child_count); -+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count); - } - - static u64 perf_event_read(struct perf_event *event) -@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) - mutex_lock(&event->child_mutex); - total += perf_event_read(event); - *enabled += event->total_time_enabled + -- atomic64_read(&event->child_total_time_enabled); -+ atomic64_read_unchecked(&event->child_total_time_enabled); - *running += event->total_time_running + -- atomic64_read(&event->child_total_time_running); -+ atomic64_read_unchecked(&event->child_total_time_running); - - list_for_each_entry(child, &event->child_list, child_list) { - total += perf_event_read(child); -@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event) - userpg->offset -= local64_read(&event->hw.prev_count); - - userpg->time_enabled = enabled + -- atomic64_read(&event->child_total_time_enabled); -+ atomic64_read_unchecked(&event->child_total_time_enabled); - - userpg->time_running = running + -- atomic64_read(&event->child_total_time_running); -+ atomic64_read_unchecked(&event->child_total_time_running); - - barrier(); - ++userpg->lock; -@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle, - values[n++] = perf_event_count(event); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = enabled + -- atomic64_read(&event->child_total_time_enabled); -+ atomic64_read_unchecked(&event->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = running + -- atomic64_read(&event->child_total_time_running); -+ atomic64_read_unchecked(&event->child_total_time_running); - } - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(event); -@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) - * need to add enough zero bytes after the string to handle - * the 64bit alignment we do later. 
- */ -- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); -+ buf = kzalloc(PATH_MAX, GFP_KERNEL); - if (!buf) { - name = strncpy(tmp, "//enomem", sizeof(tmp)); - goto got_name; - } -- name = d_path(&file->f_path, buf, PATH_MAX); -+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); - if (IS_ERR(name)) { - name = strncpy(tmp, "//toolong", sizeof(tmp)); - goto got_name; -@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, - event->parent = parent_event; - - event->ns = get_pid_ns(current->nsproxy->pid_ns); -- event->id = atomic64_inc_return(&perf_event_id); -+ event->id = atomic64_inc_return_unchecked(&perf_event_id); - - event->state = PERF_EVENT_STATE_INACTIVE; - -@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event, - /* - * Add back the child's count to the parent's count: - */ -- atomic64_add(child_val, &parent_event->child_count); -- atomic64_add(child_event->total_time_enabled, -+ atomic64_add_unchecked(child_val, &parent_event->child_count); -+ atomic64_add_unchecked(child_event->total_time_enabled, - &parent_event->child_total_time_enabled); -- atomic64_add(child_event->total_time_running, -+ atomic64_add_unchecked(child_event->total_time_running, - &parent_event->child_total_time_running); - - /* -diff --git a/kernel/exit.c b/kernel/exit.c -index 2913b35..4465c81 100644 ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -57,6 +57,10 @@ - #include <asm/pgtable.h> - #include <asm/mmu_context.h> - -+#ifdef CONFIG_GRKERNSEC -+extern rwlock_t grsec_exec_file_lock; -+#endif -+ - static void exit_mm(struct task_struct * tsk); - - static void __unhash_process(struct task_struct *p, bool group_dead) -@@ -168,6 +172,10 @@ void release_task(struct task_struct * p) - struct task_struct *leader; - int zap_leader; - repeat: -+#ifdef CONFIG_NET -+ gr_del_task_from_ip_table(p); -+#endif -+ - /* don't need to get the RCU readlock here - the process is dead and - * can't be modifying its own credentials. But shut RCU-lockdep up */ - rcu_read_lock(); -@@ -380,7 +388,7 @@ int allow_signal(int sig) - * know it'll be handled, so that they don't get converted to - * SIGKILL or just silently dropped. - */ -- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; -+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - return 0; -@@ -416,6 +424,17 @@ void daemonize(const char *name, ...) - vsnprintf(current->comm, sizeof(current->comm), name, args); - va_end(args); - -+#ifdef CONFIG_GRKERNSEC -+ write_lock(&grsec_exec_file_lock); -+ if (current->exec_file) { -+ fput(current->exec_file); -+ current->exec_file = NULL; -+ } -+ write_unlock(&grsec_exec_file_lock); -+#endif -+ -+ gr_set_kernel_label(current); -+ - /* - * If we were started as result of loading a module, close all of the - * user space pages. We don't need them, and if we didn't close them -@@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code) - struct task_struct *tsk = current; - int group_dead; - -+ set_fs(USER_DS); -+ - profile_task_exit(tsk); - - WARN_ON(blk_needs_flush_plug(tsk)); -@@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code) - * mm_release()->clear_child_tid() from writing to a user-controlled - * kernel address. 
- */ -- set_fs(USER_DS); - - ptrace_event(PTRACE_EVENT_EXIT, code); - -@@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code) - tsk->exit_code = code; - taskstats_exit(tsk, group_dead); - -+ gr_acl_handle_psacct(tsk, code); -+ gr_acl_handle_exit(); -+ - exit_mm(tsk); - - if (group_dead) -diff --git a/kernel/fork.c b/kernel/fork.c -index 8e6b6f4..9dccf00 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) - *stackend = STACK_END_MAGIC; /* for overflow detection */ - - #ifdef CONFIG_CC_STACKPROTECTOR -- tsk->stack_canary = get_random_int(); -+ tsk->stack_canary = pax_get_random_long(); - #endif - - /* -@@ -309,13 +309,77 @@ out: - } - - #ifdef CONFIG_MMU -+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt) -+{ -+ struct vm_area_struct *tmp; -+ unsigned long charge; -+ struct mempolicy *pol; -+ struct file *file; -+ -+ charge = 0; -+ if (mpnt->vm_flags & VM_ACCOUNT) { -+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; -+ if (security_vm_enough_memory(len)) -+ goto fail_nomem; -+ charge = len; -+ } -+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); -+ if (!tmp) -+ goto fail_nomem; -+ *tmp = *mpnt; -+ tmp->vm_mm = mm; -+ INIT_LIST_HEAD(&tmp->anon_vma_chain); -+ pol = mpol_dup(vma_policy(mpnt)); -+ if (IS_ERR(pol)) -+ goto fail_nomem_policy; -+ vma_set_policy(tmp, pol); -+ if (anon_vma_fork(tmp, mpnt)) -+ goto fail_nomem_anon_vma_fork; -+ tmp->vm_flags &= ~VM_LOCKED; -+ tmp->vm_next = tmp->vm_prev = NULL; -+ tmp->vm_mirror = NULL; -+ file = tmp->vm_file; -+ if (file) { -+ struct inode *inode = file->f_path.dentry->d_inode; -+ struct address_space *mapping = file->f_mapping; -+ -+ get_file(file); -+ if (tmp->vm_flags & VM_DENYWRITE) -+ atomic_dec(&inode->i_writecount); -+ mutex_lock(&mapping->i_mmap_mutex); -+ if (tmp->vm_flags & VM_SHARED) -+ mapping->i_mmap_writable++; -+ flush_dcache_mmap_lock(mapping); -+ /* insert tmp into the share list, just after mpnt */ -+ vma_prio_tree_add(tmp, mpnt); -+ flush_dcache_mmap_unlock(mapping); -+ mutex_unlock(&mapping->i_mmap_mutex); -+ } -+ -+ /* -+ * Clear hugetlb-related page reserves for children. This only -+ * affects MAP_PRIVATE mappings. 
Faults generated by the child -+ * are not guaranteed to succeed, even if read-only -+ */ -+ if (is_vm_hugetlb_page(tmp)) -+ reset_vma_resv_huge_pages(tmp); -+ -+ return tmp; -+ -+fail_nomem_anon_vma_fork: -+ mpol_put(pol); -+fail_nomem_policy: -+ kmem_cache_free(vm_area_cachep, tmp); -+fail_nomem: -+ vm_unacct_memory(charge); -+ return NULL; -+} -+ - static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - { - struct vm_area_struct *mpnt, *tmp, *prev, **pprev; - struct rb_node **rb_link, *rb_parent; - int retval; -- unsigned long charge; -- struct mempolicy *pol; - - down_write(&oldmm->mmap_sem); - flush_cache_dup_mm(oldmm); -@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - mm->locked_vm = 0; - mm->mmap = NULL; - mm->mmap_cache = NULL; -- mm->free_area_cache = oldmm->mmap_base; -- mm->cached_hole_size = ~0UL; -+ mm->free_area_cache = oldmm->free_area_cache; -+ mm->cached_hole_size = oldmm->cached_hole_size; - mm->map_count = 0; - cpumask_clear(mm_cpumask(mm)); - mm->mm_rb = RB_ROOT; -@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - - prev = NULL; - for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { -- struct file *file; -- - if (mpnt->vm_flags & VM_DONTCOPY) { - long pages = vma_pages(mpnt); - mm->total_vm -= pages; -@@ -353,53 +415,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - -pages); - continue; - } -- charge = 0; -- if (mpnt->vm_flags & VM_ACCOUNT) { -- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; -- if (security_vm_enough_memory(len)) -- goto fail_nomem; -- charge = len; -+ tmp = dup_vma(mm, mpnt); -+ if (!tmp) { -+ retval = -ENOMEM; -+ goto out; - } -- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); -- if (!tmp) -- goto fail_nomem; -- *tmp = *mpnt; -- INIT_LIST_HEAD(&tmp->anon_vma_chain); -- pol = mpol_dup(vma_policy(mpnt)); -- retval = PTR_ERR(pol); -- if (IS_ERR(pol)) -- goto fail_nomem_policy; -- vma_set_policy(tmp, pol); -- tmp->vm_mm = mm; -- if (anon_vma_fork(tmp, mpnt)) -- goto fail_nomem_anon_vma_fork; -- tmp->vm_flags &= ~VM_LOCKED; -- tmp->vm_next = tmp->vm_prev = NULL; -- file = tmp->vm_file; -- if (file) { -- struct inode *inode = file->f_path.dentry->d_inode; -- struct address_space *mapping = file->f_mapping; -- -- get_file(file); -- if (tmp->vm_flags & VM_DENYWRITE) -- atomic_dec(&inode->i_writecount); -- mutex_lock(&mapping->i_mmap_mutex); -- if (tmp->vm_flags & VM_SHARED) -- mapping->i_mmap_writable++; -- flush_dcache_mmap_lock(mapping); -- /* insert tmp into the share list, just after mpnt */ -- vma_prio_tree_add(tmp, mpnt); -- flush_dcache_mmap_unlock(mapping); -- mutex_unlock(&mapping->i_mmap_mutex); -- } -- -- /* -- * Clear hugetlb-related page reserves for children. This only -- * affects MAP_PRIVATE mappings. Faults generated by the child -- * are not guaranteed to succeed, even if read-only -- */ -- if (is_vm_hugetlb_page(tmp)) -- reset_vma_resv_huge_pages(tmp); - - /* - * Link in the new vma and copy the page table entries. 
-@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - if (retval) - goto out; - } -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { -+ struct vm_area_struct *mpnt_m; -+ -+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { -+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); -+ -+ if (!mpnt->vm_mirror) -+ continue; -+ -+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { -+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); -+ mpnt->vm_mirror = mpnt_m; -+ } else { -+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); -+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; -+ mpnt_m->vm_mirror->vm_mirror = mpnt_m; -+ mpnt->vm_mirror->vm_mirror = mpnt; -+ } -+ } -+ BUG_ON(mpnt_m); -+ } -+#endif -+ - /* a new mm has just been created */ - arch_dup_mmap(oldmm, mm); - retval = 0; -@@ -430,14 +475,6 @@ out: - flush_tlb_mm(oldmm); - up_write(&oldmm->mmap_sem); - return retval; --fail_nomem_anon_vma_fork: -- mpol_put(pol); --fail_nomem_policy: -- kmem_cache_free(vm_area_cachep, tmp); --fail_nomem: -- retval = -ENOMEM; -- vm_unacct_memory(charge); -- goto out; - } - - static inline int mm_alloc_pgd(struct mm_struct *mm) -@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) - spin_unlock(&fs->lock); - return -EAGAIN; - } -- fs->users++; -+ atomic_inc(&fs->users); - spin_unlock(&fs->lock); - return 0; - } - tsk->fs = copy_fs_struct(fs); - if (!tsk->fs) - return -ENOMEM; -+ gr_set_chroot_entries(tsk, &tsk->fs->root); - return 0; - } - -@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, - DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); - #endif - retval = -EAGAIN; -+ -+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0); -+ - if (atomic_read(&p->real_cred->user->processes) >= - task_rlimit(p, RLIMIT_NPROC)) { - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && -@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, - if (clone_flags & CLONE_THREAD) - p->tgid = current->tgid; - -+ gr_copy_label(p); -+ - p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; - /* - * Clear TID on mm_release()? 
-@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count: - bad_fork_free: - free_task(p); - fork_out: -+ gr_log_forkfail(retval); -+ - return ERR_PTR(retval); - } - -@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags, - if (clone_flags & CLONE_PARENT_SETTID) - put_user(nr, parent_tidptr); - -+ gr_handle_brute_check(); -+ - if (clone_flags & CLONE_VFORK) { - p->vfork_done = &vfork; - init_completion(&vfork); -@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) - return 0; - - /* don't need lock here; in the worst case we'll do useless copy */ -- if (fs->users == 1) -+ if (atomic_read(&fs->users) == 1) - return 0; - - *new_fsp = copy_fs_struct(fs); -@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) - fs = current->fs; - spin_lock(&fs->lock); - current->fs = new_fs; -- if (--fs->users) -+ gr_set_chroot_entries(current, &current->fs->root); -+ if (atomic_dec_return(&fs->users)) - new_fs = NULL; - else - new_fs = fs; -diff --git a/kernel/futex.c b/kernel/futex.c -index 11cbe05..9ff191b 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -54,6 +54,7 @@ - #include <linux/mount.h> - #include <linux/pagemap.h> - #include <linux/syscalls.h> -+#include <linux/ptrace.h> - #include <linux/signal.h> - #include <linux/module.h> - #include <linux/magic.h> -@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) - struct page *page, *page_head; - int err, ro = 0; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE) -+ return -EFAULT; -+#endif -+ - /* - * The futex address must be "naturally" aligned. - */ -@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, - struct futex_q q = futex_q_init; - int ret; - -+ pax_track_stack(); -+ - if (!bitset) - return -EINVAL; - q.bitset = bitset; -@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - struct futex_q q = futex_q_init; - int res, ret; - -+ pax_track_stack(); -+ - if (!bitset) - return -EINVAL; - -@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, - { - struct robust_list_head __user *head; - unsigned long ret; -+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP - const struct cred *cred = current_cred(), *pcred; -+#endif - - if (!futex_cmpxchg_enabled) - return -ENOSYS; -@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, - if (!p) - goto err_unlock; - ret = -EPERM; -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (!ptrace_may_access(p, PTRACE_MODE_READ)) -+ goto err_unlock; -+#else - pcred = __task_cred(p); - /* If victim is in different user_ns, then uids are not - comparable, so we must have CAP_SYS_PTRACE */ -@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, - !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE)) - goto err_unlock; - ok: -+#endif - head = p->robust_list; - rcu_read_unlock(); - } -@@ -2712,6 +2729,7 @@ static int __init futex_init(void) - { - u32 curval; - int i; -+ mm_segment_t oldfs; - - /* - * This will fail and we want it. Some arch implementations do -@@ -2723,8 +2741,11 @@ static int __init futex_init(void) - * implementation, the non-functional ones will return - * -ENOSYS. 
- */ -+ oldfs = get_fs(); -+ set_fs(USER_DS); - if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) - futex_cmpxchg_enabled = 1; -+ set_fs(oldfs); - - for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { - plist_head_init(&futex_queues[i].chain); -diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c -index 5f9e689..03afa21 100644 ---- a/kernel/futex_compat.c -+++ b/kernel/futex_compat.c -@@ -10,6 +10,7 @@ - #include <linux/compat.h> - #include <linux/nsproxy.h> - #include <linux/futex.h> -+#include <linux/ptrace.h> - - #include <asm/uaccess.h> - -@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, - { - struct compat_robust_list_head __user *head; - unsigned long ret; -- const struct cred *cred = current_cred(), *pcred; -+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP -+ const struct cred *cred = current_cred(); -+ const struct cred *pcred; -+#endif - - if (!futex_cmpxchg_enabled) - return -ENOSYS; -@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, - if (!p) - goto err_unlock; - ret = -EPERM; -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (!ptrace_may_access(p, PTRACE_MODE_READ)) -+ goto err_unlock; -+#else - pcred = __task_cred(p); - /* If victim is in different user_ns, then uids are not - comparable, so we must have CAP_SYS_PTRACE */ -@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, - !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE)) - goto err_unlock; - ok: -+#endif - head = p->compat_robust_list; - rcu_read_unlock(); - } -diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c -index 9b22d03..6295b62 100644 ---- a/kernel/gcov/base.c -+++ b/kernel/gcov/base.c -@@ -102,11 +102,6 @@ void gcov_enable_events(void) - } - - #ifdef CONFIG_MODULES --static inline int within(void *addr, void *start, unsigned long size) --{ -- return ((addr >= start) && (addr < start + size)); --} -- - /* Update list and generate events when modules are unloaded. */ - static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, - void *data) -@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, - prev = NULL; - /* Remove entries located in module from linked list. 
*/ - for (info = gcov_info_head; info; info = info->next) { -- if (within(info, mod->module_core, mod->core_size)) { -+ if (within_module_core_rw((unsigned long)info, mod)) { - if (prev) - prev->next = info->next; - else -diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c -index 2043c08..ec81a69 100644 ---- a/kernel/hrtimer.c -+++ b/kernel/hrtimer.c -@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void) - local_irq_restore(flags); - } - --static void run_hrtimer_softirq(struct softirq_action *h) -+static void run_hrtimer_softirq(void) - { - hrtimer_peek_ahead_timers(); - } -diff --git a/kernel/jump_label.c b/kernel/jump_label.c -index e6f1f24..6c19597 100644 ---- a/kernel/jump_label.c -+++ b/kernel/jump_label.c -@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) - - size = (((unsigned long)stop - (unsigned long)start) - / sizeof(struct jump_entry)); -+ pax_open_kernel(); - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); -+ pax_close_kernel(); - } - - static void jump_label_update(struct jump_label_key *key, int enable); -@@ -298,10 +300,12 @@ static void jump_label_invalidate_module_init(struct module *mod) - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - -+ pax_open_kernel(); - for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(iter->code, mod)) - iter->code = 0; - } -+ pax_close_kernel(); - } - - static int -diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c -index 079f1d3..a407562 100644 ---- a/kernel/kallsyms.c -+++ b/kernel/kallsyms.c -@@ -11,6 +11,9 @@ - * Changed the compression method from stem compression to "table lookup" - * compression (see scripts/kallsyms.c for a more complete description) - */ -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+#define __INCLUDED_BY_HIDESYM 1 -+#endif - #include <linux/kallsyms.h> - #include <linux/module.h> - #include <linux/init.h> -@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak)); - - static inline int is_kernel_inittext(unsigned long addr) - { -+ if (system_state != SYSTEM_BOOTING) -+ return 0; -+ - if (addr >= (unsigned long)_sinittext - && addr <= (unsigned long)_einittext) - return 1; - return 0; - } - -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+#ifdef CONFIG_MODULES -+static inline int is_module_text(unsigned long addr) -+{ -+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END) -+ return 1; -+ -+ addr = ktla_ktva(addr); -+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END; -+} -+#else -+static inline int is_module_text(unsigned long addr) -+{ -+ return 0; -+} -+#endif -+#endif -+ - static inline int is_kernel_text(unsigned long addr) - { - if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || -@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr) - - static inline int is_kernel(unsigned long addr) - { -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (is_kernel_text(addr) || is_kernel_inittext(addr)) -+ return 1; -+ -+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end) -+#else - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) -+#endif -+ - return 1; - return in_gate_area_no_mm(addr); - } - - static int is_ksym_addr(unsigned long addr) - { -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (is_module_text(addr)) -+ return 0; -+#endif -+ - if (all_var) - return 
is_kernel(addr); - -@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) - - static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) - { -- iter->name[0] = '\0'; - iter->nameoff = get_symbol_offset(new_pos); - iter->pos = new_pos; - } -@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p) - { - struct kallsym_iter *iter = m->private; - -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ if (current_uid()) -+ return 0; -+#endif -+ - /* Some debugging symbols have no name. Ignore them. */ - if (!iter->name[0]) - return 0; -@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file) - struct kallsym_iter *iter; - int ret; - -- iter = kmalloc(sizeof(*iter), GFP_KERNEL); -+ iter = kzalloc(sizeof(*iter), GFP_KERNEL); - if (!iter) - return -ENOMEM; - reset_iter(iter, 0); -diff --git a/kernel/kexec.c b/kernel/kexec.c -index 296fbc8..84cb857 100644 ---- a/kernel/kexec.c -+++ b/kernel/kexec.c -@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, - unsigned long flags) - { - struct compat_kexec_segment in; -- struct kexec_segment out, __user *ksegments; -+ struct kexec_segment out; -+ struct kexec_segment __user *ksegments; - unsigned long i, result; - - /* Don't allow clients that don't understand the native -diff --git a/kernel/kmod.c b/kernel/kmod.c -index a4bea97..7a1ae9a 100644 ---- a/kernel/kmod.c -+++ b/kernel/kmod.c -@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe"; - * If module auto-loading support is disabled then this function - * becomes a no-operation. - */ --int __request_module(bool wait, const char *fmt, ...) -+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap) - { -- va_list args; - char module_name[MODULE_NAME_LEN]; - unsigned int max_modprobes; - int ret; -- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL }; -+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL }; - static char *envp[] = { "HOME=/", - "TERM=linux", - "PATH=/sbin:/usr/sbin:/bin:/usr/bin", -@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...) - #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ - static int kmod_loop_msg; - -- va_start(args, fmt); -- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); -- va_end(args); -+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap); - if (ret >= MODULE_NAME_LEN) - return -ENAMETOOLONG; - -@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...) - if (ret) - return ret; - -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ if (!current_uid()) { -+ /* hack to workaround consolekit/udisks stupidity */ -+ read_lock(&tasklist_lock); -+ if (!strcmp(current->comm, "mount") && -+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) { -+ read_unlock(&tasklist_lock); -+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name); -+ return -EPERM; -+ } -+ read_unlock(&tasklist_lock); -+ } -+#endif -+ - /* If modprobe needs a service that is in a module, we get a recursive - * loop. Limit the number of running kmod threads to max_threads/2 or - * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method -@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...) - atomic_dec(&kmod_concurrent); - return ret; - } -+ -+int ___request_module(bool wait, char *module_param, const char *fmt, ...) 
-+{ -+ va_list args; -+ int ret; -+ -+ va_start(args, fmt); -+ ret = ____request_module(wait, module_param, fmt, args); -+ va_end(args); -+ -+ return ret; -+} -+ -+int __request_module(bool wait, const char *fmt, ...) -+{ -+ va_list args; -+ int ret; -+ -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ if (current_uid()) { -+ char module_param[MODULE_NAME_LEN]; -+ -+ memset(module_param, 0, sizeof(module_param)); -+ -+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid()); -+ -+ va_start(args, fmt); -+ ret = ____request_module(wait, module_param, fmt, args); -+ va_end(args); -+ -+ return ret; -+ } -+#endif -+ -+ va_start(args, fmt); -+ ret = ____request_module(wait, NULL, fmt, args); -+ va_end(args); -+ -+ return ret; -+} -+ - EXPORT_SYMBOL(__request_module); - #endif /* CONFIG_MODULES */ - -@@ -222,7 +274,7 @@ static int wait_for_helper(void *data) - * - * Thus the __user pointer cast is valid here. - */ -- sys_wait4(pid, (int __user *)&ret, 0, NULL); -+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL); - - /* - * If ret is 0, either ____call_usermodehelper failed and the -diff --git a/kernel/kprobes.c b/kernel/kprobes.c -index b30fd54..11821ec 100644 ---- a/kernel/kprobes.c -+++ b/kernel/kprobes.c -@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) - * kernel image and loaded module images reside. This is required - * so x86_64 can correctly handle the %rip-relative fixups. - */ -- kip->insns = module_alloc(PAGE_SIZE); -+ kip->insns = module_alloc_exec(PAGE_SIZE); - if (!kip->insns) { - kfree(kip); - return NULL; -@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) - */ - if (!list_is_singular(&kip->list)) { - list_del(&kip->list); -- module_free(NULL, kip->insns); -+ module_free_exec(NULL, kip->insns); - kfree(kip); - } - return 1; -@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void) - { - int i, err = 0; - unsigned long offset = 0, size = 0; -- char *modname, namebuf[128]; -+ char *modname, namebuf[KSYM_NAME_LEN]; - const char *symbol_name; - void *addr; - struct kprobe_blackpoint *kb; -@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) - const char *sym = NULL; - unsigned int i = *(loff_t *) v; - unsigned long offset = 0; -- char *modname, namebuf[128]; -+ char *modname, namebuf[KSYM_NAME_LEN]; - - head = &kprobe_table[i]; - preempt_disable(); -diff --git a/kernel/lockdep.c b/kernel/lockdep.c -index 91d67ce..ac259df 100644 ---- a/kernel/lockdep.c -+++ b/kernel/lockdep.c -@@ -583,6 +583,10 @@ static int static_obj(void *obj) - end = (unsigned long) &_end, - addr = (unsigned long) obj; - -+#ifdef CONFIG_PAX_KERNEXEC -+ start = ktla_ktva(start); -+#endif -+ - /* - * static variable? 
- */ -@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) - if (!static_obj(lock->key)) { - debug_locks_off(); - printk("INFO: trying to register non-static key.\n"); -+ printk("lock:%pS key:%pS.\n", lock, lock->key); - printk("the code is fine but needs lockdep annotation.\n"); - printk("turning off the locking correctness validator.\n"); - dump_stack(); -@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, - if (!class) - return 0; - } -- atomic_inc((atomic_t *)&class->ops); -+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops); - if (very_verbose(class)) { - printk("\nacquire class [%p] %s", class->key, class->name); - if (class->name_version > 1) -diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c -index 71edd2f..e0542a5 100644 ---- a/kernel/lockdep_proc.c -+++ b/kernel/lockdep_proc.c -@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v) - - static void print_name(struct seq_file *m, struct lock_class *class) - { -- char str[128]; -+ char str[KSYM_NAME_LEN]; - const char *name = class->name; - - if (!name) { -diff --git a/kernel/module.c b/kernel/module.c -index 04379f92..fba2faf 100644 ---- a/kernel/module.c -+++ b/kernel/module.c -@@ -58,6 +58,7 @@ - #include <linux/jump_label.h> - #include <linux/pfn.h> - #include <linux/bsearch.h> -+#include <linux/grsecurity.h> - - #define CREATE_TRACE_POINTS - #include <trace/events/module.h> -@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list); - - /* Bounds of module allocation, for speeding __module_address. - * Protected by module_mutex. */ --static unsigned long module_addr_min = -1UL, module_addr_max = 0; -+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0; -+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0; - - int register_module_notifier(struct notifier_block * nb) - { -@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - return true; - - list_for_each_entry_rcu(mod, &modules, list) { -- struct symsearch arr[] = { -+ struct symsearch modarr[] = { - { mod->syms, mod->syms + mod->num_syms, mod->crcs, - NOT_GPL_ONLY, false }, - { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, -@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - #endif - }; - -- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) -+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data)) - return true; - } - return false; -@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod) - static int percpu_modalloc(struct module *mod, - unsigned long size, unsigned long align) - { -- if (align > PAGE_SIZE) { -+ if (align-1 >= PAGE_SIZE) { - printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", - mod->name, align, PAGE_SIZE); - align = PAGE_SIZE; -@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod, - */ - #ifdef CONFIG_SYSFS - --#ifdef CONFIG_KALLSYMS -+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) - static inline bool sect_empty(const Elf_Shdr *sect) - { - return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; -@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base, - - static void unset_module_core_ro_nx(struct module *mod) - { -- set_page_attributes(mod->module_core + mod->core_text_size, -- mod->module_core + mod->core_size, -+ set_page_attributes(mod->module_core_rw, -+ mod->module_core_rw + mod->core_size_rw, - set_memory_x); -- 
set_page_attributes(mod->module_core, -- mod->module_core + mod->core_ro_size, -+ set_page_attributes(mod->module_core_rx, -+ mod->module_core_rx + mod->core_size_rx, - set_memory_rw); - } - - static void unset_module_init_ro_nx(struct module *mod) - { -- set_page_attributes(mod->module_init + mod->init_text_size, -- mod->module_init + mod->init_size, -+ set_page_attributes(mod->module_init_rw, -+ mod->module_init_rw + mod->init_size_rw, - set_memory_x); -- set_page_attributes(mod->module_init, -- mod->module_init + mod->init_ro_size, -+ set_page_attributes(mod->module_init_rx, -+ mod->module_init_rx + mod->init_size_rx, - set_memory_rw); - } - -@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void) - - mutex_lock(&module_mutex); - list_for_each_entry_rcu(mod, &modules, list) { -- if ((mod->module_core) && (mod->core_text_size)) { -- set_page_attributes(mod->module_core, -- mod->module_core + mod->core_text_size, -+ if ((mod->module_core_rx) && (mod->core_size_rx)) { -+ set_page_attributes(mod->module_core_rx, -+ mod->module_core_rx + mod->core_size_rx, - set_memory_rw); - } -- if ((mod->module_init) && (mod->init_text_size)) { -- set_page_attributes(mod->module_init, -- mod->module_init + mod->init_text_size, -+ if ((mod->module_init_rx) && (mod->init_size_rx)) { -+ set_page_attributes(mod->module_init_rx, -+ mod->module_init_rx + mod->init_size_rx, - set_memory_rw); - } - } -@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void) - - mutex_lock(&module_mutex); - list_for_each_entry_rcu(mod, &modules, list) { -- if ((mod->module_core) && (mod->core_text_size)) { -- set_page_attributes(mod->module_core, -- mod->module_core + mod->core_text_size, -+ if ((mod->module_core_rx) && (mod->core_size_rx)) { -+ set_page_attributes(mod->module_core_rx, -+ mod->module_core_rx + mod->core_size_rx, - set_memory_ro); - } -- if ((mod->module_init) && (mod->init_text_size)) { -- set_page_attributes(mod->module_init, -- mod->module_init + mod->init_text_size, -+ if ((mod->module_init_rx) && (mod->init_size_rx)) { -+ set_page_attributes(mod->module_init_rx, -+ mod->module_init_rx + mod->init_size_rx, - set_memory_ro); - } - } -@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod) - - /* This may be NULL, but that's OK */ - unset_module_init_ro_nx(mod); -- module_free(mod, mod->module_init); -+ module_free(mod, mod->module_init_rw); -+ module_free_exec(mod, mod->module_init_rx); - kfree(mod->args); - percpu_modfree(mod); - - /* Free lock-classes: */ -- lockdep_free_key_range(mod->module_core, mod->core_size); -+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); -+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); - - /* Finally, free the core (containing the module structure) */ - unset_module_core_ro_nx(mod); -- module_free(mod, mod->module_core); -+ module_free_exec(mod, mod->module_core_rx); -+ module_free(mod, mod->module_core_rw); - - #ifdef CONFIG_MPU - update_protections(current->mm); -@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) - unsigned int i; - int ret = 0; - const struct kernel_symbol *ksym; -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ int is_fs_load = 0; -+ int register_filesystem_found = 0; -+ char *p; -+ -+ p = strstr(mod->args, "grsec_modharden_fs"); -+ if (p) { -+ char *endptr = p + strlen("grsec_modharden_fs"); -+ /* copy \0 as well */ -+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1); -+ is_fs_load = 1; -+ } -+#endif - - for (i = 1; i < symsec->sh_size / 
sizeof(Elf_Sym); i++) { - const char *name = info->strtab + sym[i].st_name; - -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ /* it's a real shame this will never get ripped and copied -+ upstream! ;( -+ */ -+ if (is_fs_load && !strcmp(name, "register_filesystem")) -+ register_filesystem_found = 1; -+#endif -+ - switch (sym[i].st_shndx) { - case SHN_COMMON: - /* We compiled with -fno-common. These are not -@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) - ksym = resolve_symbol_wait(mod, info, name); - /* Ok if resolved. */ - if (ksym && !IS_ERR(ksym)) { -+ pax_open_kernel(); - sym[i].st_value = ksym->value; -+ pax_close_kernel(); - break; - } - -@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) - secbase = (unsigned long)mod_percpu(mod); - else - secbase = info->sechdrs[sym[i].st_shndx].sh_addr; -+ pax_open_kernel(); - sym[i].st_value += secbase; -+ pax_close_kernel(); - break; - } - } - -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ if (is_fs_load && !register_filesystem_found) { -+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name); -+ ret = -EPERM; -+ } -+#endif -+ - return ret; - } - -@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info) - || s->sh_entsize != ~0UL - || strstarts(sname, ".init")) - continue; -- s->sh_entsize = get_offset(mod, &mod->core_size, s, i); -+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) -+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i); -+ else -+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i); - DEBUGP("\t%s\n", name); - } -- switch (m) { -- case 0: /* executable */ -- mod->core_size = debug_align(mod->core_size); -- mod->core_text_size = mod->core_size; -- break; -- case 1: /* RO: text and ro-data */ -- mod->core_size = debug_align(mod->core_size); -- mod->core_ro_size = mod->core_size; -- break; -- case 3: /* whole core */ -- mod->core_size = debug_align(mod->core_size); -- break; -- } - } - - DEBUGP("Init section allocation order:\n"); -@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info) - || s->sh_entsize != ~0UL - || !strstarts(sname, ".init")) - continue; -- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) -- | INIT_OFFSET_MASK); -+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) -+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i); -+ else -+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i); -+ s->sh_entsize |= INIT_OFFSET_MASK; - DEBUGP("\t%s\n", sname); - } -- switch (m) { -- case 0: /* executable */ -- mod->init_size = debug_align(mod->init_size); -- mod->init_text_size = mod->init_size; -- break; -- case 1: /* RO: text and ro-data */ -- mod->init_size = debug_align(mod->init_size); -- mod->init_ro_size = mod->init_size; -- break; -- case 3: /* whole init */ -- mod->init_size = debug_align(mod->init_size); -- break; -- } - } - } - -@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) - - /* Put symbol section at end of init part of module. 
*/ - symsect->sh_flags |= SHF_ALLOC; -- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, -+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect, - info->index.sym) | INIT_OFFSET_MASK; - DEBUGP("\t%s\n", info->secstrings + symsect->sh_name); - -@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info) - } - - /* Append room for core symbols at end of core part. */ -- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); -- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); -+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1); -+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym); - - /* Put string table section at end of init part of module. */ - strsect->sh_flags |= SHF_ALLOC; -- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, -+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, - info->index.str) | INIT_OFFSET_MASK; - DEBUGP("\t%s\n", info->secstrings + strsect->sh_name); - - /* Append room for core symbols' strings at end of core part. */ -- info->stroffs = mod->core_size; -+ info->stroffs = mod->core_size_rx; - __set_bit(0, info->strmap); -- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size); -+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size); - } - - static void add_kallsyms(struct module *mod, const struct load_info *info) -@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) - /* Make sure we get permanent strtab: don't use info->strtab. */ - mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; - -+ pax_open_kernel(); -+ - /* Set types up while we still have access to sections. */ - for (i = 0; i < mod->num_symtab; i++) - mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); - -- mod->core_symtab = dst = mod->module_core + info->symoffs; -+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs; - src = mod->symtab; - *dst = *src; - for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { -@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) - } - mod->core_num_syms = ndst; - -- mod->core_strtab = s = mod->module_core + info->stroffs; -+ mod->core_strtab = s = mod->module_core_rx + info->stroffs; - for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i) - if (test_bit(i, info->strmap)) - *++s = mod->strtab[i]; -+ -+ pax_close_kernel(); - } - #else - static inline void layout_symtab(struct module *mod, struct load_info *info) -@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size) - return size == 0 ? NULL : vmalloc_exec(size); - } - --static void *module_alloc_update_bounds(unsigned long size) -+static void *module_alloc_update_bounds_rw(unsigned long size) - { - void *ret = module_alloc(size); - - if (ret) { - mutex_lock(&module_mutex); - /* Update module bounds. 
*/ -- if ((unsigned long)ret < module_addr_min) -- module_addr_min = (unsigned long)ret; -- if ((unsigned long)ret + size > module_addr_max) -- module_addr_max = (unsigned long)ret + size; -+ if ((unsigned long)ret < module_addr_min_rw) -+ module_addr_min_rw = (unsigned long)ret; -+ if ((unsigned long)ret + size > module_addr_max_rw) -+ module_addr_max_rw = (unsigned long)ret + size; -+ mutex_unlock(&module_mutex); -+ } -+ return ret; -+} -+ -+static void *module_alloc_update_bounds_rx(unsigned long size) -+{ -+ void *ret = module_alloc_exec(size); -+ -+ if (ret) { -+ mutex_lock(&module_mutex); -+ /* Update module bounds. */ -+ if ((unsigned long)ret < module_addr_min_rx) -+ module_addr_min_rx = (unsigned long)ret; -+ if ((unsigned long)ret + size > module_addr_max_rx) -+ module_addr_max_rx = (unsigned long)ret + size; - mutex_unlock(&module_mutex); - } - return ret; -@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info) - static int check_modinfo(struct module *mod, struct load_info *info) - { - const char *modmagic = get_modinfo(info, "vermagic"); -+ const char *license = get_modinfo(info, "license"); - int err; - -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ if (!license || !license_is_gpl_compatible(license)) -+ return -ENOEXEC; -+#endif -+ - /* This is allowed: modprobe --force will invalidate it. */ - if (!modmagic) { - err = try_to_force_load(mod, "bad vermagic"); -@@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info) - } - - /* Set up license info based on the info section */ -- set_license(mod, get_modinfo(info, "license")); -+ set_license(mod, license); - - return 0; - } -@@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info) - void *ptr; - - /* Do the allocs. */ -- ptr = module_alloc_update_bounds(mod->core_size); -+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw); - /* - * The pointer to this block is stored in the module structure - * which is inside the block. Just mark it as not being a -@@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info) - if (!ptr) - return -ENOMEM; - -- memset(ptr, 0, mod->core_size); -- mod->module_core = ptr; -+ memset(ptr, 0, mod->core_size_rw); -+ mod->module_core_rw = ptr; - -- ptr = module_alloc_update_bounds(mod->init_size); -+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw); - /* - * The pointer to this block is stored in the module structure - * which is inside the block. This block doesn't need to be - * scanned as it contains data and code that will be freed - * after the module is initialized. 
- */ -- kmemleak_ignore(ptr); -- if (!ptr && mod->init_size) { -- module_free(mod, mod->module_core); -+ kmemleak_not_leak(ptr); -+ if (!ptr && mod->init_size_rw) { -+ module_free(mod, mod->module_core_rw); - return -ENOMEM; - } -- memset(ptr, 0, mod->init_size); -- mod->module_init = ptr; -+ memset(ptr, 0, mod->init_size_rw); -+ mod->module_init_rw = ptr; -+ -+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx); -+ kmemleak_not_leak(ptr); -+ if (!ptr) { -+ module_free(mod, mod->module_init_rw); -+ module_free(mod, mod->module_core_rw); -+ return -ENOMEM; -+ } -+ -+ pax_open_kernel(); -+ memset(ptr, 0, mod->core_size_rx); -+ pax_close_kernel(); -+ mod->module_core_rx = ptr; -+ -+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx); -+ kmemleak_not_leak(ptr); -+ if (!ptr && mod->init_size_rx) { -+ module_free_exec(mod, mod->module_core_rx); -+ module_free(mod, mod->module_init_rw); -+ module_free(mod, mod->module_core_rw); -+ return -ENOMEM; -+ } -+ -+ pax_open_kernel(); -+ memset(ptr, 0, mod->init_size_rx); -+ pax_close_kernel(); -+ mod->module_init_rx = ptr; - - /* Transfer each section which specifies SHF_ALLOC */ - DEBUGP("final section addresses:\n"); -@@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info) - if (!(shdr->sh_flags & SHF_ALLOC)) - continue; - -- if (shdr->sh_entsize & INIT_OFFSET_MASK) -- dest = mod->module_init -- + (shdr->sh_entsize & ~INIT_OFFSET_MASK); -- else -- dest = mod->module_core + shdr->sh_entsize; -+ if (shdr->sh_entsize & INIT_OFFSET_MASK) { -+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) -+ dest = mod->module_init_rw -+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); -+ else -+ dest = mod->module_init_rx -+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); -+ } else { -+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) -+ dest = mod->module_core_rw + shdr->sh_entsize; -+ else -+ dest = mod->module_core_rx + shdr->sh_entsize; -+ } -+ -+ if (shdr->sh_type != SHT_NOBITS) { -+ -+#ifdef CONFIG_PAX_KERNEXEC -+#ifdef CONFIG_X86_64 -+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR)) -+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT); -+#endif -+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) { -+ pax_open_kernel(); -+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); -+ pax_close_kernel(); -+ } else -+#endif - -- if (shdr->sh_type != SHT_NOBITS) - memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); -+ } - /* Update sh_addr to point to copy in image. */ -- shdr->sh_addr = (unsigned long)dest; -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ if (shdr->sh_flags & SHF_EXECINSTR) -+ shdr->sh_addr = ktva_ktla((unsigned long)dest); -+ else -+#endif -+ -+ shdr->sh_addr = (unsigned long)dest; - DEBUGP("\t0x%lx %s\n", - shdr->sh_addr, info->secstrings + shdr->sh_name); - } -@@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod) - * Do it before processing of module parameters, so the module - * can provide parameter accessor functions of its own. 
- */ -- if (mod->module_init) -- flush_icache_range((unsigned long)mod->module_init, -- (unsigned long)mod->module_init -- + mod->init_size); -- flush_icache_range((unsigned long)mod->module_core, -- (unsigned long)mod->module_core + mod->core_size); -+ if (mod->module_init_rx) -+ flush_icache_range((unsigned long)mod->module_init_rx, -+ (unsigned long)mod->module_init_rx -+ + mod->init_size_rx); -+ flush_icache_range((unsigned long)mod->module_core_rx, -+ (unsigned long)mod->module_core_rx + mod->core_size_rx); - - set_fs(old_fs); - } -@@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info) - { - kfree(info->strmap); - percpu_modfree(mod); -- module_free(mod, mod->module_init); -- module_free(mod, mod->module_core); -+ module_free_exec(mod, mod->module_init_rx); -+ module_free_exec(mod, mod->module_core_rx); -+ module_free(mod, mod->module_init_rw); -+ module_free(mod, mod->module_core_rw); - } - - int __weak module_finalize(const Elf_Ehdr *hdr, -@@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod, - if (err) - goto free_unload; - -+ /* Now copy in args */ -+ mod->args = strndup_user(uargs, ~0UL >> 1); -+ if (IS_ERR(mod->args)) { -+ err = PTR_ERR(mod->args); -+ goto free_unload; -+ } -+ - /* Set up MODINFO_ATTR fields */ - setup_modinfo(mod, &info); - -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ { -+ char *p, *p2; -+ -+ if (strstr(mod->args, "grsec_modharden_netdev")) { -+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name); -+ err = -EPERM; -+ goto free_modinfo; -+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) { -+ p += strlen("grsec_modharden_normal"); -+ p2 = strstr(p, "_"); -+ if (p2) { -+ *p2 = '\0'; -+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p); -+ *p2 = '_'; -+ } -+ err = -EPERM; -+ goto free_modinfo; -+ } -+ } -+#endif -+ - /* Fix up syms, so that st_value is a pointer to location. */ - err = simplify_symbols(mod, &info); - if (err < 0) -@@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod, - - flush_module_icache(mod); - -- /* Now copy in args */ -- mod->args = strndup_user(uargs, ~0UL >> 1); -- if (IS_ERR(mod->args)) { -- err = PTR_ERR(mod->args); -- goto free_arch_cleanup; -- } -- - /* Mark state as coming so strong_try_module_get() ignores us. 
*/ - mod->state = MODULE_STATE_COMING; - -@@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod, - unlock: - mutex_unlock(&module_mutex); - synchronize_sched(); -- kfree(mod->args); -- free_arch_cleanup: - module_arch_cleanup(mod); - free_modinfo: - free_modinfo(mod); -+ kfree(mod->args); - free_unload: - module_unload_free(mod); - free_module: -@@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, - MODULE_STATE_COMING, mod); - - /* Set RO and NX regions for core */ -- set_section_ro_nx(mod->module_core, -- mod->core_text_size, -- mod->core_ro_size, -- mod->core_size); -+ set_section_ro_nx(mod->module_core_rx, -+ mod->core_size_rx, -+ mod->core_size_rx, -+ mod->core_size_rx); - - /* Set RO and NX regions for init */ -- set_section_ro_nx(mod->module_init, -- mod->init_text_size, -- mod->init_ro_size, -- mod->init_size); -+ set_section_ro_nx(mod->module_init_rx, -+ mod->init_size_rx, -+ mod->init_size_rx, -+ mod->init_size_rx); - - do_mod_ctors(mod); - /* Start the module */ -@@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, - mod->strtab = mod->core_strtab; - #endif - unset_module_init_ro_nx(mod); -- module_free(mod, mod->module_init); -- mod->module_init = NULL; -- mod->init_size = 0; -- mod->init_ro_size = 0; -- mod->init_text_size = 0; -+ module_free(mod, mod->module_init_rw); -+ module_free_exec(mod, mod->module_init_rx); -+ mod->module_init_rw = NULL; -+ mod->module_init_rx = NULL; -+ mod->init_size_rw = 0; -+ mod->init_size_rx = 0; - mutex_unlock(&module_mutex); - - return 0; -@@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod, - unsigned long nextval; - - /* At worse, next value is at end of module */ -- if (within_module_init(addr, mod)) -- nextval = (unsigned long)mod->module_init+mod->init_text_size; -+ if (within_module_init_rx(addr, mod)) -+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; -+ else if (within_module_init_rw(addr, mod)) -+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; -+ else if (within_module_core_rx(addr, mod)) -+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; -+ else if (within_module_core_rw(addr, mod)) -+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; - else -- nextval = (unsigned long)mod->module_core+mod->core_text_size; -+ return NULL; - - /* Scan for closest preceding symbol, and next symbol. (ELF - starts real symbols at 1). */ -@@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p) - char buf[8]; - - seq_printf(m, "%s %u", -- mod->name, mod->init_size + mod->core_size); -+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); - print_unload_info(m, mod); - - /* Informative for users. */ -@@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p) - mod->state == MODULE_STATE_COMING ? "Loading": - "Live"); - /* Used by oprofile and other similar tools. 
*/ -- seq_printf(m, " 0x%pK", mod->module_core); -+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw); - - /* Taints info */ - if (mod->taints) -@@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = { - - static int __init proc_modules_init(void) - { -+#ifndef CONFIG_GRKERNSEC_HIDESYM -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations); -+#else - proc_create("modules", 0, NULL, &proc_modules_operations); -+#endif -+#else -+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); -+#endif - return 0; - } - module_init(proc_modules_init); -@@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr) - { - struct module *mod; - -- if (addr < module_addr_min || addr > module_addr_max) -+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) && -+ (addr < module_addr_min_rw || addr > module_addr_max_rw)) - return NULL; - - list_for_each_entry_rcu(mod, &modules, list) -- if (within_module_core(addr, mod) -- || within_module_init(addr, mod)) -+ if (within_module_init(addr, mod) || within_module_core(addr, mod)) - return mod; - return NULL; - } -@@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr) - */ - struct module *__module_text_address(unsigned long addr) - { -- struct module *mod = __module_address(addr); -+ struct module *mod; -+ -+#ifdef CONFIG_X86_32 -+ addr = ktla_ktva(addr); -+#endif -+ -+ if (addr < module_addr_min_rx || addr > module_addr_max_rx) -+ return NULL; -+ -+ mod = __module_address(addr); -+ - if (mod) { - /* Make sure it's within the text section. */ -- if (!within(addr, mod->module_init, mod->init_text_size) -- && !within(addr, mod->module_core, mod->core_text_size)) -+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod)) - mod = NULL; - } - return mod; -diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c -index 73da83a..fe46e99 100644 ---- a/kernel/mutex-debug.c -+++ b/kernel/mutex-debug.c -@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) - } - - void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, -- struct thread_info *ti) -+ struct task_struct *task) - { - SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); - - /* Mark the current thread as blocked on the lock: */ -- ti->task->blocked_on = waiter; -+ task->blocked_on = waiter; - } - - void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, -- struct thread_info *ti) -+ struct task_struct *task) - { - DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); -- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); -- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); -- ti->task->blocked_on = NULL; -+ DEBUG_LOCKS_WARN_ON(waiter->task != task); -+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); -+ task->blocked_on = NULL; - - list_del_init(&waiter->list); - waiter->task = NULL; -diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h -index 0799fd3..d06ae3b 100644 ---- a/kernel/mutex-debug.h -+++ b/kernel/mutex-debug.h -@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, - extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); - extern void debug_mutex_add_waiter(struct mutex *lock, - struct mutex_waiter *waiter, -- struct thread_info *ti); -+ struct task_struct *task); - extern void mutex_remove_waiter(struct mutex 
*lock, struct mutex_waiter *waiter, -- struct thread_info *ti); -+ struct task_struct *task); - extern void debug_mutex_unlock(struct mutex *lock); - extern void debug_mutex_init(struct mutex *lock, const char *name, - struct lock_class_key *key); -diff --git a/kernel/mutex.c b/kernel/mutex.c -index d607ed5..58d0a52 100644 ---- a/kernel/mutex.c -+++ b/kernel/mutex.c -@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, - spin_lock_mutex(&lock->wait_lock, flags); - - debug_mutex_lock_common(lock, &waiter); -- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); -+ debug_mutex_add_waiter(lock, &waiter, task); - - /* add waiting tasks to the end of the waitqueue (FIFO): */ - list_add_tail(&waiter.list, &lock->wait_list); -@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, - * TASK_UNINTERRUPTIBLE case.) - */ - if (unlikely(signal_pending_state(state, task))) { -- mutex_remove_waiter(lock, &waiter, -- task_thread_info(task)); -+ mutex_remove_waiter(lock, &waiter, task); - mutex_release(&lock->dep_map, 1, ip); - spin_unlock_mutex(&lock->wait_lock, flags); - -@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, - done: - lock_acquired(&lock->dep_map, ip); - /* got the lock - rejoice! */ -- mutex_remove_waiter(lock, &waiter, current_thread_info()); -+ mutex_remove_waiter(lock, &waiter, task); - mutex_set_owner(lock); - - /* set it to 0 if there are no waiters left: */ -diff --git a/kernel/padata.c b/kernel/padata.c -index b91941d..0871d60 100644 ---- a/kernel/padata.c -+++ b/kernel/padata.c -@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst, - padata->pd = pd; - padata->cb_cpu = cb_cpu; - -- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr)) -- atomic_set(&pd->seq_nr, -1); -+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr)) -+ atomic_set_unchecked(&pd->seq_nr, -1); - -- padata->seq_nr = atomic_inc_return(&pd->seq_nr); -+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr); - - target_cpu = padata_cpu_hash(padata); - queue = per_cpu_ptr(pd->pqueue, target_cpu); -@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, - padata_init_pqueues(pd); - padata_init_squeues(pd); - setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); -- atomic_set(&pd->seq_nr, -1); -+ atomic_set_unchecked(&pd->seq_nr, -1); - atomic_set(&pd->reorder_objects, 0); - atomic_set(&pd->refcnt, 0); - pd->pinst = pinst; -diff --git a/kernel/panic.c b/kernel/panic.c -index d7bb697..9ef9f19 100644 ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -371,7 +371,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, - const char *board; - - printk(KERN_WARNING "------------[ cut here ]------------\n"); -- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); -+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller); - board = dmi_get_system_info(DMI_PRODUCT_NAME); - if (board) - printk(KERN_WARNING "Hardware name: %s\n", board); -@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null); - */ - void __stack_chk_fail(void) - { -- panic("stack-protector: Kernel stack is corrupted in: %p\n", -+ dump_stack(); -+ panic("stack-protector: Kernel stack is corrupted in: %pA\n", - __builtin_return_address(0)); - } - EXPORT_SYMBOL(__stack_chk_fail); -diff --git a/kernel/pid.c b/kernel/pid.c -index e432057..a2b2ac5 100644 ---- a/kernel/pid.c -+++ 
b/kernel/pid.c -@@ -33,6 +33,7 @@ - #include <linux/rculist.h> - #include <linux/bootmem.h> - #include <linux/hash.h> -+#include <linux/security.h> - #include <linux/pid_namespace.h> - #include <linux/init_task.h> - #include <linux/syscalls.h> -@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID; - - int pid_max = PID_MAX_DEFAULT; - --#define RESERVED_PIDS 300 -+#define RESERVED_PIDS 500 - - int pid_max_min = RESERVED_PIDS + 1; - int pid_max_max = PID_MAX_LIMIT; -@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task); - */ - struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) - { -+ struct task_struct *task; -+ - rcu_lockdep_assert(rcu_read_lock_held()); -- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); -+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); -+ -+ if (gr_pid_is_chrooted(task)) -+ return NULL; -+ -+ return task; - } - - struct task_struct *find_task_by_vpid(pid_t vnr) -@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr) - return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns); - } - -+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr) -+{ -+ rcu_lockdep_assert(rcu_read_lock_held()); -+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID); -+} -+ - struct pid *get_task_pid(struct task_struct *task, enum pid_type type) - { - struct pid *pid; -diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c -index 640ded8..3dafb85 100644 ---- a/kernel/posix-cpu-timers.c -+++ b/kernel/posix-cpu-timers.c -@@ -6,6 +6,7 @@ - #include <linux/posix-timers.h> - #include <linux/errno.h> - #include <linux/math64.h> -+#include <linux/security.h> - #include <asm/uaccess.h> - #include <linux/kernel_stat.h> - #include <trace/events/timer.h> -@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = { - - static __init int init_posix_cpu_timers(void) - { -- struct k_clock process = { -+ static struct k_clock process = { - .clock_getres = process_cpu_clock_getres, - .clock_get = process_cpu_clock_get, - .timer_create = process_cpu_timer_create, - .nsleep = process_cpu_nsleep, - .nsleep_restart = process_cpu_nsleep_restart, - }; -- struct k_clock thread = { -+ static struct k_clock thread = { - .clock_getres = thread_cpu_clock_getres, - .clock_get = thread_cpu_clock_get, - .timer_create = thread_cpu_timer_create, -diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c -index 4556182..9335419 100644 ---- a/kernel/posix-timers.c -+++ b/kernel/posix-timers.c -@@ -43,6 +43,7 @@ - #include <linux/idr.h> - #include <linux/posix-clock.h> - #include <linux/posix-timers.h> -+#include <linux/grsecurity.h> - #include <linux/syscalls.h> - #include <linux/wait.h> - #include <linux/workqueue.h> -@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock); - * which we beg off on and pass to do_sys_settimeofday(). - */ - --static struct k_clock posix_clocks[MAX_CLOCKS]; -+static struct k_clock *posix_clocks[MAX_CLOCKS]; - - /* - * These ones are defined below. 
-@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp) - */ - static __init int init_posix_timers(void) - { -- struct k_clock clock_realtime = { -+ static struct k_clock clock_realtime = { - .clock_getres = hrtimer_get_res, - .clock_get = posix_clock_realtime_get, - .clock_set = posix_clock_realtime_set, -@@ -239,7 +240,7 @@ static __init int init_posix_timers(void) - .timer_get = common_timer_get, - .timer_del = common_timer_del, - }; -- struct k_clock clock_monotonic = { -+ static struct k_clock clock_monotonic = { - .clock_getres = hrtimer_get_res, - .clock_get = posix_ktime_get_ts, - .nsleep = common_nsleep, -@@ -249,19 +250,19 @@ static __init int init_posix_timers(void) - .timer_get = common_timer_get, - .timer_del = common_timer_del, - }; -- struct k_clock clock_monotonic_raw = { -+ static struct k_clock clock_monotonic_raw = { - .clock_getres = hrtimer_get_res, - .clock_get = posix_get_monotonic_raw, - }; -- struct k_clock clock_realtime_coarse = { -+ static struct k_clock clock_realtime_coarse = { - .clock_getres = posix_get_coarse_res, - .clock_get = posix_get_realtime_coarse, - }; -- struct k_clock clock_monotonic_coarse = { -+ static struct k_clock clock_monotonic_coarse = { - .clock_getres = posix_get_coarse_res, - .clock_get = posix_get_monotonic_coarse, - }; -- struct k_clock clock_boottime = { -+ static struct k_clock clock_boottime = { - .clock_getres = hrtimer_get_res, - .clock_get = posix_get_boottime, - .nsleep = common_nsleep, -@@ -272,6 +273,8 @@ static __init int init_posix_timers(void) - .timer_del = common_timer_del, - }; - -+ pax_track_stack(); -+ - posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime); - posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic); - posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); -@@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id, - return; - } - -- posix_clocks[clock_id] = *new_clock; -+ posix_clocks[clock_id] = new_clock; - } - EXPORT_SYMBOL_GPL(posix_timers_register_clock); - -@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id) - return (id & CLOCKFD_MASK) == CLOCKFD ? 
- &clock_posix_dynamic : &clock_posix_cpu; - -- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres) -+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres) - return NULL; -- return &posix_clocks[id]; -+ return posix_clocks[id]; - } - - static int common_timer_create(struct k_itimer *new_timer) -@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, - if (copy_from_user(&new_tp, tp, sizeof (*tp))) - return -EFAULT; - -+ /* only the CLOCK_REALTIME clock can be set, all other clocks -+ have their clock_set fptr set to a nosettime dummy function -+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to -+ call common_clock_set, which calls do_sys_settimeofday, which -+ we hook -+ */ -+ - return kc->clock_set(which_clock, &new_tp); - } - -diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c -index d523593..68197a4 100644 ---- a/kernel/power/poweroff.c -+++ b/kernel/power/poweroff.c -@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { - .enable_mask = SYSRQ_ENABLE_BOOT, - }; - --static int pm_sysrq_init(void) -+static int __init pm_sysrq_init(void) - { - register_sysrq_key('o', &sysrq_poweroff_op); - return 0; -diff --git a/kernel/power/process.c b/kernel/power/process.c -index 0cf3a27..5481be4 100644 ---- a/kernel/power/process.c -+++ b/kernel/power/process.c -@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only) - u64 elapsed_csecs64; - unsigned int elapsed_csecs; - bool wakeup = false; -+ bool timedout = false; - - do_gettimeofday(&start); - -@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only) - - while (true) { - todo = 0; -+ if (time_after(jiffies, end_time)) -+ timedout = true; - read_lock(&tasklist_lock); - do_each_thread(g, p) { - if (frozen(p) || !freezable(p)) -@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only) - * try_to_stop() after schedule() in ptrace/signal - * stop sees TIF_FREEZE. 
- */ -- if (!task_is_stopped_or_traced(p) && -- !freezer_should_skip(p)) -+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) { - todo++; -+ if (timedout) { -+ printk(KERN_ERR "Task refusing to freeze:\n"); -+ sched_show_task(p); -+ } -+ } - } while_each_thread(g, p); - read_unlock(&tasklist_lock); - -@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only) - todo += wq_busy; - } - -- if (!todo || time_after(jiffies, end_time)) -+ if (!todo || timedout) - break; - - if (pm_wakeup_pending()) { -diff --git a/kernel/printk.c b/kernel/printk.c -index 28a40d8..2411bec 100644 ---- a/kernel/printk.c -+++ b/kernel/printk.c -@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file) - if (from_file && type != SYSLOG_ACTION_OPEN) - return 0; - -+#ifdef CONFIG_GRKERNSEC_DMESG -+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN)) -+ return -EPERM; -+#endif -+ - if (syslog_action_restricted(type)) { - if (capable(CAP_SYSLOG)) - return 0; -diff --git a/kernel/profile.c b/kernel/profile.c -index 961b389..c451353 100644 ---- a/kernel/profile.c -+++ b/kernel/profile.c -@@ -39,7 +39,7 @@ struct profile_hit { - /* Oprofile timer tick hook */ - static int (*timer_hook)(struct pt_regs *) __read_mostly; - --static atomic_t *prof_buffer; -+static atomic_unchecked_t *prof_buffer; - static unsigned long prof_len, prof_shift; - - int prof_on __read_mostly; -@@ -281,7 +281,7 @@ static void profile_flip_buffers(void) - hits[i].pc = 0; - continue; - } -- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); -+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); - hits[i].hits = hits[i].pc = 0; - } - } -@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) - * Add the current hit(s) and flush the write-queue out - * to the global buffer: - */ -- atomic_add(nr_hits, &prof_buffer[pc]); -+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]); - for (i = 0; i < NR_PROFILE_HIT; ++i) { -- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); -+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); - hits[i].pc = hits[i].hits = 0; - } - out: -@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) - { - unsigned long pc; - pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; -- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); -+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); - } - #endif /* !CONFIG_SMP */ - -@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) - return -EFAULT; - buf++; p++; count--; read++; - } -- pnt = (char *)prof_buffer + p - sizeof(atomic_t); -+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t); - if (copy_to_user(buf, (void *)pnt, count)) - return -EFAULT; - read += count; -@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, - } - #endif - profile_discard_flip_buffers(); -- memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); -+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t)); - return count; - } - -diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index a70d2a5..cbd4b4f 100644 ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state) - return ret; - } - --int __ptrace_may_access(struct task_struct *task, unsigned int mode) -+static int __ptrace_may_access(struct task_struct *task, unsigned int mode, -+ unsigned int log) - { - 
const struct cred *cred = current_cred(), *tcred; - -@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) - cred->gid == tcred->sgid && - cred->gid == tcred->gid)) - goto ok; -- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)) -+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) || -+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))) - goto ok; - rcu_read_unlock(); - return -EPERM; -@@ -196,7 +198,9 @@ ok: - smp_rmb(); - if (task->mm) - dumpable = get_dumpable(task->mm); -- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE)) -+ if (!dumpable && -+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) || -+ (log && !task_ns_capable(task, CAP_SYS_PTRACE)))) - return -EPERM; - - return security_ptrace_access_check(task, mode); -@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) - { - int err; - task_lock(task); -- err = __ptrace_may_access(task, mode); -+ err = __ptrace_may_access(task, mode, 0); -+ task_unlock(task); -+ return !err; -+} -+ -+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode) -+{ -+ int err; -+ task_lock(task); -+ err = __ptrace_may_access(task, mode, 1); - task_unlock(task); - return !err; - } -@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_struct *task, long request, - goto out; - - task_lock(task); -- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); -+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1); - task_unlock(task); - if (retval) - goto unlock_creds; -@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request, - task->ptrace = PT_PTRACED; - if (seize) - task->ptrace |= PT_SEIZED; -- if (task_ns_capable(task, CAP_SYS_PTRACE)) -+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE)) - task->ptrace |= PT_PTRACE_CAP; - - __ptrace_link(task, current); -@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst - { - int copied = 0; - -+ pax_track_stack(); -+ - while (len > 0) { - char buf[128]; - int this_len, retval; -@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst - break; - return -EIO; - } -- if (copy_to_user(dst, buf, retval)) -+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval)) - return -EFAULT; - copied += retval; - src += retval; -@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds - { - int copied = 0; - -+ pax_track_stack(); -+ - while (len > 0) { - char buf[128]; - int this_len, retval; -@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *child, long request, - bool seized = child->ptrace & PT_SEIZED; - int ret = -EIO; - siginfo_t siginfo, *si; -- void __user *datavp = (void __user *) data; -+ void __user *datavp = (__force void __user *) data; - unsigned long __user *datalp = datavp; - unsigned long flags; - -+ pax_track_stack(); -+ - switch (request) { - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: -@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, - goto out; - } - -+ if (gr_handle_ptrace(child, request)) { -+ ret = -EPERM; -+ goto out_put_task_struct; -+ } -+ - if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { - ret = ptrace_attach(child, request, data); - /* - * Some architectures need to do book-keeping after - * a ptrace attach. 
- */ -- if (!ret) -+ if (!ret) { - arch_ptrace_attach(child); -+ gr_audit_ptrace(child); -+ } - goto out_put_task_struct; - } - -@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, - copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); - if (copied != sizeof(tmp)) - return -EIO; -- return put_user(tmp, (unsigned long __user *)data); -+ return put_user(tmp, (__force unsigned long __user *)data); - } - - int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, -@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, - siginfo_t siginfo; - int ret; - -+ pax_track_stack(); -+ - switch (request) { - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: -@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, - goto out; - } - -+ if (gr_handle_ptrace(child, request)) { -+ ret = -EPERM; -+ goto out_put_task_struct; -+ } -+ - if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { - ret = ptrace_attach(child, request, data); - /* - * Some architectures need to do book-keeping after - * a ptrace attach. - */ -- if (!ret) -+ if (!ret) { - arch_ptrace_attach(child); -+ gr_audit_ptrace(child); -+ } - goto out_put_task_struct; - } - -diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c -index 98f51b1..30b950c 100644 ---- a/kernel/rcutorture.c -+++ b/kernel/rcutorture.c -@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = - { 0 }; - static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = - { 0 }; --static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; --static atomic_t n_rcu_torture_alloc; --static atomic_t n_rcu_torture_alloc_fail; --static atomic_t n_rcu_torture_free; --static atomic_t n_rcu_torture_mberror; --static atomic_t n_rcu_torture_error; -+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; -+static atomic_unchecked_t n_rcu_torture_alloc; -+static atomic_unchecked_t n_rcu_torture_alloc_fail; -+static atomic_unchecked_t n_rcu_torture_free; -+static atomic_unchecked_t n_rcu_torture_mberror; -+static atomic_unchecked_t n_rcu_torture_error; - static long n_rcu_torture_boost_ktrerror; - static long n_rcu_torture_boost_rterror; - static long n_rcu_torture_boost_failure; -@@ -223,11 +223,11 @@ rcu_torture_alloc(void) - - spin_lock_bh(&rcu_torture_lock); - if (list_empty(&rcu_torture_freelist)) { -- atomic_inc(&n_rcu_torture_alloc_fail); -+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail); - spin_unlock_bh(&rcu_torture_lock); - return NULL; - } -- atomic_inc(&n_rcu_torture_alloc); -+ atomic_inc_unchecked(&n_rcu_torture_alloc); - p = rcu_torture_freelist.next; - list_del_init(p); - spin_unlock_bh(&rcu_torture_lock); -@@ -240,7 +240,7 @@ rcu_torture_alloc(void) - static void - rcu_torture_free(struct rcu_torture *p) - { -- atomic_inc(&n_rcu_torture_free); -+ atomic_inc_unchecked(&n_rcu_torture_free); - spin_lock_bh(&rcu_torture_lock); - list_add_tail(&p->rtort_free, &rcu_torture_freelist); - spin_unlock_bh(&rcu_torture_lock); -@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p) - i = rp->rtort_pipe_count; - if (i > RCU_TORTURE_PIPE_LEN) - i = RCU_TORTURE_PIPE_LEN; -- atomic_inc(&rcu_torture_wcount[i]); -+ atomic_inc_unchecked(&rcu_torture_wcount[i]); - if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { - rp->rtort_mbtest = 0; - rcu_torture_free(rp); -@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p) - i = rp->rtort_pipe_count; 
- if (i > RCU_TORTURE_PIPE_LEN) - i = RCU_TORTURE_PIPE_LEN; -- atomic_inc(&rcu_torture_wcount[i]); -+ atomic_inc_unchecked(&rcu_torture_wcount[i]); - if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { - rp->rtort_mbtest = 0; - list_del(&rp->rtort_free); -@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg) - i = old_rp->rtort_pipe_count; - if (i > RCU_TORTURE_PIPE_LEN) - i = RCU_TORTURE_PIPE_LEN; -- atomic_inc(&rcu_torture_wcount[i]); -+ atomic_inc_unchecked(&rcu_torture_wcount[i]); - old_rp->rtort_pipe_count++; - cur_ops->deferred_free(old_rp); - } -@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused) - return; - } - if (p->rtort_mbtest == 0) -- atomic_inc(&n_rcu_torture_mberror); -+ atomic_inc_unchecked(&n_rcu_torture_mberror); - spin_lock(&rand_lock); - cur_ops->read_delay(&rand); - n_rcu_torture_timers++; -@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg) - continue; - } - if (p->rtort_mbtest == 0) -- atomic_inc(&n_rcu_torture_mberror); -+ atomic_inc_unchecked(&n_rcu_torture_mberror); - cur_ops->read_delay(&rand); - preempt_disable(); - pipe_count = p->rtort_pipe_count; -@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page) - rcu_torture_current, - rcu_torture_current_version, - list_empty(&rcu_torture_freelist), -- atomic_read(&n_rcu_torture_alloc), -- atomic_read(&n_rcu_torture_alloc_fail), -- atomic_read(&n_rcu_torture_free), -- atomic_read(&n_rcu_torture_mberror), -+ atomic_read_unchecked(&n_rcu_torture_alloc), -+ atomic_read_unchecked(&n_rcu_torture_alloc_fail), -+ atomic_read_unchecked(&n_rcu_torture_free), -+ atomic_read_unchecked(&n_rcu_torture_mberror), - n_rcu_torture_boost_ktrerror, - n_rcu_torture_boost_rterror, - n_rcu_torture_boost_failure, - n_rcu_torture_boosts, - n_rcu_torture_timers); -- if (atomic_read(&n_rcu_torture_mberror) != 0 || -+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 || - n_rcu_torture_boost_ktrerror != 0 || - n_rcu_torture_boost_rterror != 0 || - n_rcu_torture_boost_failure != 0) -@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page) - cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); - if (i > 1) { - cnt += sprintf(&page[cnt], "!!! 
"); -- atomic_inc(&n_rcu_torture_error); -+ atomic_inc_unchecked(&n_rcu_torture_error); - WARN_ON_ONCE(1); - } - cnt += sprintf(&page[cnt], "Reader Pipe: "); -@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page) - cnt += sprintf(&page[cnt], "Free-Block Circulation: "); - for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { - cnt += sprintf(&page[cnt], " %d", -- atomic_read(&rcu_torture_wcount[i])); -+ atomic_read_unchecked(&rcu_torture_wcount[i])); - } - cnt += sprintf(&page[cnt], "\n"); - if (cur_ops->stats) -@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void) - - if (cur_ops->cleanup) - cur_ops->cleanup(); -- if (atomic_read(&n_rcu_torture_error)) -+ if (atomic_read_unchecked(&n_rcu_torture_error)) - rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); - else - rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); -@@ -1474,17 +1474,17 @@ rcu_torture_init(void) - - rcu_torture_current = NULL; - rcu_torture_current_version = 0; -- atomic_set(&n_rcu_torture_alloc, 0); -- atomic_set(&n_rcu_torture_alloc_fail, 0); -- atomic_set(&n_rcu_torture_free, 0); -- atomic_set(&n_rcu_torture_mberror, 0); -- atomic_set(&n_rcu_torture_error, 0); -+ atomic_set_unchecked(&n_rcu_torture_alloc, 0); -+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0); -+ atomic_set_unchecked(&n_rcu_torture_free, 0); -+ atomic_set_unchecked(&n_rcu_torture_mberror, 0); -+ atomic_set_unchecked(&n_rcu_torture_error, 0); - n_rcu_torture_boost_ktrerror = 0; - n_rcu_torture_boost_rterror = 0; - n_rcu_torture_boost_failure = 0; - n_rcu_torture_boosts = 0; - for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) -- atomic_set(&rcu_torture_wcount[i], 0); -+ atomic_set_unchecked(&rcu_torture_wcount[i], 0); - for_each_possible_cpu(cpu) { - for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { - per_cpu(rcu_torture_count, cpu)[i] = 0; -diff --git a/kernel/rcutree.c b/kernel/rcutree.c -index ba06207..85d8ba8 100644 ---- a/kernel/rcutree.c -+++ b/kernel/rcutree.c -@@ -356,9 +356,9 @@ void rcu_enter_nohz(void) - } - /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ - smp_mb__before_atomic_inc(); /* See above. */ -- atomic_inc(&rdtp->dynticks); -+ atomic_inc_unchecked(&rdtp->dynticks); - smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ -- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); -+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); - local_irq_restore(flags); - - /* If the interrupt queued a callback, get out of dyntick mode. */ -@@ -387,10 +387,10 @@ void rcu_exit_nohz(void) - return; - } - smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ -- atomic_inc(&rdtp->dynticks); -+ atomic_inc_unchecked(&rdtp->dynticks); - /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ - smp_mb__after_atomic_inc(); /* See above. */ -- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); -+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); - local_irq_restore(flags); - } - -@@ -406,14 +406,14 @@ void rcu_nmi_enter(void) - struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); - - if (rdtp->dynticks_nmi_nesting == 0 && -- (atomic_read(&rdtp->dynticks) & 0x1)) -+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1)) - return; - rdtp->dynticks_nmi_nesting++; - smp_mb__before_atomic_inc(); /* Force delay from prior write. */ -- atomic_inc(&rdtp->dynticks); -+ atomic_inc_unchecked(&rdtp->dynticks); - /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ - smp_mb__after_atomic_inc(); /* See above. 
*/ -- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); -+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); - } - - /** -@@ -432,9 +432,9 @@ void rcu_nmi_exit(void) - return; - /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ - smp_mb__before_atomic_inc(); /* See above. */ -- atomic_inc(&rdtp->dynticks); -+ atomic_inc_unchecked(&rdtp->dynticks); - smp_mb__after_atomic_inc(); /* Force delay to next write. */ -- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); -+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); - } - - /** -@@ -469,7 +469,7 @@ void rcu_irq_exit(void) - */ - static int dyntick_save_progress_counter(struct rcu_data *rdp) - { -- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); -+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); - return 0; - } - -@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) - unsigned long curr; - unsigned long snap; - -- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks); -+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); - snap = (unsigned long)rdp->dynticks_snap; - - /* -@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) - /* - * Do softirq processing for the current CPU. - */ --static void rcu_process_callbacks(struct softirq_action *unused) -+static void rcu_process_callbacks(void) - { - __rcu_process_callbacks(&rcu_sched_state, - &__get_cpu_var(rcu_sched_data)); -diff --git a/kernel/rcutree.h b/kernel/rcutree.h -index 01b2ccd..4f5d80a 100644 ---- a/kernel/rcutree.h -+++ b/kernel/rcutree.h -@@ -86,7 +86,7 @@ - struct rcu_dynticks { - int dynticks_nesting; /* Track irq/process nesting level. */ - int dynticks_nmi_nesting; /* Track NMI nesting level. */ -- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */ -+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */ - }; - - /* RCU's kthread states for tracing. */ -diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h -index 8aafbb8..2fca109 100644 ---- a/kernel/rcutree_plugin.h -+++ b/kernel/rcutree_plugin.h -@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void) - - /* Clean up and exit. */ - smp_mb(); /* ensure expedited GP seen before counter increment. */ -- ACCESS_ONCE(sync_rcu_preempt_exp_count)++; -+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++; - unlock_mb_ret: - mutex_unlock(&sync_rcu_preempt_exp_mutex); - mb_ret: -@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited); - - #else /* #ifndef CONFIG_SMP */ - --static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); --static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); -+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0); -+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0); - - static int synchronize_sched_expedited_cpu_stop(void *data) - { -@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void) - int firstsnap, s, snap, trycount = 0; - - /* Note that atomic_inc_return() implies full memory barrier. */ -- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started); -+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started); - get_online_cpus(); - - /* -@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void) - } - - /* Check to see if someone else did our work for us. 
*/ -- s = atomic_read(&sync_sched_expedited_done); -+ s = atomic_read_unchecked(&sync_sched_expedited_done); - if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) { - smp_mb(); /* ensure test happens before caller kfree */ - return; -@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void) - * grace period works for us. - */ - get_online_cpus(); -- snap = atomic_read(&sync_sched_expedited_started) - 1; -+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1; - smp_mb(); /* ensure read is before try_stop_cpus(). */ - } - -@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void) - * than we did beat us to the punch. - */ - do { -- s = atomic_read(&sync_sched_expedited_done); -+ s = atomic_read_unchecked(&sync_sched_expedited_done); - if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) { - smp_mb(); /* ensure test happens before caller kfree */ - break; - } -- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s); -+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s); - - put_online_cpus(); - } -@@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu) - for_each_online_cpu(thatcpu) { - if (thatcpu == cpu) - continue; -- snap = atomic_add_return(0, &per_cpu(rcu_dynticks, -+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks, - thatcpu).dynticks); - smp_mb(); /* Order sampling of snap with end of grace period. */ - if ((snap & 0x1) != 0) { -diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c -index 3b0c098..43ba2d8 100644 ---- a/kernel/rcutree_trace.c -+++ b/kernel/rcutree_trace.c -@@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) - rdp->qs_pending); - #ifdef CONFIG_NO_HZ - seq_printf(m, " dt=%d/%d/%d df=%lu", -- atomic_read(&rdp->dynticks->dynticks), -+ atomic_read_unchecked(&rdp->dynticks->dynticks), - rdp->dynticks->dynticks_nesting, - rdp->dynticks->dynticks_nmi_nesting, - rdp->dynticks_fqs); -@@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) - rdp->qs_pending); - #ifdef CONFIG_NO_HZ - seq_printf(m, ",%d,%d,%d,%lu", -- atomic_read(&rdp->dynticks->dynticks), -+ atomic_read_unchecked(&rdp->dynticks->dynticks), - rdp->dynticks->dynticks_nesting, - rdp->dynticks->dynticks_nmi_nesting, - rdp->dynticks_fqs); -diff --git a/kernel/relay.c b/kernel/relay.c -index 859ea5a..096e2fe 100644 ---- a/kernel/relay.c -+++ b/kernel/relay.c -@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in, - }; - ssize_t ret; - -+ pax_track_stack(); -+ - if (rbuf->subbufs_produced == rbuf->subbufs_consumed) - return 0; - if (splice_grow_spd(pipe, &spd)) -diff --git a/kernel/resource.c b/kernel/resource.c -index c8dc249..f1e2359 100644 ---- a/kernel/resource.c -+++ b/kernel/resource.c -@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = { - - static int __init ioresources_init(void) - { -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+#ifdef CONFIG_GRKERNSEC_PROC_USER -+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations); -+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations); -+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) -+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations); -+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations); -+#endif -+#else - proc_create("ioports", 0, NULL, &proc_ioports_operations); - proc_create("iomem", 0, NULL, &proc_iomem_operations); -+#endif - return 0; - } - __initcall(ioresources_init); -diff --git a/kernel/rtmutex-tester.c 
b/kernel/rtmutex-tester.c -index 5c9ccd3..a35e22b 100644 ---- a/kernel/rtmutex-tester.c -+++ b/kernel/rtmutex-tester.c -@@ -20,7 +20,7 @@ - #define MAX_RT_TEST_MUTEXES 8 - - static spinlock_t rttest_lock; --static atomic_t rttest_event; -+static atomic_unchecked_t rttest_event; - - struct test_thread_data { - int opcode; -@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) - - case RTTEST_LOCKCONT: - td->mutexes[td->opdata] = 1; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - return 0; - - case RTTEST_RESET: -@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) - return 0; - - case RTTEST_RESETEVENT: -- atomic_set(&rttest_event, 0); -+ atomic_set_unchecked(&rttest_event, 0); - return 0; - - default: -@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) - return ret; - - td->mutexes[id] = 1; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - rt_mutex_lock(&mutexes[id]); -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - td->mutexes[id] = 4; - return 0; - -@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) - return ret; - - td->mutexes[id] = 1; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - ret = rt_mutex_lock_interruptible(&mutexes[id], 0); -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - td->mutexes[id] = ret ? 0 : 4; - return ret ? -EINTR : 0; - -@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) - if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) - return ret; - -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - rt_mutex_unlock(&mutexes[id]); -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - td->mutexes[id] = 0; - return 0; - -@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) - break; - - td->mutexes[dat] = 2; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - break; - - default: -@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) - return; - - td->mutexes[dat] = 3; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - break; - - case RTTEST_LOCKNOWAIT: -@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) - return; - - td->mutexes[dat] = 1; -- td->event = atomic_add_return(1, &rttest_event); -+ td->event = atomic_add_return_unchecked(1, &rttest_event); - return; - - default: -diff --git a/kernel/sched.c b/kernel/sched.c -index b50b0f0..1c6c591 100644 ---- a/kernel/sched.c -+++ b/kernel/sched.c -@@ -4264,6 +4264,8 @@ static void __sched __schedule(void) - struct rq *rq; - int cpu; - -+ pax_track_stack(); -+ - need_resched: - preempt_disable(); - cpu = smp_processor_id(); -@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p, const int nice) - /* convert nice value [19,-20] to rlimit style value [1,40] */ - int nice_rlim = 20 - nice; - -+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1); -+ - return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || - 
capable(CAP_SYS_NICE)); - } -@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment) - if (nice > 19) - nice = 19; - -- if (increment < 0 && !can_nice(current, nice)) -+ if (increment < 0 && (!can_nice(current, nice) || -+ gr_handle_chroot_nice())) - return -EPERM; - - retval = security_task_setnice(current, nice); -@@ -5127,6 +5132,7 @@ recheck: - unsigned long rlim_rtprio = - task_rlimit(p, RLIMIT_RTPRIO); - -+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1); - /* can't set/change the rt policy */ - if (policy != p->policy && !rlim_rtprio) - return -EPERM; -diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c -index 429242f..d7cca82 100644 ---- a/kernel/sched_autogroup.c -+++ b/kernel/sched_autogroup.c -@@ -7,7 +7,7 @@ - - unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; - static struct autogroup autogroup_default; --static atomic_t autogroup_seq_nr; -+static atomic_unchecked_t autogroup_seq_nr; - - static void __init autogroup_init(struct task_struct *init_task) - { -@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void) - - kref_init(&ag->kref); - init_rwsem(&ag->lock); -- ag->id = atomic_inc_return(&autogroup_seq_nr); -+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr); - ag->tg = tg; - #ifdef CONFIG_RT_GROUP_SCHED - /* -diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c -index bc8ee99..b6f6492 100644 ---- a/kernel/sched_fair.c -+++ b/kernel/sched_fair.c -@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } - * run_rebalance_domains is triggered when needed from the scheduler tick. - * Also triggered for nohz idle balancing (with nohz_balancing_kick set). - */ --static void run_rebalance_domains(struct softirq_action *h) -+static void run_rebalance_domains(void) - { - int this_cpu = smp_processor_id(); - struct rq *this_rq = cpu_rq(this_cpu); -diff --git a/kernel/signal.c b/kernel/signal.c -index 291c970..304bd03 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep; - - int print_fatal_signals __read_mostly; - --static void __user *sig_handler(struct task_struct *t, int sig) -+static __sighandler_t sig_handler(struct task_struct *t, int sig) - { - return t->sighand->action[sig - 1].sa.sa_handler; - } - --static int sig_handler_ignored(void __user *handler, int sig) -+static int sig_handler_ignored(__sighandler_t handler, int sig) - { - /* Is it explicitly or implicitly ignored? 
*/ - return handler == SIG_IGN || -@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig) - static int sig_task_ignored(struct task_struct *t, int sig, - int from_ancestor_ns) - { -- void __user *handler; -+ __sighandler_t handler; - - handler = sig_handler(t, sig); - -@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi - atomic_inc(&user->sigpending); - rcu_read_unlock(); - -+ if (!override_rlimit) -+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1); -+ - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { -@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default) - - int unhandled_signal(struct task_struct *tsk, int sig) - { -- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; -+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler; - if (is_global_init(tsk)) - return 1; - if (handler != SIG_IGN && handler != SIG_DFL) -@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info, - } - } - -+ /* allow glibc communication via tgkill to other threads in our -+ thread group */ -+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL || -+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid) -+ && gr_handle_signal(t, sig)) -+ return -EPERM; -+ - return security_task_kill(t, info, sig, 0); - } - -@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) - return send_signal(sig, info, p, 1); - } - --static int -+int - specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) - { - return send_signal(sig, info, t, 0); -@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - unsigned long int flags; - int ret, blocked, ignored; - struct k_sigaction *action; -+ int is_unhandled = 0; - - spin_lock_irqsave(&t->sighand->siglock, flags); - action = &t->sighand->action[sig-1]; -@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - } - if (action->sa.sa_handler == SIG_DFL) - t->signal->flags &= ~SIGNAL_UNKILLABLE; -+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL) -+ is_unhandled = 1; - ret = specific_send_sig_info(sig, info, t); - spin_unlock_irqrestore(&t->sighand->siglock, flags); - -+ /* only deal with unhandled signals, java etc trigger SIGSEGV during -+ normal operation */ -+ if (is_unhandled) { -+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t); -+ gr_handle_crash(t, sig); -+ } -+ - return ret; - } - -@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) - ret = check_kill_permission(sig, info, p); - rcu_read_unlock(); - -- if (!ret && sig) -+ if (!ret && sig) { - ret = do_send_sig_info(sig, info, p, true); -+ if (!ret) -+ gr_log_signal(sig, !is_si_special(info) ? 
info->si_addr : NULL, p); -+ } - - return ret; - } -@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why) - { - siginfo_t info; - -+ pax_track_stack(); -+ - memset(&info, 0, sizeof info); - info.si_signo = signr; - info.si_code = exit_code; -@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) - int error = -ESRCH; - - rcu_read_lock(); -- p = find_task_by_vpid(pid); -+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK -+ /* allow glibc communication via tgkill to other threads in our -+ thread group */ -+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL && -+ sig == (SIGRTMIN+1) && tgid == info->si_pid) -+ p = find_task_by_vpid_unrestricted(pid); -+ else -+#endif -+ p = find_task_by_vpid(pid); - if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { - error = check_kill_permission(sig, info, p); - /* -diff --git a/kernel/smp.c b/kernel/smp.c -index fb67dfa..f819e2e 100644 ---- a/kernel/smp.c -+++ b/kernel/smp.c -@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait) - } - EXPORT_SYMBOL(smp_call_function); - --void ipi_call_lock(void) -+void ipi_call_lock(void) __acquires(call_function.lock) - { - raw_spin_lock(&call_function.lock); - } - --void ipi_call_unlock(void) -+void ipi_call_unlock(void) __releases(call_function.lock) - { - raw_spin_unlock(&call_function.lock); - } - --void ipi_call_lock_irq(void) -+void ipi_call_lock_irq(void) __acquires(call_function.lock) - { - raw_spin_lock_irq(&call_function.lock); - } - --void ipi_call_unlock_irq(void) -+void ipi_call_unlock_irq(void) __releases(call_function.lock) - { - raw_spin_unlock_irq(&call_function.lock); - } -diff --git a/kernel/softirq.c b/kernel/softirq.c -index fca82c3..1db9690 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp - - DEFINE_PER_CPU(struct task_struct *, ksoftirqd); - --char *softirq_to_name[NR_SOFTIRQS] = { -+const char * const softirq_to_name[NR_SOFTIRQS] = { - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", - "TASKLET", "SCHED", "HRTIMER", "RCU" - }; -@@ -235,7 +235,7 @@ restart: - kstat_incr_softirqs_this_cpu(vec_nr); - - trace_softirq_entry(vec_nr); -- h->action(h); -+ h->action(); - trace_softirq_exit(vec_nr); - if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %u %s %p" -@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr) - local_irq_restore(flags); - } - --void open_softirq(int nr, void (*action)(struct softirq_action *)) -+void open_softirq(int nr, void (*action)(void)) - { -- softirq_vec[nr].action = action; -+ pax_open_kernel(); -+ *(void **)&softirq_vec[nr].action = action; -+ pax_close_kernel(); - } - - /* -@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) - - EXPORT_SYMBOL(__tasklet_hi_schedule_first); - --static void tasklet_action(struct softirq_action *a) -+static void tasklet_action(void) - { - struct tasklet_struct *list; - -@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a) - } - } - --static void tasklet_hi_action(struct softirq_action *a) -+static void tasklet_hi_action(void) - { - struct tasklet_struct *list; - -diff --git a/kernel/sys.c b/kernel/sys.c -index 1dbbe69..e96e1dd 100644 ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) - error = -EACCES; - goto out; - } -+ -+ if (gr_handle_chroot_setpriority(p, 
niceval)) { -+ error = -EACCES; -+ goto out; -+ } -+ - no_nice = security_task_setnice(p, niceval); - if (no_nice) { - error = no_nice; -@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) - goto error; - } - -+ if (gr_check_group_change(new->gid, new->egid, -1)) -+ goto error; -+ - if (rgid != (gid_t) -1 || - (egid != (gid_t) -1 && egid != old->gid)) - new->sgid = new->egid; -@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) - old = current_cred(); - - retval = -EPERM; -+ -+ if (gr_check_group_change(gid, gid, gid)) -+ goto error; -+ - if (nsown_capable(CAP_SETGID)) - new->gid = new->egid = new->sgid = new->fsgid = gid; - else if (gid == old->gid || gid == old->sgid) -@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) - goto error; - } - -+ if (gr_check_user_change(new->uid, new->euid, -1)) -+ goto error; -+ - if (new->uid != old->uid) { - retval = set_user(new); - if (retval < 0) -@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) - old = current_cred(); - - retval = -EPERM; -+ -+ if (gr_check_crash_uid(uid)) -+ goto error; -+ if (gr_check_user_change(uid, uid, uid)) -+ goto error; -+ - if (nsown_capable(CAP_SETUID)) { - new->suid = new->uid = uid; - if (uid != old->uid) { -@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) - goto error; - } - -+ if (gr_check_user_change(ruid, euid, -1)) -+ goto error; -+ - if (ruid != (uid_t) -1) { - new->uid = ruid; - if (ruid != old->uid) { -@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) - goto error; - } - -+ if (gr_check_group_change(rgid, egid, -1)) -+ goto error; -+ - if (rgid != (gid_t) -1) - new->gid = rgid; - if (egid != (gid_t) -1) -@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) - old = current_cred(); - old_fsuid = old->fsuid; - -+ if (gr_check_user_change(-1, -1, uid)) -+ goto error; -+ - if (uid == old->uid || uid == old->euid || - uid == old->suid || uid == old->fsuid || - nsown_capable(CAP_SETUID)) { -@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) - } - } - -+error: - abort_creds(new); - return old_fsuid; - -@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) - if (gid == old->gid || gid == old->egid || - gid == old->sgid || gid == old->fsgid || - nsown_capable(CAP_SETGID)) { -+ if (gr_check_group_change(-1, -1, gid)) -+ goto error; -+ - if (gid != old_fsgid) { - new->fsgid = gid; - goto change_okay; - } - } - -+error: - abort_creds(new); - return old_fsgid; - -@@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len) - } - v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; - snprintf(buf, len, "2.6.%u%s", v, rest); -- ret = copy_to_user(release, buf, len); -+ if (len > sizeof(buf)) -+ ret = -EFAULT; -+ else -+ ret = copy_to_user(release, buf, len); - } - return ret; - } -@@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) - return -EFAULT; - - down_read(&uts_sem); -- error = __copy_to_user(&name->sysname, &utsname()->sysname, -+ error = __copy_to_user(name->sysname, &utsname()->sysname, - __OLD_UTS_LEN); - error |= __put_user(0, name->sysname + __OLD_UTS_LEN); -- error |= __copy_to_user(&name->nodename, &utsname()->nodename, -+ error |= __copy_to_user(name->nodename, &utsname()->nodename, - __OLD_UTS_LEN); - error |= __put_user(0, name->nodename + __OLD_UTS_LEN); -- error |= __copy_to_user(&name->release, &utsname()->release, -+ error |= __copy_to_user(name->release, &utsname()->release, - __OLD_UTS_LEN); - error |= __put_user(0, 
name->release + __OLD_UTS_LEN); -- error |= __copy_to_user(&name->version, &utsname()->version, -+ error |= __copy_to_user(name->version, &utsname()->version, - __OLD_UTS_LEN); - error |= __put_user(0, name->version + __OLD_UTS_LEN); -- error |= __copy_to_user(&name->machine, &utsname()->machine, -+ error |= __copy_to_user(name->machine, &utsname()->machine, - __OLD_UTS_LEN); - error |= __put_user(0, name->machine + __OLD_UTS_LEN); - up_read(&uts_sem); -@@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, - error = get_dumpable(me->mm); - break; - case PR_SET_DUMPABLE: -- if (arg2 < 0 || arg2 > 1) { -+ if (arg2 > 1) { - error = -EINVAL; - break; - } -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 11d65b5..6957b37 100644 ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -85,6 +85,13 @@ - - - #if defined(CONFIG_SYSCTL) -+#include <linux/grsecurity.h> -+#include <linux/grinternal.h> -+ -+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op); -+extern int gr_handle_sysctl_mod(const char *dirname, const char *name, -+ const int op); -+extern int gr_handle_chroot_sysctl(const int op); - - /* External variables not in a header file. */ - extern int sysctl_overcommit_memory; -@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, - } - - #endif -+extern struct ctl_table grsecurity_table[]; - - static struct ctl_table root_table[]; - static struct ctl_table_root sysctl_table_root; -@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[]; - int sysctl_legacy_va_layout; - #endif - -+#ifdef CONFIG_PAX_SOFTMODE -+static ctl_table pax_table[] = { -+ { -+ .procname = "softmode", -+ .data = &pax_softmode, -+ .maxlen = sizeof(unsigned int), -+ .mode = 0600, -+ .proc_handler = &proc_dointvec, -+ }, -+ -+ { } -+}; -+#endif -+ - /* The default sysctl tables: */ - - static struct ctl_table root_table[] = { -@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000; - #endif - - static struct ctl_table kern_table[] = { -+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) -+ { -+ .procname = "grsecurity", -+ .mode = 0500, -+ .child = grsecurity_table, -+ }, -+#endif -+ -+#ifdef CONFIG_PAX_SOFTMODE -+ { -+ .procname = "pax", -+ .mode = 0500, -+ .child = pax_table, -+ }, -+#endif -+ - { - .procname = "sched_child_runs_first", - .data = &sysctl_sched_child_runs_first, -@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = { - .data = &modprobe_path, - .maxlen = KMOD_PATH_LEN, - .mode = 0644, -- .proc_handler = proc_dostring, -+ .proc_handler = proc_dostring_modpriv, - }, - { - .procname = "modules_disabled", -@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = { - .extra1 = &zero, - .extra2 = &one, - }, -+#endif - { - .procname = "kptr_restrict", - .data = &kptr_restrict, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dmesg_restrict, -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ .extra1 = &two, -+#else - .extra1 = &zero, -+#endif - .extra2 = &two, - }, --#endif - { - .procname = "ngroups_max", - .data = &ngroups_max, -@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = { - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - }, -+ { -+ .procname = "heap_stack_gap", -+ .data = &sysctl_heap_stack_gap, -+ .maxlen = sizeof(sysctl_heap_stack_gap), -+ .mode = 0644, -+ .proc_handler = proc_doulongvec_minmax, -+ }, - #else - { - .procname = "nr_trim_pages", -@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op) - int sysctl_perm(struct ctl_table_root *root, 
struct ctl_table *table, int op) - { - int mode; -+ int error; -+ -+ if (table->parent != NULL && table->parent->procname != NULL && -+ table->procname != NULL && -+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op)) -+ return -EACCES; -+ if (gr_handle_chroot_sysctl(op)) -+ return -EACCES; -+ error = gr_handle_sysctl(table, op); -+ if (error) -+ return error; - - if (root->permissions) - mode = root->permissions(root, current->nsproxy, table); -@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write, - buffer, lenp, ppos); - } - -+int proc_dostring_modpriv(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ if (write && !capable(CAP_SYS_MODULE)) -+ return -EPERM; -+ -+ return _proc_do_string(table->data, table->maxlen, write, -+ buffer, lenp, ppos); -+} -+ - static size_t proc_skip_spaces(char **buf) - { - size_t ret; -@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, - len = strlen(tmp); - if (len > *size) - len = *size; -+ if (len > sizeof(tmp)) -+ len = sizeof(tmp); - if (copy_to_user(*buf, tmp, len)) - return -EFAULT; - *size -= len; -@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int - *i = val; - } else { - val = convdiv * (*i) / convmul; -- if (!first) -+ if (!first) { - err = proc_put_char(&buffer, &left, '\t'); -+ if (err) -+ break; -+ } - err = proc_put_long(&buffer, &left, val, false); - if (err) - break; -@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write, - return -ENOSYS; - } - -+int proc_dostring_modpriv(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ return -ENOSYS; -+} -+ - int proc_dointvec(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) - { -@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax); - EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); - EXPORT_SYMBOL(proc_dointvec_ms_jiffies); - EXPORT_SYMBOL(proc_dostring); -+EXPORT_SYMBOL(proc_dostring_modpriv); - EXPORT_SYMBOL(proc_doulongvec_minmax); - EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax); - EXPORT_SYMBOL(register_sysctl_table); -diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c -index e8bffbe..2344401 100644 ---- a/kernel/sysctl_binary.c -+++ b/kernel/sysctl_binary.c -@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file, - int i; - - set_fs(KERNEL_DS); -- result = vfs_read(file, buffer, BUFSZ - 1, &pos); -+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos); - set_fs(old_fs); - if (result < 0) - goto out_kfree; -@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file, - } - - set_fs(KERNEL_DS); -- result = vfs_write(file, buffer, str - buffer, &pos); -+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos); - set_fs(old_fs); - if (result < 0) - goto out_kfree; -@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file, - int i; - - set_fs(KERNEL_DS); -- result = vfs_read(file, buffer, BUFSZ - 1, &pos); -+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos); - set_fs(old_fs); - if (result < 0) - goto out_kfree; -@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file, - } - - set_fs(KERNEL_DS); -- result = vfs_write(file, buffer, str - buffer, &pos); -+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos); - set_fs(old_fs); - if (result < 0) - goto out_kfree; -@@ -1138,7 +1138,7 @@ 
static ssize_t bin_uuid(struct file *file, - int i; - - set_fs(KERNEL_DS); -- result = vfs_read(file, buf, sizeof(buf) - 1, &pos); -+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos); - set_fs(old_fs); - if (result < 0) - goto out; -@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file, - __le16 dnaddr; - - set_fs(KERNEL_DS); -- result = vfs_read(file, buf, sizeof(buf) - 1, &pos); -+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos); - set_fs(old_fs); - if (result < 0) - goto out; -@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file, - le16_to_cpu(dnaddr) & 0x3ff); - - set_fs(KERNEL_DS); -- result = vfs_write(file, buf, len, &pos); -+ result = vfs_write(file, (const char __force_user *)buf, len, &pos); - set_fs(old_fs); - if (result < 0) - goto out; -diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c -index 362da65..ab8ef8c 100644 ---- a/kernel/sysctl_check.c -+++ b/kernel/sysctl_check.c -@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) - set_fail(&fail, table, "Directory with extra2"); - } else { - if ((table->proc_handler == proc_dostring) || -+ (table->proc_handler == proc_dostring_modpriv) || - (table->proc_handler == proc_dointvec) || - (table->proc_handler == proc_dointvec_minmax) || - (table->proc_handler == proc_dointvec_jiffies) || -diff --git a/kernel/taskstats.c b/kernel/taskstats.c -index e660464..c8b9e67 100644 ---- a/kernel/taskstats.c -+++ b/kernel/taskstats.c -@@ -27,9 +27,12 @@ - #include <linux/cgroup.h> - #include <linux/fs.h> - #include <linux/file.h> -+#include <linux/grsecurity.h> - #include <net/genetlink.h> - #include <linux/atomic.h> - -+extern int gr_is_taskstats_denied(int pid); -+ - /* - * Maximum length of a cpumask that can be specified in - * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute -@@ -556,6 +559,9 @@ err: - - static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) - { -+ if (gr_is_taskstats_denied(current->pid)) -+ return -EACCES; -+ - if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]) - return cmd_attr_register_cpumask(info); - else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]) -diff --git a/kernel/time.c b/kernel/time.c -index d776062..fa8d186 100644 ---- a/kernel/time.c -+++ b/kernel/time.c -@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) - return error; - - if (tz) { -+ /* we log in do_settimeofday called below, so don't log twice -+ */ -+ if (!tv) -+ gr_log_timechange(); -+ - /* SMP safe, global irq locking makes it work. */ - sys_tz = *tz; - update_vsyscall_tz(); -diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index ea5e1a9..8b8df07 100644 ---- a/kernel/time/alarmtimer.c -+++ b/kernel/time/alarmtimer.c -@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void) - { - int error = 0; - int i; -- struct k_clock alarm_clock = { -+ static struct k_clock alarm_clock = { - .clock_getres = alarm_clock_getres, - .clock_get = alarm_clock_get, - .timer_create = alarm_timer_create, -diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c -index 7a90d02..6d8585a 100644 ---- a/kernel/time/tick-broadcast.c -+++ b/kernel/time/tick-broadcast.c -@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) - * then clear the broadcast bit. 
- */ - if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { -- int cpu = smp_processor_id(); -+ cpu = smp_processor_id(); - - cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); - tick_broadcast_clear_oneshot(cpu); -diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index 6f9798b..f8c4087 100644 ---- a/kernel/time/timekeeping.c -+++ b/kernel/time/timekeeping.c -@@ -14,6 +14,7 @@ - #include <linux/init.h> - #include <linux/mm.h> - #include <linux/sched.h> -+#include <linux/grsecurity.h> - #include <linux/syscore_ops.h> - #include <linux/clocksource.h> - #include <linux/jiffies.h> -@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv) - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - -+ gr_log_timechange(); -+ - write_seqlock_irqsave(&xtime_lock, flags); - - timekeeping_forward_now(); -diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c -index 3258455..f35227d 100644 ---- a/kernel/time/timer_list.c -+++ b/kernel/time/timer_list.c -@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); - - static void print_name_offset(struct seq_file *m, void *sym) - { -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ SEQ_printf(m, "<%p>", NULL); -+#else - char symname[KSYM_NAME_LEN]; - - if (lookup_symbol_name((unsigned long)sym, symname) < 0) - SEQ_printf(m, "<%pK>", sym); - else - SEQ_printf(m, "%s", symname); -+#endif - } - - static void -@@ -112,7 +116,11 @@ next_one: - static void - print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) - { -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ SEQ_printf(m, " .base: %p\n", NULL); -+#else - SEQ_printf(m, " .base: %pK\n", base); -+#endif - SEQ_printf(m, " .index: %d\n", - base->index); - SEQ_printf(m, " .resolution: %Lu nsecs\n", -@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void) - { - struct proc_dir_entry *pe; - -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops); -+#else - pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); -+#endif - if (!pe) - return -ENOMEM; - return 0; -diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c -index a5d0a3a..60c7948 100644 ---- a/kernel/time/timer_stats.c -+++ b/kernel/time/timer_stats.c -@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop; - static unsigned long nr_entries; - static struct entry entries[MAX_ENTRIES]; - --static atomic_t overflow_count; -+static atomic_unchecked_t overflow_count; - - /* - * The entries are in a hash-table, for fast lookup: -@@ -140,7 +140,7 @@ static void reset_entries(void) - nr_entries = 0; - memset(entries, 0, sizeof(entries)); - memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); -- atomic_set(&overflow_count, 0); -+ atomic_set_unchecked(&overflow_count, 0); - } - - static struct entry *alloc_entry(void) -@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, - if (likely(entry)) - entry->count++; - else -- atomic_inc(&overflow_count); -+ atomic_inc_unchecked(&overflow_count); - - out_unlock: - raw_spin_unlock_irqrestore(lock, flags); -@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, - - static void print_name_offset(struct seq_file *m, unsigned long addr) - { -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ seq_printf(m, "<%p>", NULL); -+#else - char symname[KSYM_NAME_LEN]; - - if (lookup_symbol_name(addr, symname) < 0) - seq_printf(m, "<%p>", (void *)addr); - else - seq_printf(m, "%s", symname); -+#endif - } - - static int tstats_show(struct seq_file *m, 
void *v) -@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v) - - seq_puts(m, "Timer Stats Version: v0.2\n"); - seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); -- if (atomic_read(&overflow_count)) -+ if (atomic_read_unchecked(&overflow_count)) - seq_printf(m, "Overflow: %d entries\n", -- atomic_read(&overflow_count)); -+ atomic_read_unchecked(&overflow_count)); - - for (i = 0; i < nr_entries; i++) { - entry = entries + i; -@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void) - { - struct proc_dir_entry *pe; - -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops); -+#else - pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); -+#endif - if (!pe) - return -ENOMEM; - return 0; -diff --git a/kernel/timer.c b/kernel/timer.c -index 8cff361..0fb5cd8 100644 ---- a/kernel/timer.c -+++ b/kernel/timer.c -@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick) - /* - * This function runs timers and the timer-tq in bottom half context. - */ --static void run_timer_softirq(struct softirq_action *h) -+static void run_timer_softirq(void) - { - struct tvec_base *base = __this_cpu_read(tvec_bases); - -diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c -index 7c910a5..8b72104 100644 ---- a/kernel/trace/blktrace.c -+++ b/kernel/trace/blktrace.c -@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, - struct blk_trace *bt = filp->private_data; - char buf[16]; - -- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); -+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped)); - - return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - } -@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, - return 1; - - bt = buf->chan->private_data; -- atomic_inc(&bt->dropped); -+ atomic_inc_unchecked(&bt->dropped); - return 0; - } - -@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, - - bt->dir = dir; - bt->dev = dev; -- atomic_set(&bt->dropped, 0); -+ atomic_set_unchecked(&bt->dropped, 0); - - ret = -EIO; - bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, -diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c -index 48d3762..3b61fce 100644 ---- a/kernel/trace/ftrace.c -+++ b/kernel/trace/ftrace.c -@@ -1584,12 +1584,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) - if (unlikely(ftrace_disabled)) - return 0; - -+ ret = ftrace_arch_code_modify_prepare(); -+ FTRACE_WARN_ON(ret); -+ if (ret) -+ return 0; -+ - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); -+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process()); - if (ret) { - ftrace_bug(ret, ip); -- return 0; - } -- return 1; -+ return ret ? 
0 : 1; - } - - /* -@@ -2606,7 +2611,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp) - - int - register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, -- void *data) -+ void *data) - { - struct ftrace_func_probe *entry; - struct ftrace_page *pg; -diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 17a2d44..85907e2 100644 ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, - size_t rem; - unsigned int i; - -+ pax_track_stack(); -+ - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - -@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, - int entries, size, i; - size_t ret; - -+ pax_track_stack(); -+ - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - -@@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = { - }; - #endif - --static struct dentry *d_tracer; -- - struct dentry *tracing_init_dentry(void) - { -+ static struct dentry *d_tracer; - static int once; - - if (d_tracer) -@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void) - return d_tracer; - } - --static struct dentry *d_percpu; -- - struct dentry *tracing_dentry_percpu(void) - { -+ static struct dentry *d_percpu; - static int once; - struct dentry *d_tracer; - -diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index c212a7f..7b02394 100644 ---- a/kernel/trace/trace_events.c -+++ b/kernel/trace/trace_events.c -@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list); - struct ftrace_module_file_ops { - struct list_head list; - struct module *mod; -- struct file_operations id; -- struct file_operations enable; -- struct file_operations format; -- struct file_operations filter; - }; - - static struct ftrace_module_file_ops * -@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod) - - file_ops->mod = mod; - -- file_ops->id = ftrace_event_id_fops; -- file_ops->id.owner = mod; -- -- file_ops->enable = ftrace_enable_fops; -- file_ops->enable.owner = mod; -- -- file_ops->filter = ftrace_event_filter_fops; -- file_ops->filter.owner = mod; -- -- file_ops->format = ftrace_event_format_fops; -- file_ops->format.owner = mod; -+ pax_open_kernel(); -+ *(void **)&mod->trace_id.owner = mod; -+ *(void **)&mod->trace_enable.owner = mod; -+ *(void **)&mod->trace_filter.owner = mod; -+ *(void **)&mod->trace_format.owner = mod; -+ pax_close_kernel(); - - list_add(&file_ops->list, &ftrace_module_file_list); - -@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod) - - for_each_event(call, start, end) { - __trace_add_event_call(*call, mod, -- &file_ops->id, &file_ops->enable, -- &file_ops->filter, &file_ops->format); -+ &mod->trace_id, &mod->trace_enable, -+ &mod->trace_filter, &mod->trace_format); - } - } - -diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c -index 00d527c..7c5b1a3 100644 ---- a/kernel/trace/trace_kprobe.c -+++ b/kernel/trace/trace_kprobe.c -@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - long ret; - int maxlen = get_rloc_len(*(u32 *)dest); - u8 *dst = get_rloc_data(dest); -- u8 *src = addr; -+ const u8 __user *src = (const u8 __force_user *)addr; - mm_segment_t old_fs = get_fs(); - if (!maxlen) - return; -@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - pagefault_disable(); - do - ret = __copy_from_user_inatomic(dst++, src++, 1); -- while (dst[-1] && ret == 0 
&& src - (u8 *)addr < maxlen); -+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen); - dst[-1] = '\0'; - pagefault_enable(); - set_fs(old_fs); -@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - ((u8 *)get_rloc_data(dest))[0] = '\0'; - *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); - } else -- *(u32 *)dest = make_data_rloc(src - (u8 *)addr, -+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr, - get_rloc_offs(*(u32 *)dest)); - } - /* Return the length of string -- including null terminal byte */ -@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, - set_fs(KERNEL_DS); - pagefault_disable(); - do { -- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); -+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1); - len++; - } while (c && ret == 0 && len < MAX_STRING_SIZE); - pagefault_enable(); -diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c -index fd3c8aa..5f324a6 100644 ---- a/kernel/trace/trace_mmiotrace.c -+++ b/kernel/trace/trace_mmiotrace.c -@@ -24,7 +24,7 @@ struct header_iter { - static struct trace_array *mmio_trace_array; - static bool overrun_detected; - static unsigned long prev_overruns; --static atomic_t dropped_count; -+static atomic_unchecked_t dropped_count; - - static void mmio_reset_data(struct trace_array *tr) - { -@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter) - - static unsigned long count_overruns(struct trace_iterator *iter) - { -- unsigned long cnt = atomic_xchg(&dropped_count, 0); -+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0); - unsigned long over = ring_buffer_overruns(iter->tr->buffer); - - if (over > prev_overruns) -@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, - event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, - sizeof(*entry), 0, pc); - if (!event) { -- atomic_inc(&dropped_count); -+ atomic_inc_unchecked(&dropped_count); - return; - } - entry = ring_buffer_event_data(event); -@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, - event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, - sizeof(*entry), 0, pc); - if (!event) { -- atomic_inc(&dropped_count); -+ atomic_inc_unchecked(&dropped_count); - return; - } - entry = ring_buffer_event_data(event); -diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c -index 5199930..26c73a0 100644 ---- a/kernel/trace/trace_output.c -+++ b/kernel/trace/trace_output.c -@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path) - - p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); - if (!IS_ERR(p)) { -- p = mangle_path(s->buffer + s->len, p, "\n"); -+ p = mangle_path(s->buffer + s->len, p, "\n\"); - if (p) { - s->len = p - s->buffer; - return 1; -diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c -index 77575b3..6e623d1 100644 ---- a/kernel/trace/trace_stack.c -+++ b/kernel/trace/trace_stack.c -@@ -50,7 +50,7 @@ static inline void check_stack(void) - return; - - /* we do not handle interrupt stacks yet */ -- if (!object_is_on_stack(&this_size)) -+ if (!object_starts_on_stack(&this_size)) - return; - - local_irq_save(flags); -diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c -index 209b379..7f76423 100644 ---- a/kernel/trace/trace_workqueue.c -+++ b/kernel/trace/trace_workqueue.c -@@ -22,7 +22,7 @@ struct cpu_workqueue_stats { 
- int cpu; - pid_t pid; - /* Can be inserted from interrupt or user context, need to be atomic */ -- atomic_t inserted; -+ atomic_unchecked_t inserted; - /* - * Don't need to be atomic, works are serialized in a single workqueue thread - * on a single CPU. -@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore, - spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); - list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { - if (node->pid == wq_thread->pid) { -- atomic_inc(&node->inserted); -+ atomic_inc_unchecked(&node->inserted); - goto found; - } - } -@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p) - tsk = get_pid_task(pid, PIDTYPE_PID); - if (tsk) { - seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, -- atomic_read(&cws->inserted), cws->executed, -+ atomic_read_unchecked(&cws->inserted), cws->executed, - tsk->comm); - put_task_struct(tsk); - } -diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index c0cb9c4..f33aa89 100644 ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -1091,6 +1091,7 @@ config LATENCYTOP - depends on DEBUG_KERNEL - depends on STACKTRACE_SUPPORT - depends on PROC_FS -+ depends on !GRKERNSEC_HIDESYM - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE - select KALLSYMS - select KALLSYMS_ALL -diff --git a/lib/bitmap.c b/lib/bitmap.c -index 2f4412e..a557e27 100644 ---- a/lib/bitmap.c -+++ b/lib/bitmap.c -@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, - { - int c, old_c, totaldigits, ndigits, nchunks, nbits; - u32 chunk; -- const char __user *ubuf = buf; -+ const char __user *ubuf = (const char __force_user *)buf; - - bitmap_zero(maskp, nmaskbits); - -@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf, - { - if (!access_ok(VERIFY_READ, ubuf, ulen)) - return -EFAULT; -- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits); -+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits); - } - EXPORT_SYMBOL(bitmap_parse_user); - -@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, - { - unsigned a, b; - int c, old_c, totaldigits; -- const char __user *ubuf = buf; -+ const char __user *ubuf = (const char __force_user *)buf; - int exp_digit, in_range; - - totaldigits = c = 0; -@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf, - { - if (!access_ok(VERIFY_READ, ubuf, ulen)) - return -EFAULT; -- return __bitmap_parselist((const char *)ubuf, -+ return __bitmap_parselist((const char __force_kernel *)ubuf, - ulen, 1, maskp, nmaskbits); - } - EXPORT_SYMBOL(bitmap_parselist_user); -diff --git a/lib/bug.c b/lib/bug.c -index 1955209..cbbb2ad 100644 ---- a/lib/bug.c -+++ b/lib/bug.c -@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) - return BUG_TRAP_TYPE_NONE; - - bug = find_bug(bugaddr); -+ if (!bug) -+ return BUG_TRAP_TYPE_NONE; - - file = NULL; - line = 0; -diff --git a/lib/debugobjects.c b/lib/debugobjects.c -index a78b7c6..2c73084 100644 ---- a/lib/debugobjects.c -+++ b/lib/debugobjects.c -@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack) - if (limit > 4) - return; - -- is_on_stack = object_is_on_stack(addr); -+ is_on_stack = object_starts_on_stack(addr); - if (is_on_stack == onstack) - return; - -diff --git a/lib/devres.c b/lib/devres.c -index 7c0e953..f642b5c 100644 ---- a/lib/devres.c -+++ b/lib/devres.c -@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache); - void devm_iounmap(struct device *dev, void 
__iomem *addr) - { - WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, -- (void *)addr)); -+ (void __force *)addr)); - iounmap(addr); - } - EXPORT_SYMBOL(devm_iounmap); -@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) - { - ioport_unmap(addr); - WARN_ON(devres_destroy(dev, devm_ioport_map_release, -- devm_ioport_map_match, (void *)addr)); -+ devm_ioport_map_match, (void __force *)addr)); - } - EXPORT_SYMBOL(devm_ioport_unmap); - -diff --git a/lib/dma-debug.c b/lib/dma-debug.c -index db07bfd..719b5ab 100644 ---- a/lib/dma-debug.c -+++ b/lib/dma-debug.c -@@ -870,7 +870,7 @@ out: - - static void check_for_stack(struct device *dev, void *addr) - { -- if (object_is_on_stack(addr)) -+ if (object_starts_on_stack(addr)) - err_printk(dev, NULL, "DMA-API: device driver maps memory from" - "stack [addr=%p]\n", addr); - } -diff --git a/lib/extable.c b/lib/extable.c -index 4cac81e..63e9b8f 100644 ---- a/lib/extable.c -+++ b/lib/extable.c -@@ -13,6 +13,7 @@ - #include <linux/init.h> - #include <linux/sort.h> - #include <asm/uaccess.h> -+#include <asm/pgtable.h> - - #ifndef ARCH_HAS_SORT_EXTABLE - /* -@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b) - void sort_extable(struct exception_table_entry *start, - struct exception_table_entry *finish) - { -+ pax_open_kernel(); - sort(start, finish - start, sizeof(struct exception_table_entry), - cmp_ex, NULL); -+ pax_close_kernel(); - } - - #ifdef CONFIG_MODULES -diff --git a/lib/inflate.c b/lib/inflate.c -index 013a761..c28f3fc 100644 ---- a/lib/inflate.c -+++ b/lib/inflate.c -@@ -269,7 +269,7 @@ static void free(void *where) - malloc_ptr = free_mem_ptr; - } - #else --#define malloc(a) kmalloc(a, GFP_KERNEL) -+#define malloc(a) kmalloc((a), GFP_KERNEL) - #define free(a) kfree(a) - #endif - -diff --git a/lib/kref.c b/lib/kref.c -index 3efb882..8492f4c 100644 ---- a/lib/kref.c -+++ b/lib/kref.c -@@ -52,7 +52,7 @@ void kref_get(struct kref *kref) - */ - int kref_put(struct kref *kref, void (*release)(struct kref *kref)) - { -- WARN_ON(release == NULL); -+ BUG_ON(release == NULL); - WARN_ON(release == (void (*)(struct kref *))kfree); - - if (atomic_dec_and_test(&kref->refcount)) { -diff --git a/lib/radix-tree.c b/lib/radix-tree.c -index a2f9da5..3bcadb6 100644 ---- a/lib/radix-tree.c -+++ b/lib/radix-tree.c -@@ -80,7 +80,7 @@ struct radix_tree_preload { - int nr; - struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; - }; --static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; -+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); - - static inline void *ptr_to_indirect(void *ptr) - { -diff --git a/lib/vsprintf.c b/lib/vsprintf.c -index d7222a9..2172edc 100644 ---- a/lib/vsprintf.c -+++ b/lib/vsprintf.c -@@ -16,6 +16,9 @@ - * - scnprintf and vscnprintf - */ - -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+#define __INCLUDED_BY_HIDESYM 1 -+#endif - #include <stdarg.h> - #include <linux/module.h> - #include <linux/types.h> -@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr, - char sym[KSYM_SYMBOL_LEN]; - if (ext == 'B') - sprint_backtrace(sym, value); -- else if (ext != 'f' && ext != 's') -+ else if (ext != 'f' && ext != 's' && ext != 'a') - sprint_symbol(sym, value); - else - kallsyms_lookup(value, NULL, NULL, NULL, sym); -@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr, - return string(buf, end, uuid, spec); - } - -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+int kptr_restrict __read_mostly = 2; -+#else - int 
kptr_restrict __read_mostly; -+#endif - - /* - * Show a '%p' thing. A kernel extension is that the '%p' is followed -@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly; - * - 'S' For symbolic direct pointers with offset - * - 's' For symbolic direct pointers without offset - * - 'B' For backtraced symbolic direct pointers with offset -+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM -+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM - * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] - * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] - * - 'M' For a 6-byte MAC address, it prints the address in the -@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, - { - if (!ptr && *fmt != 'K') { - /* -- * Print (null) with the same width as a pointer so it makes -+ * Print (nil) with the same width as a pointer so it makes - * tabular output look nice. - */ - if (spec.field_width == -1) - spec.field_width = 2 * sizeof(void *); -- return string(buf, end, "(null)", spec); -+ return string(buf, end, "(nil)", spec); - } - - switch (*fmt) { -@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, - /* Fallthrough */ - case 'S': - case 's': -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ break; -+#else -+ return symbol_string(buf, end, ptr, spec, *fmt); -+#endif -+ case 'A': -+ case 'a': - case 'B': - return symbol_string(buf, end, ptr, spec, *fmt); - case 'R': -@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) - typeof(type) value; \ - if (sizeof(type) == 8) { \ - args = PTR_ALIGN(args, sizeof(u32)); \ -- *(u32 *)&value = *(u32 *)args; \ -- *((u32 *)&value + 1) = *(u32 *)(args + 4); \ -+ *(u32 *)&value = *(const u32 *)args; \ -+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \ - } else { \ - args = PTR_ALIGN(args, sizeof(type)); \ -- value = *(typeof(type) *)args; \ -+ value = *(const typeof(type) *)args; \ - } \ - args += sizeof(type); \ - value; \ -@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) - case FORMAT_TYPE_STR: { - const char *str_arg = args; - args += strlen(str_arg) + 1; -- str = string(str, end, (char *)str_arg, spec); -+ str = string(str, end, str_arg, spec); - break; - } - -diff --git a/localversion-grsec b/localversion-grsec -new file mode 100644 -index 0000000..7cd6065 ---- /dev/null -+++ b/localversion-grsec -@@ -0,0 +1 @@ -+-grsec -diff --git a/mm/Kconfig b/mm/Kconfig -index f2f1ca1..0645f06 100644 ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -238,10 +238,10 @@ config KSM - root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). - - config DEFAULT_MMAP_MIN_ADDR -- int "Low address space to protect from user allocation" -+ int "Low address space to protect from user allocation" - depends on MMU -- default 4096 -- help -+ default 65536 -+ help - This is the portion of low virtual memory which should be protected - from userspace allocation. Keeping a user from writing to low pages - can help reduce the impact of kernel NULL pointer bugs. 
-diff --git a/mm/filemap.c b/mm/filemap.c -index 7771871..91bcdb4 100644 ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) - struct address_space *mapping = file->f_mapping; - - if (!mapping->a_ops->readpage) -- return -ENOEXEC; -+ return -ENODEV; - file_accessed(file); - vma->vm_ops = &generic_file_vm_ops; - vma->vm_flags |= VM_CAN_NONLINEAR; -@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i - *pos = i_size_read(inode); - - if (limit != RLIM_INFINITY) { -+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0); - if (*pos >= limit) { - send_sig(SIGXFSZ, current, 0); - return -EFBIG; -diff --git a/mm/fremap.c b/mm/fremap.c -index b8e0e2d..076e171 100644 ---- a/mm/fremap.c -+++ b/mm/fremap.c -@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, - retry: - vma = find_vma(mm, start); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) -+ goto out; -+#endif -+ - /* - * Make sure the vma is shared, that it supports prefaulting, - * and that the remapped range is valid and fully within -diff --git a/mm/highmem.c b/mm/highmem.c -index 5ef672c..d7660f4 100644 ---- a/mm/highmem.c -+++ b/mm/highmem.c -@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void) - * So no dangers, even with speculative execution. - */ - page = pte_page(pkmap_page_table[i]); -+ pax_open_kernel(); - pte_clear(&init_mm, (unsigned long)page_address(page), - &pkmap_page_table[i]); -- -+ pax_close_kernel(); - set_page_address(page, NULL); - need_flush = 1; - } -@@ -186,9 +187,11 @@ start: - } - } - vaddr = PKMAP_ADDR(last_pkmap_nr); -+ -+ pax_open_kernel(); - set_pte_at(&init_mm, vaddr, - &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); -- -+ pax_close_kernel(); - pkmap_count[last_pkmap_nr] = 1; - set_page_address(page, (void *)vaddr); - -diff --git a/mm/huge_memory.c b/mm/huge_memory.c -index d819d93..468e18f 100644 ---- a/mm/huge_memory.c -+++ b/mm/huge_memory.c -@@ -702,7 +702,7 @@ out: - * run pte_offset_map on the pmd, if an huge pmd could - * materialize from under us from a different thread. 
- */ -- if (unlikely(__pte_alloc(mm, vma, pmd, address))) -+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address))) - return VM_FAULT_OOM; - /* if an huge pmd materialized from under us just retry later */ - if (unlikely(pmd_trans_huge(*pmd))) -@@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, - - for (i = 0; i < HPAGE_PMD_NR; i++) { - copy_user_highpage(pages[i], page + i, -- haddr + PAGE_SHIFT*i, vma); -+ haddr + PAGE_SIZE*i, vma); - __SetPageUptodate(pages[i]); - cond_resched(); - } -diff --git a/mm/hugetlb.c b/mm/hugetlb.c -index bb28a5f..fef0140 100644 ---- a/mm/hugetlb.c -+++ b/mm/hugetlb.c -@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) - __SetPageHead(page); - for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { - __SetPageTail(p); -+ set_page_count(p, 0); - p->first_page = page; - } - } -@@ -2346,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, - return 1; - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ struct vm_area_struct *vma_m; -+ unsigned long address_m; -+ pte_t *ptep_m; -+ -+ vma_m = pax_find_mirror_vma(vma); -+ if (!vma_m) -+ return; -+ -+ BUG_ON(address >= SEGMEXEC_TASK_SIZE); -+ address_m = address + SEGMEXEC_TASK_SIZE; -+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); -+ get_page(page_m); -+ hugepage_add_anon_rmap(page_m, vma_m, address_m); -+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); -+} -+#endif -+ - /* - * Hugetlb_cow() should be called with page lock of the original hugepage held. - */ -@@ -2449,6 +2471,11 @@ retry_avoidcopy: - make_huge_pte(vma, new_page, 1)); - page_remove_rmap(old_page); - hugepage_add_new_anon_rmap(new_page, vma, address); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_huge_pte(vma, address, new_page); -+#endif -+ - /* Make the old page be freed below */ - new_page = old_page; - mmu_notifier_invalidate_range_end(mm, -@@ -2600,6 +2627,10 @@ retry: - && (vma->vm_flags & VM_SHARED))); - set_huge_pte_at(mm, address, ptep, new_pte); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_huge_pte(vma, address, page); -+#endif -+ - if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { - /* Optimization, do the COW without a second fault */ - ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); -@@ -2629,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, - static DEFINE_MUTEX(hugetlb_instantiation_mutex); - struct hstate *h = hstate_vma(vma); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - ptep = huge_pte_offset(mm, address); - if (ptep) { - entry = huge_ptep_get(ptep); -@@ -2640,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, - VM_FAULT_SET_HINDEX(h - hstates); - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) { -+ unsigned long address_m; -+ -+ if (vma->vm_start > vma_m->vm_start) { -+ address_m = address; -+ address -= SEGMEXEC_TASK_SIZE; -+ vma = vma_m; -+ h = hstate_vma(vma); -+ } else -+ address_m = address + SEGMEXEC_TASK_SIZE; -+ -+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h))) -+ return VM_FAULT_OOM; -+ address_m &= HPAGE_MASK; -+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL); -+ } -+#endif -+ - ptep = huge_pte_alloc(mm, address, huge_page_size(h)); - if 
(!ptep) - return VM_FAULT_OOM; -diff --git a/mm/internal.h b/mm/internal.h -index 2189af4..f2ca332 100644 ---- a/mm/internal.h -+++ b/mm/internal.h -@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page); - * in mm/page_alloc.c - */ - extern void __free_pages_bootmem(struct page *page, unsigned int order); -+extern void free_compound_page(struct page *page); - extern void prep_compound_page(struct page *page, unsigned long order); - #ifdef CONFIG_MEMORY_FAILURE - extern bool is_free_buddy_page(struct page *page); -diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index d6880f5..ed77913 100644 ---- a/mm/kmemleak.c -+++ b/mm/kmemleak.c -@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq, - - for (i = 0; i < object->trace_len; i++) { - void *ptr = (void *)object->trace[i]; -- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); -+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr); - } - } - -diff --git a/mm/maccess.c b/mm/maccess.c -index 4cee182..e00511d 100644 ---- a/mm/maccess.c -+++ b/mm/maccess.c -@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) - set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_from_user_inatomic(dst, -- (__force const void __user *)src, size); -+ (const void __force_user *)src, size); - pagefault_enable(); - set_fs(old_fs); - -@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) - - set_fs(KERNEL_DS); - pagefault_disable(); -- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); -+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); - pagefault_enable(); - set_fs(old_fs); - -diff --git a/mm/madvise.c b/mm/madvise.c -index 74bf193..feb6fd3 100644 ---- a/mm/madvise.c -+++ b/mm/madvise.c -@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma, - pgoff_t pgoff; - unsigned long new_flags = vma->vm_flags; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - switch (behavior) { - case MADV_NORMAL: - new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; -@@ -110,6 +114,13 @@ success: - /* - * vm_flags is protected by the mmap_sem held in write mode. 
- */ -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) -+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); -+#endif -+ - vma->vm_flags = new_flags; - - out: -@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma, - struct vm_area_struct ** prev, - unsigned long start, unsigned long end) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - *prev = vma; - if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) - return -EINVAL; -@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma, - zap_page_range(vma, start, end - start, &details); - } else - zap_page_range(vma, start, end - start, NULL); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) { -+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) { -+ struct zap_details details = { -+ .nonlinear_vma = vma_m, -+ .last_index = ULONG_MAX, -+ }; -+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details); -+ } else -+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL); -+ } -+#endif -+ - return 0; - } - -@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) - if (end < start) - goto out; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { -+ if (end > SEGMEXEC_TASK_SIZE) -+ goto out; -+ } else -+#endif -+ -+ if (end > TASK_SIZE) -+ goto out; -+ - error = 0; - if (end == start) - goto out; -diff --git a/mm/memory-failure.c b/mm/memory-failure.c -index 2b43ba0..fc09657 100644 ---- a/mm/memory-failure.c -+++ b/mm/memory-failure.c -@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0; - - int sysctl_memory_failure_recovery __read_mostly = 1; - --atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); -+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); - - #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) - -@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_MCEERR_AO; -- si.si_addr = (void *)addr; -+ si.si_addr = (void __user *)addr; - #ifdef __ARCH_SI_TRAPNO - si.si_trapno = trapno; - #endif -@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) - } - - nr_pages = 1 << compound_trans_order(hpage); -- atomic_long_add(nr_pages, &mce_bad_pages); -+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages); - - /* - * We need/can do nothing about count=0 pages. 
-@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) - if (!PageHWPoison(hpage) - || (hwpoison_filter(p) && TestClearPageHWPoison(p)) - || (p != hpage && TestSetPageHWPoison(hpage))) { -- atomic_long_sub(nr_pages, &mce_bad_pages); -+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); - return 0; - } - set_page_hwpoison_huge_page(hpage); -@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) - } - if (hwpoison_filter(p)) { - if (TestClearPageHWPoison(p)) -- atomic_long_sub(nr_pages, &mce_bad_pages); -+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); - unlock_page(hpage); - put_page(hpage); - return 0; -@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn) - return 0; - } - if (TestClearPageHWPoison(p)) -- atomic_long_sub(nr_pages, &mce_bad_pages); -+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); - pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); - return 0; - } -@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn) - */ - if (TestClearPageHWPoison(page)) { - pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); -- atomic_long_sub(nr_pages, &mce_bad_pages); -+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); - freeit = 1; - if (PageHuge(page)) - clear_page_hwpoison_huge_page(page); -@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags) - } - done: - if (!PageHWPoison(hpage)) -- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); -+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages); - set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - /* keep elevated page count for bad page */ -@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags) - return ret; - - done: -- atomic_long_add(1, &mce_bad_pages); -+ atomic_long_add_unchecked(1, &mce_bad_pages); - SetPageHWPoison(page); - /* keep elevated page count for bad page */ - return ret; -diff --git a/mm/memory.c b/mm/memory.c -index b2b8731..6080174 100644 ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, - return; - - pmd = pmd_offset(pud, start); -+ -+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD) - pud_clear(pud); - pmd_free_tlb(tlb, pmd, start); -+#endif -+ - } - - static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, -@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, - if (end - 1 > ceiling - 1) - return; - -+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD) - pud = pud_offset(pgd, start); - pgd_clear(pgd); - pud_free_tlb(tlb, pud, start); -+#endif -+ - } - - /* -@@ -1566,12 +1573,6 @@ no_page_table: - return page; - } - --static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) --{ -- return stack_guard_page_start(vma, addr) || -- stack_guard_page_end(vma, addr+PAGE_SIZE); --} -- - /** - * __get_user_pages() - pin user pages in memory - * @tsk: task_struct of target task -@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); - i = 0; - -- do { -+ while (nr_pages) { - struct vm_area_struct *vma; - -- vma = find_extend_vma(mm, start); -+ vma = find_vma(mm, start); - if (!vma && in_gate_area(mm, start)) { - unsigned long pg = start & PAGE_MASK; - pgd_t *pgd; -@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - 
goto next_page; - } - -- if (!vma || -+ if (!vma || start < vma->vm_start || - (vma->vm_flags & (VM_IO | VM_PFNMAP)) || - !(vm_flags & vma->vm_flags)) - return i ? : -EFAULT; -@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, - int ret; - unsigned int fault_flags = 0; - -- /* For mlock, just skip the stack guard page. */ -- if (foll_flags & FOLL_MLOCK) { -- if (stack_guard_page(vma, start)) -- goto next_page; -- } - if (foll_flags & FOLL_WRITE) - fault_flags |= FAULT_FLAG_WRITE; - if (nonblocking) -@@ -1800,7 +1796,7 @@ next_page: - start += PAGE_SIZE; - nr_pages--; - } while (nr_pages && start < vma->vm_end); -- } while (nr_pages); -+ } - return i; - } - EXPORT_SYMBOL(__get_user_pages); -@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, - page_add_file_rmap(page); - set_pte_at(mm, addr, pte, mk_pte(page, prot)); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_file_pte(vma, addr, page, ptl); -+#endif -+ - retval = 0; - pte_unmap_unlock(pte, ptl); - return retval; -@@ -2041,10 +2041,22 @@ out: - int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, - struct page *page) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - if (addr < vma->vm_start || addr >= vma->vm_end) - return -EFAULT; - if (!page_count(page)) - return -EINVAL; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) -+ vma_m->vm_flags |= VM_INSERTPAGE; -+#endif -+ - vma->vm_flags |= VM_INSERTPAGE; - return insert_page(vma, addr, page, vma->vm_page_prot); - } -@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn) - { - BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); -+ BUG_ON(vma->vm_mirror); - - if (addr < vma->vm_start || addr >= vma->vm_end) - return -EFAULT; -@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo - copy_user_highpage(dst, src, va, vma); - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ spinlock_t *ptl; -+ pte_t *pte, entry; -+ -+ pte = pte_offset_map_lock(mm, pmd, address, &ptl); -+ entry = *pte; -+ if (!pte_present(entry)) { -+ if (!pte_none(entry)) { -+ BUG_ON(pte_file(entry)); -+ free_swap_and_cache(pte_to_swp_entry(entry)); -+ pte_clear_not_present_full(mm, address, pte, 0); -+ } -+ } else { -+ struct page *page; -+ -+ flush_cache_page(vma, address, pte_pfn(entry)); -+ entry = ptep_clear_flush(vma, address, pte); -+ BUG_ON(pte_dirty(entry)); -+ page = vm_normal_page(vma, address, entry); -+ if (page) { -+ update_hiwater_rss(mm); -+ if (PageAnon(page)) -+ dec_mm_counter_fast(mm, MM_ANONPAGES); -+ else -+ dec_mm_counter_fast(mm, MM_FILEPAGES); -+ page_remove_rmap(page); -+ page_cache_release(page); -+ } -+ } -+ pte_unmap_unlock(pte, ptl); -+} -+ -+/* PaX: if vma is mirrored, synchronize the mirror's PTE -+ * -+ * the ptl of the lower mapped page is held on entry and is not released on exit -+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) -+ */ -+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ unsigned long address_m; -+ spinlock_t *ptl_m; -+ struct vm_area_struct *vma_m; -+ pmd_t *pmd_m; -+ pte_t *pte_m, entry_m; -+ -+ BUG_ON(!page_m || !PageAnon(page_m)); -+ -+ vma_m = 
pax_find_mirror_vma(vma); -+ if (!vma_m) -+ return; -+ -+ BUG_ON(!PageLocked(page_m)); -+ BUG_ON(address >= SEGMEXEC_TASK_SIZE); -+ address_m = address + SEGMEXEC_TASK_SIZE; -+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); -+ pte_m = pte_offset_map(pmd_m, address_m); -+ ptl_m = pte_lockptr(mm, pmd_m); -+ if (ptl != ptl_m) { -+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); -+ if (!pte_none(*pte_m)) -+ goto out; -+ } -+ -+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); -+ page_cache_get(page_m); -+ page_add_anon_rmap(page_m, vma_m, address_m); -+ inc_mm_counter_fast(mm, MM_ANONPAGES); -+ set_pte_at(mm, address_m, pte_m, entry_m); -+ update_mmu_cache(vma_m, address_m, entry_m); -+out: -+ if (ptl != ptl_m) -+ spin_unlock(ptl_m); -+ pte_unmap(pte_m); -+ unlock_page(page_m); -+} -+ -+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ unsigned long address_m; -+ spinlock_t *ptl_m; -+ struct vm_area_struct *vma_m; -+ pmd_t *pmd_m; -+ pte_t *pte_m, entry_m; -+ -+ BUG_ON(!page_m || PageAnon(page_m)); -+ -+ vma_m = pax_find_mirror_vma(vma); -+ if (!vma_m) -+ return; -+ -+ BUG_ON(address >= SEGMEXEC_TASK_SIZE); -+ address_m = address + SEGMEXEC_TASK_SIZE; -+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); -+ pte_m = pte_offset_map(pmd_m, address_m); -+ ptl_m = pte_lockptr(mm, pmd_m); -+ if (ptl != ptl_m) { -+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); -+ if (!pte_none(*pte_m)) -+ goto out; -+ } -+ -+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); -+ page_cache_get(page_m); -+ page_add_file_rmap(page_m); -+ inc_mm_counter_fast(mm, MM_FILEPAGES); -+ set_pte_at(mm, address_m, pte_m, entry_m); -+ update_mmu_cache(vma_m, address_m, entry_m); -+out: -+ if (ptl != ptl_m) -+ spin_unlock(ptl_m); -+ pte_unmap(pte_m); -+} -+ -+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) -+{ -+ struct mm_struct *mm = vma->vm_mm; -+ unsigned long address_m; -+ spinlock_t *ptl_m; -+ struct vm_area_struct *vma_m; -+ pmd_t *pmd_m; -+ pte_t *pte_m, entry_m; -+ -+ vma_m = pax_find_mirror_vma(vma); -+ if (!vma_m) -+ return; -+ -+ BUG_ON(address >= SEGMEXEC_TASK_SIZE); -+ address_m = address + SEGMEXEC_TASK_SIZE; -+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); -+ pte_m = pte_offset_map(pmd_m, address_m); -+ ptl_m = pte_lockptr(mm, pmd_m); -+ if (ptl != ptl_m) { -+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); -+ if (!pte_none(*pte_m)) -+ goto out; -+ } -+ -+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); -+ set_pte_at(mm, address_m, pte_m, entry_m); -+out: -+ if (ptl != ptl_m) -+ spin_unlock(ptl_m); -+ pte_unmap(pte_m); -+} -+ -+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl) -+{ -+ struct page *page_m; -+ pte_t entry; -+ -+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) -+ goto out; -+ -+ entry = *pte; -+ page_m = vm_normal_page(vma, address, entry); -+ if (!page_m) -+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); -+ else if (PageAnon(page_m)) { -+ if (pax_find_mirror_vma(vma)) { -+ pte_unmap_unlock(pte, ptl); -+ lock_page(page_m); -+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); -+ if (pte_same(entry, *pte)) -+ pax_mirror_anon_pte(vma, address, page_m, ptl); -+ else -+ unlock_page(page_m); -+ } -+ } else -+ 
pax_mirror_file_pte(vma, address, page_m, ptl); -+ -+out: -+ pte_unmap_unlock(pte, ptl); -+} -+#endif -+ - /* - * This routine handles present pages, when users try to write - * to a shared page. It is done by copying the page to a new address -@@ -2656,6 +2849,12 @@ gotten: - */ - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (likely(pte_same(*page_table, orig_pte))) { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (pax_find_mirror_vma(vma)) -+ BUG_ON(!trylock_page(new_page)); -+#endif -+ - if (old_page) { - if (!PageAnon(old_page)) { - dec_mm_counter_fast(mm, MM_FILEPAGES); -@@ -2707,6 +2906,10 @@ gotten: - page_remove_rmap(old_page); - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_anon_pte(vma, address, new_page, ptl); -+#endif -+ - /* Free the old page.. */ - new_page = old_page; - ret |= VM_FAULT_WRITE; -@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, - swap_free(entry); - if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) - try_to_free_swap(page); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma)) -+#endif -+ - unlock_page(page); - if (swapcache) { - /* -@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, - - /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, page_table); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_anon_pte(vma, address, page, ptl); -+#endif -+ - unlock: - pte_unmap_unlock(page_table, ptl); - out: -@@ -3028,40 +3241,6 @@ out_release: - } - - /* -- * This is like a special single-page "expand_{down|up}wards()", -- * except we must first make sure that 'address{-|+}PAGE_SIZE' -- * doesn't hit another vma. -- */ --static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) --{ -- address &= PAGE_MASK; -- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { -- struct vm_area_struct *prev = vma->vm_prev; -- -- /* -- * Is there a mapping abutting this one below? -- * -- * That's only ok if it's the same stack mapping -- * that has gotten split.. -- */ -- if (prev && prev->vm_end == address) -- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; -- -- expand_downwards(vma, address - PAGE_SIZE); -- } -- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { -- struct vm_area_struct *next = vma->vm_next; -- -- /* As VM_GROWSDOWN but s/below/above/ */ -- if (next && next->vm_start == address + PAGE_SIZE) -- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; -- -- expand_upwards(vma, address + PAGE_SIZE); -- } -- return 0; --} -- --/* - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with mmap_sem still held, but pte unmapped and unlocked. 
-@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - unsigned int flags) - { -- struct page *page; -+ struct page *page = NULL; - spinlock_t *ptl; - pte_t entry; - -- pte_unmap(page_table); -- -- /* Check if we need to add a guard page to the stack */ -- if (check_stack_guard_page(vma, address) < 0) -- return VM_FAULT_SIGBUS; -- -- /* Use the zero-page for reads */ - if (!(flags & FAULT_FLAG_WRITE)) { - entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), - vma->vm_page_prot)); -- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); -+ ptl = pte_lockptr(mm, pmd); -+ spin_lock(ptl); - if (!pte_none(*page_table)) - goto unlock; - goto setpte; - } - - /* Allocate our own private page. */ -+ pte_unmap(page_table); -+ - if (unlikely(anon_vma_prepare(vma))) - goto oom; - page = alloc_zeroed_user_highpage_movable(vma, address); -@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, - if (!pte_none(*page_table)) - goto release; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (pax_find_mirror_vma(vma)) -+ BUG_ON(!trylock_page(page)); -+#endif -+ - inc_mm_counter_fast(mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, address); - setpte: -@@ -3116,6 +3296,12 @@ setpte: - - /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, page_table); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (page) -+ pax_mirror_anon_pte(vma, address, page, ptl); -+#endif -+ - unlock: - pte_unmap_unlock(page_table, ptl); - return 0; -@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, - */ - /* Only go through if we didn't race with anybody else... */ - if (likely(pte_same(*page_table, orig_pte))) { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (anon && pax_find_mirror_vma(vma)) -+ BUG_ON(!trylock_page(page)); -+#endif -+ - flush_icache_page(vma, page); - entry = mk_pte(page, vma->vm_page_prot); - if (flags & FAULT_FLAG_WRITE) -@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, - - /* no need to invalidate: a not-present page won't be cached */ - update_mmu_cache(vma, address, page_table); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (anon) -+ pax_mirror_anon_pte(vma, address, page, ptl); -+ else -+ pax_mirror_file_pte(vma, address, page, ptl); -+#endif -+ - } else { - if (cow_page) - mem_cgroup_uncharge_page(cow_page); -@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm, - if (flags & FAULT_FLAG_WRITE) - flush_tlb_fix_spurious_fault(vma, address); - } -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ pax_mirror_pte(vma, address, pte, pmd, ptl); -+ return 0; -+#endif -+ - unlock: - pte_unmap_unlock(pte, ptl); - return 0; -@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - pmd_t *pmd; - pte_t *pte; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - __set_current_state(TASK_RUNNING); - - count_vm_event(PGFAULT); -@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - if (unlikely(is_vm_hugetlb_page(vma))) - return hugetlb_fault(mm, vma, address, flags); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) { -+ unsigned long address_m; -+ pgd_t *pgd_m; -+ pud_t *pud_m; -+ pmd_t *pmd_m; -+ -+ if (vma->vm_start > vma_m->vm_start) { -+ address_m = address; -+ address -= SEGMEXEC_TASK_SIZE; -+ vma = vma_m; -+ } else -+ address_m = address + 
SEGMEXEC_TASK_SIZE; -+ -+ pgd_m = pgd_offset(mm, address_m); -+ pud_m = pud_alloc(mm, pgd_m, address_m); -+ if (!pud_m) -+ return VM_FAULT_OOM; -+ pmd_m = pmd_alloc(mm, pud_m, address_m); -+ if (!pmd_m) -+ return VM_FAULT_OOM; -+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m)) -+ return VM_FAULT_OOM; -+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m); -+ } -+#endif -+ - pgd = pgd_offset(mm, address); - pud = pud_alloc(mm, pgd, address); - if (!pud) -@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - * run pte_offset_map on the pmd, if an huge pmd could - * materialize from under us from a different thread. - */ -- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address)) -+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address))) - return VM_FAULT_OOM; - /* if an huge pmd materialized from under us just retry later */ - if (unlikely(pmd_trans_huge(*pmd))) -@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void) - gate_vma.vm_start = FIXADDR_USER_START; - gate_vma.vm_end = FIXADDR_USER_END; - gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; -- gate_vma.vm_page_prot = __P101; -+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); - /* - * Make sure the vDSO gets into every core dump. - * Dumping its contents makes post-mortem fully interpretable later -diff --git a/mm/mempolicy.c b/mm/mempolicy.c -index 9c51f9f..a9416cf 100644 ---- a/mm/mempolicy.c -+++ b/mm/mempolicy.c -@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, - unsigned long vmstart; - unsigned long vmend; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+#endif -+ - vma = find_vma_prev(mm, start, &prev); - if (!vma || vma->vm_start > start) - return -EFAULT; -@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, - err = policy_vma(vma, new_pol); - if (err) - goto out; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+ if (vma_m) { -+ err = policy_vma(vma_m, new_pol); -+ if (err) -+ goto out; -+ } -+#endif -+ - } - - out: -@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start, unsigned long len, - - if (end < start) - return -EINVAL; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) { -+ if (end > SEGMEXEC_TASK_SIZE) -+ return -EINVAL; -+ } else -+#endif -+ -+ if (end > TASK_SIZE) -+ return -EINVAL; -+ - if (end == start) - return 0; - -@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, - if (!mm) - goto out; - -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (mm != current->mm && -+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { -+ err = -EPERM; -+ goto out; -+ } -+#endif -+ - /* - * Check if this process has the right to modify the specified - * process. 
The right exists if the process has administrative -@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, - rcu_read_lock(); - tcred = __task_cred(task); - if (cred->euid != tcred->suid && cred->euid != tcred->uid && -- cred->uid != tcred->suid && cred->uid != tcred->uid && -- !capable(CAP_SYS_NICE)) { -+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { - rcu_read_unlock(); - err = -EPERM; - goto out; -diff --git a/mm/migrate.c b/mm/migrate.c -index 14d0a6a..0360908 100644 ---- a/mm/migrate.c -+++ b/mm/migrate.c -@@ -866,9 +866,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, - - if (anon_vma) - put_anon_vma(anon_vma); --out: - unlock_page(hpage); - -+out: - if (rc != -EAGAIN) { - list_del(&hpage->lru); - put_page(hpage); -@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task, - unsigned long chunk_start; - int err; - -+ pax_track_stack(); -+ - task_nodes = cpuset_mems_allowed(task); - - err = -ENOMEM; -@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, - if (!mm) - return -EINVAL; - -+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ if (mm != current->mm && -+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { -+ err = -EPERM; -+ goto out; -+ } -+#endif -+ - /* - * Check if this process has the right to modify the specified - * process. The right exists if the process has administrative -@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, - rcu_read_lock(); - tcred = __task_cred(task); - if (cred->euid != tcred->suid && cred->euid != tcred->uid && -- cred->uid != tcred->suid && cred->uid != tcred->uid && -- !capable(CAP_SYS_NICE)) { -+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { - rcu_read_unlock(); - err = -EPERM; - goto out; -diff --git a/mm/mlock.c b/mm/mlock.c -index 048260c..57f4a4e 100644 ---- a/mm/mlock.c -+++ b/mm/mlock.c -@@ -13,6 +13,7 @@ - #include <linux/pagemap.h> - #include <linux/mempolicy.h> - #include <linux/syscalls.h> -+#include <linux/security.h> - #include <linux/sched.h> - #include <linux/module.h> - #include <linux/rmap.h> -@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on) - return -EINVAL; - if (end == start) - return 0; -+ if (end > TASK_SIZE) -+ return -EINVAL; -+ - vma = find_vma_prev(current->mm, start, &prev); - if (!vma || vma->vm_start > start) - return -ENOMEM; -@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on) - for (nstart = start ; ; ) { - vm_flags_t newflags; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) -+ break; -+#endif -+ - /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ - - newflags = vma->vm_flags | VM_LOCKED; -@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) - lock_limit >>= PAGE_SHIFT; - - /* check against resource limits */ -+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); - if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) - error = do_mlock(start, len, 1); - up_write(&current->mm->mmap_sem); -@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) - static int do_mlockall(int flags) - { - struct vm_area_struct * vma, * prev = NULL; -- unsigned int def_flags = 0; - - if (flags & MCL_FUTURE) -- def_flags = VM_LOCKED; -- current->mm->def_flags = def_flags; -+ current->mm->def_flags |= VM_LOCKED; -+ else -+ current->mm->def_flags &= ~VM_LOCKED; - if (flags == MCL_FUTURE) - goto out; - - for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { - vm_flags_t newflags; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) -+ break; -+#endif -+ -+ BUG_ON(vma->vm_end > TASK_SIZE); - newflags = vma->vm_flags | VM_LOCKED; - if (!(flags & MCL_CURRENT)) - newflags &= ~VM_LOCKED; -@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) - lock_limit >>= PAGE_SHIFT; - - ret = -ENOMEM; -+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); - if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || - capable(CAP_IPC_LOCK)) - ret = do_mlockall(flags); -diff --git a/mm/mmap.c b/mm/mmap.c -index a65efd4..17d61ff 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -46,6 +46,16 @@ - #define arch_rebalance_pgtables(addr, len) (addr) - #endif - -+static inline void verify_mm_writelocked(struct mm_struct *mm) -+{ -+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX) -+ if (unlikely(down_read_trylock(&mm->mmap_sem))) { -+ up_read(&mm->mmap_sem); -+ BUG(); -+ } -+#endif -+} -+ - static void unmap_region(struct mm_struct *mm, - struct vm_area_struct *vma, struct vm_area_struct *prev, - unsigned long start, unsigned long end); -@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm, - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes - * - */ --pgprot_t protection_map[16] = { -+pgprot_t protection_map[16] __read_only = { - __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, - __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 - }; - --pgprot_t vm_get_page_prot(unsigned long vm_flags) -+pgprot_t vm_get_page_prot(vm_flags_t vm_flags) - { -- return __pgprot(pgprot_val(protection_map[vm_flags & -+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | - pgprot_val(arch_vm_get_page_prot(vm_flags))); -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) -+ if (!(__supported_pte_mask & _PAGE_NX) && -+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && -+ (vm_flags & (VM_READ | VM_WRITE))) -+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); -+#endif -+ -+ return prot; - } - EXPORT_SYMBOL(vm_get_page_prot); - - int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */ - int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */ - int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; -+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; - /* - * Make sure vm_committed_as in one cacheline and not cacheline shared with - * other variables. It can be updated by several CPUs frequently. 
-@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) - struct vm_area_struct *next = vma->vm_next; - - might_sleep(); -+ BUG_ON(vma->vm_mirror); - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) { -@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) - * not page aligned -Ram Gupta - */ - rlim = rlimit(RLIMIT_DATA); -+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1); - if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + - (mm->end_data - mm->start_data) > rlim) - goto out; -@@ -689,6 +711,12 @@ static int - can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) -+ return 0; -+#endif -+ - if (is_mergeable_vma(vma, file, vm_flags) && - is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { - if (vma->vm_pgoff == vm_pgoff) -@@ -708,6 +736,12 @@ static int - can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) -+ return 0; -+#endif -+ - if (is_mergeable_vma(vma, file, vm_flags) && - is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { - pgoff_t vm_pglen; -@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, - struct vm_area_struct *vma_merge(struct mm_struct *mm, - struct vm_area_struct *prev, unsigned long addr, - unsigned long end, unsigned long vm_flags, -- struct anon_vma *anon_vma, struct file *file, -+ struct anon_vma *anon_vma, struct file *file, - pgoff_t pgoff, struct mempolicy *policy) - { - pgoff_t pglen = (end - addr) >> PAGE_SHIFT; - struct vm_area_struct *area, *next; - int err; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; -+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; -+ -+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); -+#endif -+ - /* - * We later require that vma->vm_flags == vm_flags, - * so this tests vma->vm_flags & VM_SPECIAL, too. -@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, - if (next && next->vm_end == end) /* cases 6, 7, 8 */ - next = next->vm_next; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (prev) -+ prev_m = pax_find_mirror_vma(prev); -+ if (area) -+ area_m = pax_find_mirror_vma(area); -+ if (next) -+ next_m = pax_find_mirror_vma(next); -+#endif -+ - /* - * Can it merge with the predecessor? 
- */ -@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, - /* cases 1, 6 */ - err = vma_adjust(prev, prev->vm_start, - next->vm_end, prev->vm_pgoff, NULL); -- } else /* cases 2, 5, 7 */ -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!err && prev_m) -+ err = vma_adjust(prev_m, prev_m->vm_start, -+ next_m->vm_end, prev_m->vm_pgoff, NULL); -+#endif -+ -+ } else { /* cases 2, 5, 7 */ - err = vma_adjust(prev, prev->vm_start, - end, prev->vm_pgoff, NULL); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!err && prev_m) -+ err = vma_adjust(prev_m, prev_m->vm_start, -+ end_m, prev_m->vm_pgoff, NULL); -+#endif -+ -+ } - if (err) - return NULL; - khugepaged_enter_vma_merge(prev); -@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, - mpol_equal(policy, vma_policy(next)) && - can_vma_merge_before(next, vm_flags, - anon_vma, file, pgoff+pglen)) { -- if (prev && addr < prev->vm_end) /* case 4 */ -+ if (prev && addr < prev->vm_end) { /* case 4 */ - err = vma_adjust(prev, prev->vm_start, - addr, prev->vm_pgoff, NULL); -- else /* cases 3, 8 */ -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!err && prev_m) -+ err = vma_adjust(prev_m, prev_m->vm_start, -+ addr_m, prev_m->vm_pgoff, NULL); -+#endif -+ -+ } else { /* cases 3, 8 */ - err = vma_adjust(area, addr, next->vm_end, - next->vm_pgoff - pglen, NULL); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!err && area_m) -+ err = vma_adjust(area_m, addr_m, next_m->vm_end, -+ next_m->vm_pgoff - pglen, NULL); -+#endif -+ -+ } - if (err) - return NULL; - khugepaged_enter_vma_merge(area); -@@ -921,14 +1001,11 @@ none: - void vm_stat_account(struct mm_struct *mm, unsigned long flags, - struct file *file, long pages) - { -- const unsigned long stack_flags -- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); -- - if (file) { - mm->shared_vm += pages; - if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) - mm->exec_vm += pages; -- } else if (flags & stack_flags) -+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) - mm->stack_vm += pages; - if (flags & (VM_RESERVED|VM_IO)) - mm->reserved_vm += pages; -@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - * (the exception is when the underlying filesystem is noexec - * mounted, in which case we dont add PROT_EXEC.) - */ -- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) -+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) - if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) - prot |= PROT_EXEC; - -@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - /* Obtain the address to map to. we verify (or select) it and ensure - * that it represents a valid section of the address space. - */ -- addr = get_unmapped_area(file, addr, len, pgoff, flags); -+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? 
MAP_EXECUTABLE : 0)); - if (addr & ~PAGE_MASK) - return addr; - -@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | - mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; - -+#ifdef CONFIG_PAX_MPROTECT -+ if (mm->pax_flags & MF_PAX_MPROTECT) { -+#ifndef CONFIG_PAX_MPROTECT_COMPAT -+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) { -+ gr_log_rwxmmap(file); -+ -+#ifdef CONFIG_PAX_EMUPLT -+ vm_flags &= ~VM_EXEC; -+#else -+ return -EPERM; -+#endif -+ -+ } -+ -+ if (!(vm_flags & VM_EXEC)) -+ vm_flags &= ~VM_MAYEXEC; -+#else -+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) -+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); -+#endif -+ else -+ vm_flags &= ~VM_MAYWRITE; -+ } -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) -+ vm_flags &= ~VM_PAGEEXEC; -+#endif -+ - if (flags & MAP_LOCKED) - if (!can_do_mlock()) - return -EPERM; -@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; -+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } -@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - if (error) - return error; - -+ if (!gr_acl_handle_mmap(file, prot)) -+ return -EACCES; -+ - return mmap_region(file, addr, len, flags, vm_flags, pgoff); - } - EXPORT_SYMBOL(do_mmap_pgoff); -@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma) - vm_flags_t vm_flags = vma->vm_flags; - - /* If it was private or non-writable, the write bit is already clear */ -- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) -+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) - return 0; - - /* The backer wishes to know when pages are first written to? */ -@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long charged = 0; - struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m = NULL; -+#endif -+ -+ /* -+ * mm->mmap_sem is required to protect against another thread -+ * changing the mappings in case we sleep. -+ */ -+ verify_mm_writelocked(mm); -+ - /* Clear old maps */ - error = -ENOMEM; --munmap_back: - vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); - if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) - return -ENOMEM; -- goto munmap_back; -+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); -+ BUG_ON(vma && vma->vm_start < addr + len); - } - - /* Check against address space limit. 
*/ -@@ -1258,6 +1379,16 @@ munmap_back: - goto unacct_error; - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { -+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ if (!vma_m) { -+ error = -ENOMEM; -+ goto free_vma; -+ } -+ } -+#endif -+ - vma->vm_mm = mm; - vma->vm_start = addr; - vma->vm_end = addr + len; -@@ -1281,6 +1412,19 @@ munmap_back: - error = file->f_op->mmap(file, vma); - if (error) - goto unmap_and_free_vma; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m && (vm_flags & VM_EXECUTABLE)) -+ added_exe_file_vma(mm); -+#endif -+ -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { -+ vma->vm_flags |= VM_PAGEEXEC; -+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -+ } -+#endif -+ - if (vm_flags & VM_EXECUTABLE) - added_exe_file_vma(mm); - -@@ -1316,6 +1460,11 @@ munmap_back: - vma_link(mm, vma, prev, rb_link, rb_parent); - file = vma->vm_file; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) -+ BUG_ON(pax_mirror_vma(vma_m, vma)); -+#endif -+ - /* Once vma denies write, undo our temporary denial count */ - if (correct_wcount) - atomic_inc(&inode->i_writecount); -@@ -1324,6 +1473,7 @@ out: - - mm->total_vm += len >> PAGE_SHIFT; - vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); -+ track_exec_limit(mm, addr, addr + len, vm_flags); - if (vm_flags & VM_LOCKED) { - if (!mlock_vma_pages_range(vma, addr, addr + len)) - mm->locked_vm += (len >> PAGE_SHIFT); -@@ -1341,6 +1491,12 @@ unmap_and_free_vma: - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); - charged = 0; - free_vma: -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) -+ kmem_cache_free(vm_area_cachep, vma_m); -+#endif -+ - kmem_cache_free(vm_area_cachep, vma); - unacct_error: - if (charged) -@@ -1348,6 +1504,44 @@ unacct_error: - return error; - } - -+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len) -+{ -+ if (!vma) { -+#ifdef CONFIG_STACK_GROWSUP -+ if (addr > sysctl_heap_stack_gap) -+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); -+ else -+ vma = find_vma(current->mm, 0); -+ if (vma && (vma->vm_flags & VM_GROWSUP)) -+ return false; -+#endif -+ return true; -+ } -+ -+ if (addr + len > vma->vm_start) -+ return false; -+ -+ if (vma->vm_flags & VM_GROWSDOWN) -+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; -+#ifdef CONFIG_STACK_GROWSUP -+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) -+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; -+#endif -+ -+ return true; -+} -+ -+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len) -+{ -+ if (vma->vm_start < len) -+ return -ENOMEM; -+ if (!(vma->vm_flags & VM_GROWSDOWN)) -+ return vma->vm_start - len; -+ if (sysctl_heap_stack_gap <= vma->vm_start - len) -+ return vma->vm_start - len - sysctl_heap_stack_gap; -+ return -ENOMEM; -+} -+ - /* Get an address range which is currently unmapped. - * For shmat() with addr=0. 
- * -@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - if (flags & MAP_FIXED) - return addr; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - addr = PAGE_ALIGN(addr); -- vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -- return addr; -+ if (TASK_SIZE - len >= addr) { -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ return addr; -+ } - } - if (len > mm->cached_hole_size) { -- start_addr = addr = mm->free_area_cache; -+ start_addr = addr = mm->free_area_cache; - } else { -- start_addr = addr = TASK_UNMAPPED_BASE; -- mm->cached_hole_size = 0; -+ start_addr = addr = mm->mmap_base; -+ mm->cached_hole_size = 0; - } - - full_search: -@@ -1396,34 +1595,40 @@ full_search: - * Start a new search - just in case we missed - * some holes. - */ -- if (start_addr != TASK_UNMAPPED_BASE) { -- addr = TASK_UNMAPPED_BASE; -- start_addr = addr; -+ if (start_addr != mm->mmap_base) { -+ start_addr = addr = mm->mmap_base; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } -- if (!vma || addr + len <= vma->vm_start) { -- /* -- * Remember the place where we stopped the search: -- */ -- mm->free_area_cache = addr + len; -- return addr; -- } -+ if (check_heap_stack_gap(vma, addr, len)) -+ break; - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - addr = vma->vm_end; - } -+ -+ /* -+ * Remember the place where we stopped the search: -+ */ -+ mm->free_area_cache = addr + len; -+ return addr; - } - #endif - - void arch_unmap_area(struct mm_struct *mm, unsigned long addr) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) -+ return; -+#endif -+ - /* - * Is this a new hole at the lowest possible address? 
- */ -- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { -+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) { - mm->free_area_cache = addr; - mm->cached_hole_size = ~0UL; - } -@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - { - struct vm_area_struct *vma; - struct mm_struct *mm = current->mm; -- unsigned long addr = addr0; -+ unsigned long base = mm->mmap_base, addr = addr0; - - /* requested length too big for entire address space */ - if (len > TASK_SIZE) -@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - if (flags & MAP_FIXED) - return addr; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - /* requesting a specific address */ - if (addr) { - addr = PAGE_ALIGN(addr); -- vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -- return addr; -+ if (TASK_SIZE - len >= addr) { -+ vma = find_vma(mm, addr); -+ if (check_heap_stack_gap(vma, addr, len)) -+ return addr; -+ } - } - - /* check if free_area_cache is useful for us */ -@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - /* make sure it can fit in the remaining address space */ - if (addr > len) { - vma = find_vma(mm, addr-len); -- if (!vma || addr <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr - len, len)) - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } -@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - * return with success: - */ - vma = find_vma(mm, addr); -- if (!vma || addr+len <= vma->vm_start) -+ if (check_heap_stack_gap(vma, addr, len)) - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr); - -@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ -- addr = vma->vm_start-len; -- } while (len < vma->vm_start); -+ addr = skip_heap_stack_gap(vma, len); -+ } while (!IS_ERR_VALUE(addr)); - - bottomup: - /* -@@ -1507,13 +1717,21 @@ bottomup: - * can happen with large stack limits and large mmap() - * allocations. - */ -+ mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ -+ mm->free_area_cache = mm->mmap_base; - mm->cached_hole_size = ~0UL; -- mm->free_area_cache = TASK_UNMAPPED_BASE; - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); - /* - * Restore the topdown base: - */ -- mm->free_area_cache = mm->mmap_base; -+ mm->mmap_base = base; -+ mm->free_area_cache = base; - mm->cached_hole_size = ~0UL; - - return addr; -@@ -1522,6 +1740,12 @@ bottomup: - - void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) -+ return; -+#endif -+ - /* - * Is this a new hole at the highest possible address? 
- */ -@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) - mm->free_area_cache = addr; - - /* dont allow allocations above current base */ -- if (mm->free_area_cache > mm->mmap_base) -+ if (mm->free_area_cache > mm->mmap_base) { - mm->free_area_cache = mm->mmap_base; -+ mm->cached_hole_size = ~0UL; -+ } - } - - unsigned long -@@ -1638,6 +1864,28 @@ out: - return prev ? prev->vm_next : vma; - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) -+{ -+ struct vm_area_struct *vma_m; -+ -+ BUG_ON(!vma || vma->vm_start >= vma->vm_end); -+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { -+ BUG_ON(vma->vm_mirror); -+ return NULL; -+ } -+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); -+ vma_m = vma->vm_mirror; -+ BUG_ON(!vma_m || vma_m->vm_mirror != vma); -+ BUG_ON(vma->vm_file != vma_m->vm_file); -+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); -+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff); -+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root); -+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED)); -+ return vma_m; -+} -+#endif -+ - /* - * Verify that the stack growth is acceptable and - * update accounting. This is shared with both the -@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns - return -ENOMEM; - - /* Stack limit test */ -+ gr_learn_resource(current, RLIMIT_STACK, size, 1); - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) - return -ENOMEM; - -@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns - locked = mm->locked_vm + grow; - limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); - limit >>= PAGE_SHIFT; -+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); - if (locked > limit && !capable(CAP_IPC_LOCK)) - return -ENOMEM; - } -@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns - * PA-RISC uses this for its stack; IA64 for its Register Backing Store. - * vma is the last one with address > vma->vm_end. Have to extend vma. - */ -+#ifndef CONFIG_IA64 -+static -+#endif - int expand_upwards(struct vm_area_struct *vma, unsigned long address) - { - int error; -+ bool locknext; - - if (!(vma->vm_flags & VM_GROWSUP)) - return -EFAULT; - -+ /* Also guard against wrapping around to address 0. */ -+ if (address < PAGE_ALIGN(address+1)) -+ address = PAGE_ALIGN(address+1); -+ else -+ return -ENOMEM; -+ - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; -+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); -+ if (locknext && anon_vma_prepare(vma->vm_next)) -+ return -ENOMEM; - vma_lock_anon_vma(vma); -+ if (locknext) -+ vma_lock_anon_vma(vma->vm_next); - - /* - * vma->vm_start/vm_end cannot change under us because the caller - * is required to hold the mmap_sem in read mode. We need the -- * anon_vma lock to serialize against concurrent expand_stacks. -- * Also guard against wrapping around to address 0. -+ * anon_vma locks to serialize against concurrent expand_stacks -+ * and expand_upwards. 
- */ -- if (address < PAGE_ALIGN(address+4)) -- address = PAGE_ALIGN(address+4); -- else { -- vma_unlock_anon_vma(vma); -- return -ENOMEM; -- } - error = 0; - - /* Somebody else might have raced and expanded it already */ -- if (address > vma->vm_end) { -+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) -+ error = -ENOMEM; -+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { - unsigned long size, grow; - - size = address - vma->vm_start; -@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) - } - } - } -+ if (locknext) -+ vma_unlock_anon_vma(vma->vm_next); - vma_unlock_anon_vma(vma); - khugepaged_enter_vma_merge(vma); - return error; -@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma, - unsigned long address) - { - int error; -+ bool lockprev = false; -+ struct vm_area_struct *prev; - - /* - * We must make sure the anon_vma is allocated -@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma, - if (error) - return error; - -+ prev = vma->vm_prev; -+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) -+ lockprev = prev && (prev->vm_flags & VM_GROWSUP); -+#endif -+ if (lockprev && anon_vma_prepare(prev)) -+ return -ENOMEM; -+ if (lockprev) -+ vma_lock_anon_vma(prev); -+ - vma_lock_anon_vma(vma); - - /* -@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma, - */ - - /* Somebody else might have raced and expanded it already */ -- if (address < vma->vm_start) { -+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) -+ error = -ENOMEM; -+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { - unsigned long size, grow; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m; -+ -+ vma_m = pax_find_mirror_vma(vma); -+#endif -+ - size = vma->vm_end - address; - grow = (vma->vm_start - address) >> PAGE_SHIFT; - -@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma, - if (!error) { - vma->vm_start = address; - vma->vm_pgoff -= grow; -+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) { -+ vma_m->vm_start -= grow << PAGE_SHIFT; -+ vma_m->vm_pgoff -= grow; -+ } -+#endif -+ - perf_event_mmap(vma); - } - } - } - vma_unlock_anon_vma(vma); -+ if (lockprev) -+ vma_unlock_anon_vma(prev); - khugepaged_enter_vma_merge(vma); - return error; - } -@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) - do { - long nrpages = vma_pages(vma); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) { -+ vma = remove_vma(vma); -+ continue; -+ } -+#endif -+ - mm->total_vm -= nrpages; - vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); - vma = remove_vma(vma); -@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, - insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); - vma->vm_prev = NULL; - do { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma->vm_mirror) { -+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); -+ vma->vm_mirror->vm_mirror = NULL; -+ vma->vm_mirror->vm_flags &= ~VM_EXEC; -+ vma->vm_mirror = NULL; -+ } -+#endif -+ - rb_erase(&vma->vm_rb, &mm->mm_rb); - mm->map_count--; - tail_vma = vma; -@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - struct vm_area_struct *new; - int err = -ENOMEM; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m, *new_m = NULL; -+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; -+#endif -+ - if (is_vm_hugetlb_page(vma) && (addr & - ~(huge_page_mask(hstate_vma(vma))))) - return -EINVAL; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ vma_m = pax_find_mirror_vma(vma); -+#endif -+ - new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - if (!new) - goto out_err; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) { -+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); -+ if (!new_m) { -+ kmem_cache_free(vm_area_cachep, new); -+ goto out_err; -+ } -+ } -+#endif -+ - /* most fields are the same, copy all, and then fixup */ - *new = *vma; - -@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) { -+ *new_m = *vma_m; -+ INIT_LIST_HEAD(&new_m->anon_vma_chain); -+ new_m->vm_mirror = new; -+ new->vm_mirror = new_m; -+ -+ if (new_below) -+ new_m->vm_end = addr_m; -+ else { -+ new_m->vm_start = addr_m; -+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); -+ } -+ } -+#endif -+ - pol = mpol_dup(vma_policy(vma)); - if (IS_ERR(pol)) { - err = PTR_ERR(pol); -@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - else - err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (!err && vma_m) { -+ if (anon_vma_clone(new_m, vma_m)) -+ goto out_free_mpol; -+ -+ mpol_get(pol); -+ vma_set_policy(new_m, pol); -+ -+ if (new_m->vm_file) { -+ get_file(new_m->vm_file); -+ if (vma_m->vm_flags & VM_EXECUTABLE) -+ added_exe_file_vma(mm); -+ } -+ -+ if (new_m->vm_ops && new_m->vm_ops->open) -+ new_m->vm_ops->open(new_m); -+ -+ if (new_below) -+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + -+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); -+ else -+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); -+ -+ if (err) { -+ if (new_m->vm_ops && new_m->vm_ops->close) -+ new_m->vm_ops->close(new_m); -+ if (new_m->vm_file) { -+ if (vma_m->vm_flags & VM_EXECUTABLE) -+ removed_exe_file_vma(mm); -+ fput(new_m->vm_file); -+ } -+ mpol_put(pol); -+ } -+ } -+#endif -+ - /* Success. 
*/ - if (!err) - return 0; -@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - removed_exe_file_vma(mm); - fput(new->vm_file); - } -- unlink_anon_vmas(new); - out_free_mpol: - mpol_put(pol); - out_free_vma: -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (new_m) { -+ unlink_anon_vmas(new_m); -+ kmem_cache_free(vm_area_cachep, new_m); -+ } -+#endif -+ -+ unlink_anon_vmas(new); - kmem_cache_free(vm_area_cachep, new); - out_err: - return err; -@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, - int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, int new_below) - { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) { -+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); -+ if (mm->map_count >= sysctl_max_map_count-1) -+ return -ENOMEM; -+ } else -+#endif -+ - if (mm->map_count >= sysctl_max_map_count) - return -ENOMEM; - -@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - * work. This now handles partial unmappings. - * Jeremy Fitzhardinge jeremy@goop.org - */ -+#ifdef CONFIG_PAX_SEGMEXEC - int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) - { -+ int ret = __do_munmap(mm, start, len); -+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) -+ return ret; -+ -+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); -+} -+ -+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) -+#else -+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) -+#endif -+{ - unsigned long end; - struct vm_area_struct *vma, *prev, *last; - -+ /* -+ * mm->mmap_sem is required to protect against another thread -+ * changing the mappings in case we sleep. -+ */ -+ verify_mm_writelocked(mm); -+ - if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) - return -EINVAL; - -@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) - /* Fix up all other VM information */ - remove_vma_list(mm, vma); - -+ track_exec_limit(mm, start, end, 0UL); -+ - return 0; - } - -@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) - - profile_munmap(addr); - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && -+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) -+ return -EINVAL; -+#endif -+ - down_write(&mm->mmap_sem); - ret = do_munmap(mm, addr, len); - up_write(&mm->mmap_sem); - return ret; - } - --static inline void verify_mm_writelocked(struct mm_struct *mm) --{ --#ifdef CONFIG_DEBUG_VM -- if (unlikely(down_read_trylock(&mm->mmap_sem))) { -- WARN_ON(1); -- up_read(&mm->mmap_sem); -- } --#endif --} -- - /* - * this is really a simplified "do_mmap". it only handles - * anonymous maps. 
eventually we may be able to do some -@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) - struct rb_node ** rb_link, * rb_parent; - pgoff_t pgoff = addr >> PAGE_SHIFT; - int error; -+ unsigned long charged; - - len = PAGE_ALIGN(len); - if (!len) -@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) - - flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; - -+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) -+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { -+ flags &= ~VM_EXEC; -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (mm->pax_flags & MF_PAX_MPROTECT) -+ flags &= ~VM_MAYEXEC; -+#endif -+ -+ } -+#endif -+ - error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); - if (error & ~PAGE_MASK) - return error; - -+ charged = len >> PAGE_SHIFT; -+ - /* - * mlock MCL_FUTURE? - */ - if (mm->def_flags & VM_LOCKED) { - unsigned long locked, lock_limit; -- locked = len >> PAGE_SHIFT; -+ locked = charged; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; -@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) - /* - * Clear old maps. this also does some error checking for us - */ -- munmap_back: - vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); - if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) - return -ENOMEM; -- goto munmap_back; -+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); -+ BUG_ON(vma && vma->vm_start < addr + len); - } - - /* Check against address space limits *after* clearing old maps... */ -- if (!may_expand_vm(mm, len >> PAGE_SHIFT)) -+ if (!may_expand_vm(mm, charged)) - return -ENOMEM; - - if (mm->map_count > sysctl_max_map_count) - return -ENOMEM; - -- if (security_vm_enough_memory(len >> PAGE_SHIFT)) -+ if (security_vm_enough_memory(charged)) - return -ENOMEM; - - /* Can we just expand an old private anonymous mapping? */ -@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (!vma) { -- vm_unacct_memory(len >> PAGE_SHIFT); -+ vm_unacct_memory(charged); - return -ENOMEM; - } - -@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) - vma_link(mm, vma, prev, rb_link, rb_parent); - out: - perf_event_mmap(vma); -- mm->total_vm += len >> PAGE_SHIFT; -+ mm->total_vm += charged; - if (flags & VM_LOCKED) { - if (!mlock_vma_pages_range(vma, addr, addr + len)) -- mm->locked_vm += (len >> PAGE_SHIFT); -+ mm->locked_vm += charged; - } -+ track_exec_limit(mm, addr, addr + len, flags); - return addr; - } - -@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm) - * Walk the list again, actually closing and freeing it, - * with preemption enabled, without holding any MM locks. 
- */ -- while (vma) -+ while (vma) { -+ vma->vm_mirror = NULL; - vma = remove_vma(vma); -+ } - - BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); - } -@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) - struct vm_area_struct * __vma, * prev; - struct rb_node ** rb_link, * rb_parent; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m = NULL; -+#endif -+ -+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1)) -+ return -EPERM; -+ - /* - * The vm_pgoff of a purely anonymous vma should be irrelevant - * until its first write fault, when page's anon_vma and index -@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) - if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, vma_pages(vma))) - return -ENOMEM; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { -+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ if (!vma_m) -+ return -ENOMEM; -+ } -+#endif -+ - vma_link(mm, vma, prev, rb_link, rb_parent); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (vma_m) -+ BUG_ON(pax_mirror_vma(vma_m, vma)); -+#endif -+ - return 0; - } - -@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, - struct rb_node **rb_link, *rb_parent; - struct mempolicy *pol; - -+ BUG_ON(vma->vm_mirror); -+ - /* - * If anonymous vma has not yet been faulted, update new pgoff - * to match new location, to increase its chance of merging. -@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, - return NULL; - } - -+#ifdef CONFIG_PAX_SEGMEXEC -+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma) -+{ -+ struct vm_area_struct *prev_m; -+ struct rb_node **rb_link_m, *rb_parent_m; -+ struct mempolicy *pol_m; -+ -+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)); -+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror); -+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m))); -+ *vma_m = *vma; -+ INIT_LIST_HEAD(&vma_m->anon_vma_chain); -+ if (anon_vma_clone(vma_m, vma)) -+ return -ENOMEM; -+ pol_m = vma_policy(vma_m); -+ mpol_get(pol_m); -+ vma_set_policy(vma_m, pol_m); -+ vma_m->vm_start += SEGMEXEC_TASK_SIZE; -+ vma_m->vm_end += SEGMEXEC_TASK_SIZE; -+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED); -+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags); -+ if (vma_m->vm_file) -+ get_file(vma_m->vm_file); -+ if (vma_m->vm_ops && vma_m->vm_ops->open) -+ vma_m->vm_ops->open(vma_m); -+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m); -+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m); -+ vma_m->vm_mirror = vma; -+ vma->vm_mirror = vma_m; -+ return 0; -+} -+#endif -+ - /* - * Return true if the calling process may expand its vm space by the passed - * number of pages -@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) - unsigned long lim; - - lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; -- -+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1); - if (cur + npages > lim) - return 0; - return 1; -@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm, - vma->vm_start = addr; - vma->vm_end = addr + len; - -+#ifdef CONFIG_PAX_MPROTECT -+ if (mm->pax_flags & MF_PAX_MPROTECT) { -+#ifndef CONFIG_PAX_MPROTECT_COMPAT -+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) -+ return -EPERM; -+ if (!(vm_flags & 
VM_EXEC)) -+ vm_flags &= ~VM_MAYEXEC; -+#else -+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) -+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); -+#endif -+ else -+ vm_flags &= ~VM_MAYWRITE; -+ } -+#endif -+ - vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - -diff --git a/mm/mprotect.c b/mm/mprotect.c -index 5a688a2..27e031c 100644 ---- a/mm/mprotect.c -+++ b/mm/mprotect.c -@@ -23,10 +23,16 @@ - #include <linux/mmu_notifier.h> - #include <linux/migrate.h> - #include <linux/perf_event.h> -+ -+#ifdef CONFIG_PAX_MPROTECT -+#include <linux/elf.h> -+#endif -+ - #include <asm/uaccess.h> - #include <asm/pgtable.h> - #include <asm/cacheflush.h> - #include <asm/tlbflush.h> -+#include <asm/mmu_context.h> - - #ifndef pgprot_modify - static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) -@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma, - flush_tlb_range(vma, start, end); - } - -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+/* called while holding the mmap semaphor for writing except stack expansion */ -+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) -+{ -+ unsigned long oldlimit, newlimit = 0UL; -+ -+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX)) -+ return; -+ -+ spin_lock(&mm->page_table_lock); -+ oldlimit = mm->context.user_cs_limit; -+ if ((prot & VM_EXEC) && oldlimit < end) -+ /* USER_CS limit moved up */ -+ newlimit = end; -+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end) -+ /* USER_CS limit moved down */ -+ newlimit = start; -+ -+ if (newlimit) { -+ mm->context.user_cs_limit = newlimit; -+ -+#ifdef CONFIG_SMP -+ wmb(); -+ cpus_clear(mm->context.cpu_user_cs_mask); -+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask); -+#endif -+ -+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id()); -+ } -+ spin_unlock(&mm->page_table_lock); -+ if (newlimit == end) { -+ struct vm_area_struct *vma = find_vma(mm, oldlimit); -+ -+ for (; vma && vma->vm_start < end; vma = vma->vm_next) -+ if (is_vm_hugetlb_page(vma)) -+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot); -+ else -+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma)); -+ } -+} -+#endif -+ - int - mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, - unsigned long start, unsigned long end, unsigned long newflags) -@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, - int error; - int dirty_accountable = 0; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m = NULL; -+ unsigned long start_m, end_m; -+ -+ start_m = start + SEGMEXEC_TASK_SIZE; -+ end_m = end + SEGMEXEC_TASK_SIZE; -+#endif -+ - if (newflags == oldflags) { - *pprev = vma; - return 0; - } - -+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) { -+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next; -+ -+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end) -+ return -ENOMEM; -+ -+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end) -+ return -ENOMEM; -+ } -+ - /* - * If we make a private mapping writable we increase our commit; - * but (without finer accounting) cannot reduce our commit if we -@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, - } - } - -+#ifdef 
CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) { -+ if (start != vma->vm_start) { -+ error = split_vma(mm, vma, start, 1); -+ if (error) -+ goto fail; -+ BUG_ON(!*pprev || (*pprev)->vm_next == vma); -+ *pprev = (*pprev)->vm_next; -+ } -+ -+ if (end != vma->vm_end) { -+ error = split_vma(mm, vma, end, 0); -+ if (error) -+ goto fail; -+ } -+ -+ if (pax_find_mirror_vma(vma)) { -+ error = __do_munmap(mm, start_m, end_m - start_m); -+ if (error) -+ goto fail; -+ } else { -+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ if (!vma_m) { -+ error = -ENOMEM; -+ goto fail; -+ } -+ vma->vm_flags = newflags; -+ error = pax_mirror_vma(vma_m, vma); -+ if (error) { -+ vma->vm_flags = oldflags; -+ goto fail; -+ } -+ } -+ } -+#endif -+ - /* - * First try to merge with previous and/or next vma. - */ -@@ -204,9 +306,21 @@ success: - * vm_flags and vm_page_prot are protected by the mmap_sem - * held in write mode. - */ -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ)) -+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ; -+#endif -+ - vma->vm_flags = newflags; -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (mm->binfmt && mm->binfmt->handle_mprotect) -+ mm->binfmt->handle_mprotect(vma, newflags); -+#endif -+ - vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, -- vm_get_page_prot(newflags)); -+ vm_get_page_prot(vma->vm_flags)); - - if (vma_wants_writenotify(vma)) { - vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); -@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - end = start + len; - if (end <= start) - return -ENOMEM; -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { -+ if (end > SEGMEXEC_TASK_SIZE) -+ return -EINVAL; -+ } else -+#endif -+ -+ if (end > TASK_SIZE) -+ return -EINVAL; -+ - if (!arch_validate_prot(prot)) - return -EINVAL; - -@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - /* - * Does the application expect PROT_READ to imply PROT_EXEC: - */ -- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) -+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) - prot |= PROT_EXEC; - - vm_flags = calc_vm_prot_bits(prot); -@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - if (start > vma->vm_start) - prev = vma; - -+#ifdef CONFIG_PAX_MPROTECT -+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect) -+ current->mm->binfmt->handle_mprotect(vma, vm_flags); -+#endif -+ - for (nstart = start ; ; ) { - unsigned long newflags; - -@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - - /* newflags >> 4 shift VM_MAY% in place of VM_% */ - if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { -+ if (prot & (PROT_WRITE | PROT_EXEC)) -+ gr_log_rwxmprotect(vma->vm_file); -+ -+ error = -EACCES; -+ goto out; -+ } -+ -+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { - error = -EACCES; - goto out; - } -@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, - error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); - if (error) - goto out; -+ -+ track_exec_limit(current->mm, nstart, tmp, vm_flags); -+ - nstart = tmp; - - if (nstart < prev->vm_end) -diff --git a/mm/mremap.c b/mm/mremap.c -index 506fa44..ccc0ba9 100644 ---- a/mm/mremap.c -+++ b/mm/mremap.c -@@ -113,6 +113,12 @@ static void move_ptes(struct 
vm_area_struct *vma, pmd_t *old_pmd, - continue; - pte = ptep_clear_flush(vma, old_addr, old_pte); - pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); -+ -+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT -+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) -+ pte = pte_exprotect(pte); -+#endif -+ - set_pte_at(mm, new_addr, new_pte, pte); - } - -@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, - if (is_vm_hugetlb_page(vma)) - goto Einval; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (pax_find_mirror_vma(vma)) -+ goto Einval; -+#endif -+ - /* We can't remap across vm area boundaries */ - if (old_len > vma->vm_end - addr) - goto Efault; -@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr, - unsigned long ret = -EINVAL; - unsigned long charged = 0; - unsigned long map_flags; -+ unsigned long pax_task_size = TASK_SIZE; - - if (new_addr & ~PAGE_MASK) - goto out; - -- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; -+ -+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len) - goto out; - - /* Check if the location we're moving into overlaps the - * old location at all, and fail if it does. - */ -- if ((new_addr <= addr) && (new_addr+new_len) > addr) -- goto out; -- -- if ((addr <= new_addr) && (addr+old_len) > new_addr) -+ if (addr + old_len > new_addr && new_addr + new_len > addr) - goto out; - - ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); -@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr, - struct vm_area_struct *vma; - unsigned long ret = -EINVAL; - unsigned long charged = 0; -+ unsigned long pax_task_size = TASK_SIZE; - - if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) - goto out; -@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr, - if (!new_len) - goto out; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (mm->pax_flags & MF_PAX_SEGMEXEC) -+ pax_task_size = SEGMEXEC_TASK_SIZE; -+#endif -+ -+ pax_task_size -= PAGE_SIZE; -+ -+ if (new_len > pax_task_size || addr > pax_task_size-new_len || -+ old_len > pax_task_size || addr > pax_task_size-old_len) -+ goto out; -+ - if (flags & MREMAP_FIXED) { - if (flags & MREMAP_MAYMOVE) - ret = mremap_to(addr, old_len, new_addr, new_len); -@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr, - addr + new_len); - } - ret = addr; -+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); - goto out; - } - } -@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr, - ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); - if (ret) - goto out; -+ -+ map_flags = vma->vm_flags; - ret = move_vma(vma, addr, old_len, new_len, new_addr); -+ if (!(ret & ~PAGE_MASK)) { -+ track_exec_limit(current->mm, addr, addr + old_len, 0UL); -+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); -+ } - } - out: - if (ret & ~PAGE_MASK) -diff --git a/mm/nobootmem.c b/mm/nobootmem.c -index 6e93dc7..c98df0c 100644 ---- a/mm/nobootmem.c -+++ b/mm/nobootmem.c -@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) - unsigned long __init free_all_memory_core_early(int nodeid) - { - int i; -- u64 start, end; -+ u64 start, end, startrange, endrange; - unsigned long count = 0; -- struct range *range = NULL; -+ struct range *range = NULL, rangerange = { 0, 0 }; - int nr_range; - - nr_range = 
get_free_all_memory_range(&range, nodeid); -+ startrange = __pa(range) >> PAGE_SHIFT; -+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT; - - for (i = 0; i < nr_range; i++) { - start = range[i].start; - end = range[i].end; -+ if (start <= endrange && startrange < end) { -+ BUG_ON(rangerange.start | rangerange.end); -+ rangerange = range[i]; -+ continue; -+ } - count += end - start; - __free_pages_memory(start, end); - } -+ start = rangerange.start; -+ end = rangerange.end; -+ count += end - start; -+ __free_pages_memory(start, end); - - return count; - } -diff --git a/mm/nommu.c b/mm/nommu.c -index 4358032..e79b99f 100644 ---- a/mm/nommu.c -+++ b/mm/nommu.c -@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ - int sysctl_overcommit_ratio = 50; /* default is 50% */ - int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; - int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; --int heap_stack_gap = 0; - - atomic_long_t mmap_pages_allocated; - -@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) - EXPORT_SYMBOL(find_vma); - - /* -- * find a VMA -- * - we don't extend stack VMAs under NOMMU conditions -- */ --struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) --{ -- return find_vma(mm, addr); --} -- --/* - * expand a stack to a given address - * - not supported under NOMMU conditions - */ -@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - - /* most fields are the same, copy all, and then fixup */ - *new = *vma; -+ INIT_LIST_HEAD(&new->anon_vma_chain); - *region = *vma->vm_region; - new->vm_region = region; - -diff --git a/mm/oom_kill.c b/mm/oom_kill.c -index 626303b..e9a1785 100644 ---- a/mm/oom_kill.c -+++ b/mm/oom_kill.c -@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, - unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, - const nodemask_t *nodemask, unsigned long totalpages) - { -- int points; -+ long points; - - if (oom_unkillable_task(p, mem, nodemask)) - return 0; -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 6e8ecb6..d9e3d7a 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -340,7 +340,7 @@ out: - * This usage means that zero-order pages may not be compound. 
- */ - --static void free_compound_page(struct page *page) -+void free_compound_page(struct page *page) - { - __free_pages_ok(page, compound_order(page)); - } -@@ -355,8 +355,8 @@ void prep_compound_page(struct page *page, unsigned long order) - __SetPageHead(page); - for (i = 1; i < nr_pages; i++) { - struct page *p = page + i; -- - __SetPageTail(p); -+ set_page_count(p, 0); - p->first_page = page; - } - } -@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order) - int i; - int bad = 0; - -+#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ unsigned long index = 1UL << order; -+#endif -+ - trace_mm_page_free_direct(page, order); - kmemcheck_free_shadow(page, order); - -@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order) - debug_check_no_obj_freed(page_address(page), - PAGE_SIZE << order); - } -+ -+#ifdef CONFIG_PAX_MEMORY_SANITIZE -+ for (; index; --index) -+ sanitize_highpage(page + index - 1); -+#endif -+ - arch_free_page(page, order); - kernel_map_pages(page, 1 << order, 0); - -@@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) - arch_alloc_page(page, order); - kernel_map_pages(page, 1 << order, 1); - -+#ifndef CONFIG_PAX_MEMORY_SANITIZE - if (gfp_flags & __GFP_ZERO) - prep_zero_page(page, order, gfp_flags); -+#endif - - if (order && (gfp_flags & __GFP_COMP)) - prep_compound_page(page, order); -@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter) - int cpu; - struct zone *zone; - -+ pax_track_stack(); -+ - for_each_populated_zone(zone) { - if (skip_free_areas_node(filter, zone_to_nid(zone))) - continue; -@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn) - unsigned long pfn; - - for (pfn = start_pfn; pfn < end_pfn; pfn++) { -+#ifdef CONFIG_X86_32 -+ /* boot failures in VMware 8 on 32bit vanilla since -+ this change */ -+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn))) -+#else - if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn))) -+#endif - return 1; - } - return 0; -@@ -3373,6 +3393,7 @@ static void setup_zone_migrate_reserve(struct zone *zone) - /* Get the start pfn, end pfn and the number of blocks to reserve */ - start_pfn = zone->zone_start_pfn; - end_pfn = start_pfn + zone->spanned_pages; -+ start_pfn = roundup(start_pfn, pageblock_nr_pages); - reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> - pageblock_order; - -diff --git a/mm/percpu.c b/mm/percpu.c -index bf80e55..c7c3f9a 100644 ---- a/mm/percpu.c -+++ b/mm/percpu.c -@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly; - static unsigned int pcpu_last_unit_cpu __read_mostly; - - /* the address of the first chunk which starts with the kernel static area */ --void *pcpu_base_addr __read_mostly; -+void *pcpu_base_addr __read_only; - EXPORT_SYMBOL_GPL(pcpu_base_addr); - - static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ -diff --git a/mm/rmap.c b/mm/rmap.c -index 8005080..198c2cd 100644 ---- a/mm/rmap.c -+++ b/mm/rmap.c -@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma) - struct anon_vma *anon_vma = vma->anon_vma; - struct anon_vma_chain *avc; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct anon_vma_chain *avc_m = NULL; -+#endif -+ - might_sleep(); - if (unlikely(!anon_vma)) { - struct mm_struct *mm = vma->vm_mm; -@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma) - if (!avc) - goto out_enomem; - -+#ifdef CONFIG_PAX_SEGMEXEC -+ avc_m = anon_vma_chain_alloc(GFP_KERNEL); 
-+ if (!avc_m) -+ goto out_enomem_free_avc; -+#endif -+ - anon_vma = find_mergeable_anon_vma(vma); - allocated = NULL; - if (!anon_vma) { -@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma) - /* page_table_lock to protect against threads */ - spin_lock(&mm->page_table_lock); - if (likely(!vma->anon_vma)) { -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma); -+ -+ if (vma_m) { -+ BUG_ON(vma_m->anon_vma); -+ vma_m->anon_vma = anon_vma; -+ avc_m->anon_vma = anon_vma; -+ avc_m->vma = vma; -+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain); -+ list_add(&avc_m->same_anon_vma, &anon_vma->head); -+ avc_m = NULL; -+ } -+#endif -+ - vma->anon_vma = anon_vma; - avc->anon_vma = anon_vma; - avc->vma = vma; -@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma) - - if (unlikely(allocated)) - put_anon_vma(allocated); -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (unlikely(avc_m)) -+ anon_vma_chain_free(avc_m); -+#endif -+ - if (unlikely(avc)) - anon_vma_chain_free(avc); - } - return 0; - - out_enomem_free_avc: -+ -+#ifdef CONFIG_PAX_SEGMEXEC -+ if (avc_m) -+ anon_vma_chain_free(avc_m); -+#endif -+ - anon_vma_chain_free(avc); - out_enomem: - return -ENOMEM; -@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, - * Attach the anon_vmas from src to dst. - * Returns 0 on success, -ENOMEM on failure. - */ --int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) -+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src) - { - struct anon_vma_chain *avc, *pavc; - struct anon_vma *root = NULL; -@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) - * the corresponding VMA in the parent process is attached to. - * Returns 0 on success, non-zero on failure. - */ --int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) -+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma) - { - struct anon_vma_chain *avc; - struct anon_vma *anon_vma; -diff --git a/mm/shmem.c b/mm/shmem.c -index 32f6763..431c405 100644 ---- a/mm/shmem.c -+++ b/mm/shmem.c -@@ -31,7 +31,7 @@ - #include <linux/module.h> - #include <linux/swap.h> - --static struct vfsmount *shm_mnt; -+struct vfsmount *shm_mnt; - - #ifdef CONFIG_SHMEM - /* -@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt; - #define BOGO_DIRENT_SIZE 20 - - /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ --#define SHORT_SYMLINK_LEN 128 -+#define SHORT_SYMLINK_LEN 64 - - struct shmem_xattr { - struct list_head list; /* anchored by shmem_inode_info->xattr_list */ -@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, - struct mempolicy mpol, *spol; - struct vm_area_struct pvma; - -+ pax_track_stack(); -+ - spol = mpol_cond_copy(&mpol, - mpol_shared_policy_lookup(&info->policy, index)); - -@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) - int err = -ENOMEM; - - /* Round up to L1_CACHE_BYTES to resist false sharing */ -- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), -- L1_CACHE_BYTES), GFP_KERNEL); -+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL); - if (!sbinfo) - return -ENOMEM; - -diff --git a/mm/slab.c b/mm/slab.c -index 893c76d..a742de2 100644 ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -151,7 +151,7 @@ - - /* Legal flag mask for kmem_cache_create(). 
*/ - #if DEBUG --# define CREATE_MASK (SLAB_RED_ZONE | \ -+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \ - SLAB_POISON | SLAB_HWCACHE_ALIGN | \ - SLAB_CACHE_DMA | \ - SLAB_STORE_USER | \ -@@ -159,7 +159,7 @@ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) - #else --# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ -+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \ - SLAB_CACHE_DMA | \ - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ -@@ -288,7 +288,7 @@ struct kmem_list3 { - * Need this for bootstrapping a per node allocator. - */ - #define NUM_INIT_LISTS (3 * MAX_NUMNODES) --static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; -+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS]; - #define CACHE_CACHE 0 - #define SIZE_AC MAX_NUMNODES - #define SIZE_L3 (2 * MAX_NUMNODES) -@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent) - if ((x)->max_freeable < i) \ - (x)->max_freeable = i; \ - } while (0) --#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) --#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) --#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) --#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) -+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit) -+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss) -+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit) -+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss) - #else - #define STATS_INC_ACTIVE(x) do { } while (0) - #define STATS_DEC_ACTIVE(x) do { } while (0) -@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, - * reciprocal_divide(offset, cache->reciprocal_buffer_size) - */ - static inline unsigned int obj_to_index(const struct kmem_cache *cache, -- const struct slab *slab, void *obj) -+ const struct slab *slab, const void *obj) - { - u32 offset = (obj - slab->s_mem); - return reciprocal_divide(offset, cache->reciprocal_buffer_size); -@@ -564,7 +564,7 @@ struct cache_names { - static struct cache_names __initdata cache_names[] = { - #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, - #include <linux/kmalloc_sizes.h> -- {NULL,} -+ {NULL} - #undef CACHE - }; - -@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void) - sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, - sizes[INDEX_AC].cs_size, - ARCH_KMALLOC_MINALIGN, -- ARCH_KMALLOC_FLAGS|SLAB_PANIC, -+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, - NULL); - - if (INDEX_AC != INDEX_L3) { -@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void) - kmem_cache_create(names[INDEX_L3].name, - sizes[INDEX_L3].cs_size, - ARCH_KMALLOC_MINALIGN, -- ARCH_KMALLOC_FLAGS|SLAB_PANIC, -+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, - NULL); - } - -@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void) - sizes->cs_cachep = kmem_cache_create(names->name, - sizes->cs_size, - ARCH_KMALLOC_MINALIGN, -- ARCH_KMALLOC_FLAGS|SLAB_PANIC, -+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, - NULL); - } - #ifdef CONFIG_ZONE_DMA -@@ -4327,10 +4327,10 @@ static int s_show(struct seq_file *m, void *p) - } - /* cpu stats */ - { -- unsigned long allochit = atomic_read(&cachep->allochit); -- unsigned long allocmiss = atomic_read(&cachep->allocmiss); -- unsigned long freehit = atomic_read(&cachep->freehit); -- unsigned long freemiss = atomic_read(&cachep->freemiss); -+ unsigned long allochit = 
atomic_read_unchecked(&cachep->allochit); -+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss); -+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit); -+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss); - - seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", - allochit, allocmiss, freehit, freemiss); -@@ -4587,15 +4587,70 @@ static const struct file_operations proc_slabstats_operations = { - - static int __init slab_proc_init(void) - { -- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations); -+ mode_t gr_mode = S_IRUGO; -+ -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ gr_mode = S_IRUSR; -+#endif -+ -+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations); - #ifdef CONFIG_DEBUG_SLAB_LEAK -- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); -+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations); - #endif - return 0; - } - module_init(slab_proc_init); - #endif - -+void check_object_size(const void *ptr, unsigned long n, bool to) -+{ -+ -+#ifdef CONFIG_PAX_USERCOPY -+ struct page *page; -+ struct kmem_cache *cachep = NULL; -+ struct slab *slabp; -+ unsigned int objnr; -+ unsigned long offset; -+ const char *type; -+ -+ if (!n) -+ return; -+ -+ type = "<null>"; -+ if (ZERO_OR_NULL_PTR(ptr)) -+ goto report; -+ -+ if (!virt_addr_valid(ptr)) -+ return; -+ -+ page = virt_to_head_page(ptr); -+ -+ type = "<process stack>"; -+ if (!PageSlab(page)) { -+ if (object_is_on_stack(ptr, n) == -1) -+ goto report; -+ return; -+ } -+ -+ cachep = page_get_cache(page); -+ type = cachep->name; -+ if (!(cachep->flags & SLAB_USERCOPY)) -+ goto report; -+ -+ slabp = page_get_slab(page); -+ objnr = obj_to_index(cachep, slabp, ptr); -+ BUG_ON(objnr >= cachep->num); -+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep); -+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset) -+ return; -+ -+report: -+ pax_report_usercopy(ptr, n, to, type); -+#endif -+ -+} -+EXPORT_SYMBOL(check_object_size); -+ - /** - * ksize - get the actual amount of memory allocated for a given object - * @objp: Pointer to the object -diff --git a/mm/slob.c b/mm/slob.c -index bf39181..727f7a3 100644 ---- a/mm/slob.c -+++ b/mm/slob.c -@@ -29,7 +29,7 @@ - * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls - * alloc_pages() directly, allocating compound pages so the page order - * does not have to be separately tracked, and also stores the exact -- * allocation size in page->private so that it can be used to accurately -+ * allocation size in slob_page->size so that it can be used to accurately - * provide ksize(). These objects are detected in kfree() because slob_page() - * is false for them. 
- * -@@ -58,6 +58,7 @@ - */ - - #include <linux/kernel.h> -+#include <linux/sched.h> - #include <linux/slab.h> - #include <linux/mm.h> - #include <linux/swap.h> /* struct reclaim_state */ -@@ -102,7 +103,8 @@ struct slob_page { - unsigned long flags; /* mandatory */ - atomic_t _count; /* mandatory */ - slobidx_t units; /* free units left in page */ -- unsigned long pad[2]; -+ unsigned long pad[1]; -+ unsigned long size; /* size when >=PAGE_SIZE */ - slob_t *free; /* first free slob_t in page */ - struct list_head list; /* linked list of free pages */ - }; -@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large); - */ - static inline int is_slob_page(struct slob_page *sp) - { -- return PageSlab((struct page *)sp); -+ return PageSlab((struct page *)sp) && !sp->size; - } - - static inline void set_slob_page(struct slob_page *sp) -@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp) - - static inline struct slob_page *slob_page(const void *addr) - { -- return (struct slob_page *)virt_to_page(addr); -+ return (struct slob_page *)virt_to_head_page(addr); - } - - /* -@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) - /* - * Return the size of a slob block. - */ --static slobidx_t slob_units(slob_t *s) -+static slobidx_t slob_units(const slob_t *s) - { - if (s->units > 0) - return s->units; -@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s) - /* - * Return the next free slob block pointer after this one. - */ --static slob_t *slob_next(slob_t *s) -+static slob_t *slob_next(const slob_t *s) - { - slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); - slobidx_t next; -@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s) - /* - * Returns true if s is the last free block in its page. - */ --static int slob_last(slob_t *s) -+static int slob_last(const slob_t *s) - { - return !((unsigned long)slob_next(s) & ~PAGE_MASK); - } -@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) - if (!page) - return NULL; - -+ set_slob_page(page); - return page_address(page); - } - -@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) - if (!b) - return NULL; - sp = slob_page(b); -- set_slob_page(sp); - - spin_lock_irqsave(&slob_lock, flags); - sp->units = SLOB_UNITS(PAGE_SIZE); - sp->free = b; -+ sp->size = 0; - INIT_LIST_HEAD(&sp->list); - set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); - set_slob_page_free(sp, slob_list); -@@ -476,10 +479,9 @@ out: - * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. 
- */ - --void *__kmalloc_node(size_t size, gfp_t gfp, int node) -+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align) - { -- unsigned int *m; -- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); -+ slob_t *m; - void *ret; - - gfp &= gfp_allowed_mask; -@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) - - if (!m) - return NULL; -- *m = size; -+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT); -+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT); -+ m[0].units = size; -+ m[1].units = align; - ret = (void *)m + align; - - trace_kmalloc_node(_RET_IP_, ret, -@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) - gfp |= __GFP_COMP; - ret = slob_new_pages(gfp, order, node); - if (ret) { -- struct page *page; -- page = virt_to_page(ret); -- page->private = size; -+ struct slob_page *sp; -+ sp = slob_page(ret); -+ sp->size = size; - } - - trace_kmalloc_node(_RET_IP_, ret, - size, PAGE_SIZE << order, gfp, node); - } - -- kmemleak_alloc(ret, size, 1, gfp); -+ return ret; -+} -+ -+void *__kmalloc_node(size_t size, gfp_t gfp, int node) -+{ -+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); -+ void *ret = __kmalloc_node_align(size, gfp, node, align); -+ -+ if (!ZERO_OR_NULL_PTR(ret)) -+ kmemleak_alloc(ret, size, 1, gfp); - return ret; - } - EXPORT_SYMBOL(__kmalloc_node); -@@ -533,13 +547,92 @@ void kfree(const void *block) - sp = slob_page(block); - if (is_slob_page(sp)) { - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); -- unsigned int *m = (unsigned int *)(block - align); -- slob_free(m, *m + align); -- } else -+ slob_t *m = (slob_t *)(block - align); -+ slob_free(m, m[0].units + align); -+ } else { -+ clear_slob_page(sp); -+ free_slob_page(sp); -+ sp->size = 0; - put_page(&sp->page); -+ } - } - EXPORT_SYMBOL(kfree); - -+void check_object_size(const void *ptr, unsigned long n, bool to) -+{ -+ -+#ifdef CONFIG_PAX_USERCOPY -+ struct slob_page *sp; -+ const slob_t *free; -+ const void *base; -+ unsigned long flags; -+ const char *type; -+ -+ if (!n) -+ return; -+ -+ type = "<null>"; -+ if (ZERO_OR_NULL_PTR(ptr)) -+ goto report; -+ -+ if (!virt_addr_valid(ptr)) -+ return; -+ -+ type = "<process stack>"; -+ sp = slob_page(ptr); -+ if (!PageSlab((struct page*)sp)) { -+ if (object_is_on_stack(ptr, n) == -1) -+ goto report; -+ return; -+ } -+ -+ type = "<slob>"; -+ if (sp->size) { -+ base = page_address(&sp->page); -+ if (base <= ptr && n <= sp->size - (ptr - base)) -+ return; -+ goto report; -+ } -+ -+ /* some tricky double walking to find the chunk */ -+ spin_lock_irqsave(&slob_lock, flags); -+ base = (void *)((unsigned long)ptr & PAGE_MASK); -+ free = sp->free; -+ -+ while (!slob_last(free) && (void *)free <= ptr) { -+ base = free + slob_units(free); -+ free = slob_next(free); -+ } -+ -+ while (base < (void *)free) { -+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units; -+ int size = SLOB_UNIT * SLOB_UNITS(m + align); -+ int offset; -+ -+ if (ptr < base + align) -+ break; -+ -+ offset = ptr - base - align; -+ if (offset >= m) { -+ base += size; -+ continue; -+ } -+ -+ if (n > m - offset) -+ break; -+ -+ spin_unlock_irqrestore(&slob_lock, flags); -+ return; -+ } -+ -+ spin_unlock_irqrestore(&slob_lock, flags); -+report: -+ pax_report_usercopy(ptr, n, to, type); -+#endif -+ -+} -+EXPORT_SYMBOL(check_object_size); -+ - /* can't use ksize for kmem_cache_alloc memory, only kmalloc */ - size_t ksize(const void *block) - { -@@ -552,10 +645,10 @@ size_t ksize(const void 
*block) - sp = slob_page(block); - if (is_slob_page(sp)) { - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); -- unsigned int *m = (unsigned int *)(block - align); -- return SLOB_UNITS(*m) * SLOB_UNIT; -+ slob_t *m = (slob_t *)(block - align); -+ return SLOB_UNITS(m[0].units) * SLOB_UNIT; - } else -- return sp->page.private; -+ return sp->size; - } - EXPORT_SYMBOL(ksize); - -@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, - { - struct kmem_cache *c; - -+#ifdef CONFIG_PAX_USERCOPY -+ c = __kmalloc_node_align(sizeof(struct kmem_cache), -+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN); -+#else - c = slob_alloc(sizeof(struct kmem_cache), - GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); -+#endif - - if (c) { - c->name = name; -@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) - - lockdep_trace_alloc(flags); - -+#ifdef CONFIG_PAX_USERCOPY -+ b = __kmalloc_node_align(c->size, flags, node, c->align); -+#else - if (c->size < PAGE_SIZE) { - b = slob_alloc(c->size, flags, c->align, node); - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, - SLOB_UNITS(c->size) * SLOB_UNIT, - flags, node); - } else { -+ struct slob_page *sp; -+ - b = slob_new_pages(flags, get_order(c->size), node); -+ sp = slob_page(b); -+ sp->size = c->size; - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, - PAGE_SIZE << get_order(c->size), - flags, node); - } -+#endif - - if (c->ctor) - c->ctor(b); -@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); - - static void __kmem_cache_free(void *b, int size) - { -- if (size < PAGE_SIZE) -+ struct slob_page *sp = slob_page(b); -+ -+ if (is_slob_page(sp)) - slob_free(b, size); -- else -+ else { -+ clear_slob_page(sp); -+ free_slob_page(sp); -+ sp->size = 0; - slob_free_pages(b, get_order(size)); -+ } - } - - static void kmem_rcu_free(struct rcu_head *head) -@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head) - - void kmem_cache_free(struct kmem_cache *c, void *b) - { -+ int size = c->size; -+ -+#ifdef CONFIG_PAX_USERCOPY -+ if (size + c->align < PAGE_SIZE) { -+ size += c->align; -+ b -= c->align; -+ } -+#endif -+ - kmemleak_free_recursive(b, c->flags); - if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { - struct slob_rcu *slob_rcu; -- slob_rcu = b + (c->size - sizeof(struct slob_rcu)); -- slob_rcu->size = c->size; -+ slob_rcu = b + (size - sizeof(struct slob_rcu)); -+ slob_rcu->size = size; - call_rcu(&slob_rcu->head, kmem_rcu_free); - } else { -- __kmem_cache_free(b, c->size); -+ __kmem_cache_free(b, size); - } - -+#ifdef CONFIG_PAX_USERCOPY -+ trace_kfree(_RET_IP_, b); -+#else - trace_kmem_cache_free(_RET_IP_, b); -+#endif -+ - } - EXPORT_SYMBOL(kmem_cache_free); - -diff --git a/mm/slub.c b/mm/slub.c -index 7c54fe8..0bb4ac5 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -208,7 +208,7 @@ struct track { - - enum track_item { TRACK_ALLOC, TRACK_FREE }; - --#ifdef CONFIG_SYSFS -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - static int sysfs_slab_add(struct kmem_cache *); - static int sysfs_slab_alias(struct kmem_cache *, const char *); - static void sysfs_slab_remove(struct kmem_cache *); -@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t) - if (!t->addr) - return; - -- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", -+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n", - s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); - #ifdef CONFIG_STACKTRACE - { -@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache 
*s, void *x) - - page = virt_to_head_page(x); - -+ BUG_ON(!PageSlab(page)); -+ - slab_free(s, page, x, _RET_IP_); - - trace_kmem_cache_free(_RET_IP_, x); -@@ -2489,7 +2491,7 @@ static int slub_min_objects; - * Merge control. If this is set then no merging of slab caches will occur. - * (Could be removed. This was introduced to pacify the merge skeptics.) - */ --static int slub_nomerge; -+static int slub_nomerge = 1; - - /* - * Calculate the order of allocation given an slab object size. -@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_cache *s, - * list to avoid pounding the page allocator excessively. - */ - set_min_partial(s, ilog2(s->size)); -- s->refcount = 1; -+ atomic_set(&s->refcount, 1); - #ifdef CONFIG_NUMA - s->remote_node_defrag_ratio = 1000; - #endif -@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struct kmem_cache *s) - void kmem_cache_destroy(struct kmem_cache *s) - { - down_write(&slub_lock); -- s->refcount--; -- if (!s->refcount) { -+ if (atomic_dec_and_test(&s->refcount)) { - list_del(&s->list); - if (kmem_cache_close(s)) { - printk(KERN_ERR "SLUB %s: %s called for cache that " -@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) - EXPORT_SYMBOL(__kmalloc_node); - #endif - -+void check_object_size(const void *ptr, unsigned long n, bool to) -+{ -+ -+#ifdef CONFIG_PAX_USERCOPY -+ struct page *page; -+ struct kmem_cache *s = NULL; -+ unsigned long offset; -+ const char *type; -+ -+ if (!n) -+ return; -+ -+ type = "<null>"; -+ if (ZERO_OR_NULL_PTR(ptr)) -+ goto report; -+ -+ if (!virt_addr_valid(ptr)) -+ return; -+ -+ page = virt_to_head_page(ptr); -+ -+ type = "<process stack>"; -+ if (!PageSlab(page)) { -+ if (object_is_on_stack(ptr, n) == -1) -+ goto report; -+ return; -+ } -+ -+ s = page->slab; -+ type = s->name; -+ if (!(s->flags & SLAB_USERCOPY)) -+ goto report; -+ -+ offset = (ptr - page_address(page)) % s->size; -+ if (offset <= s->objsize && n <= s->objsize - offset) -+ return; -+ -+report: -+ pax_report_usercopy(ptr, n, to, type); -+#endif -+ -+} -+EXPORT_SYMBOL(check_object_size); -+ - size_t ksize(const void *object) - { - struct page *page; -@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) - int node; - - list_add(&s->list, &slab_caches); -- s->refcount = -1; -+ atomic_set(&s->refcount, -1); - - for_each_node_state(node, N_NORMAL_MEMORY) { - struct kmem_cache_node *n = get_node(s, node); -@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void) - - /* Caches that are not of the two-to-the-power-of size */ - if (KMALLOC_MIN_SIZE <= 32) { -- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); -+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY); - caches++; - } - - if (KMALLOC_MIN_SIZE <= 64) { -- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); -+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY); - caches++; - } - - for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { -- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); -+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY); - caches++; - } - -@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_cache *s) - /* - * We may have set a slab to be unmergeable during bootstrap. 
- */ -- if (s->refcount < 0) -+ if (atomic_read(&s->refcount) < 0) - return 1; - - return 0; -@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, - down_write(&slub_lock); - s = find_mergeable(size, align, flags, name, ctor); - if (s) { -- s->refcount++; -+ atomic_inc(&s->refcount); - /* - * Adjust the object sizes so that we clear - * the complete object on kzalloc. -@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, - s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); - - if (sysfs_slab_alias(s, name)) { -- s->refcount--; -+ atomic_dec(&s->refcount); - goto err; - } - up_write(&slub_lock); -@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, - } - #endif - --#ifdef CONFIG_SYSFS -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - static int count_inuse(struct page *page) - { - return page->inuse; -@@ -4280,12 +4325,12 @@ static void resiliency_test(void) - validate_slab_cache(kmalloc_caches[9]); - } - #else --#ifdef CONFIG_SYSFS -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - static void resiliency_test(void) {}; - #endif - #endif - --#ifdef CONFIG_SYSFS -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - enum slab_stat_type { - SL_ALL, /* All slabs */ - SL_PARTIAL, /* Only partially allocated slabs */ -@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor); - - static ssize_t aliases_show(struct kmem_cache *s, char *buf) - { -- return sprintf(buf, "%d\n", s->refcount - 1); -+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1); - } - SLAB_ATTR_RO(aliases); - -@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kmem_cache *s) - return name; - } - -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - static int sysfs_slab_add(struct kmem_cache *s) - { - int err; -@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) - kobject_del(&s->kobj); - kobject_put(&s->kobj); - } -+#endif - - /* - * Need to buffer aliases during bootup until sysfs becomes -@@ -5100,6 +5147,7 @@ struct saved_alias { - - static struct saved_alias *alias_list; - -+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) - static int sysfs_slab_alias(struct kmem_cache *s, const char *name) - { - struct saved_alias *al; -@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) - alias_list = al; - return 0; - } -+#endif - - static int __init slab_sysfs_init(void) - { -@@ -5257,7 +5306,13 @@ static const struct file_operations proc_slabinfo_operations = { - - static int __init slab_proc_init(void) - { -- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations); -+ mode_t gr_mode = S_IRUGO; -+ -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ gr_mode = S_IRUSR; -+#endif -+ -+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations); - return 0; - } - module_init(slab_proc_init); -diff --git a/mm/swap.c b/mm/swap.c -index 87627f1..8a9eb34 100644 ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -31,6 +31,7 @@ - #include <linux/backing-dev.h> - #include <linux/memcontrol.h> - #include <linux/gfp.h> -+#include <linux/hugetlb.h> - - #include "internal.h" - -@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page) - - __page_cache_release(page); - dtor = get_compound_page_dtor(page); -+ if (!PageHuge(page)) -+ BUG_ON(dtor != free_compound_page); - (*dtor)(page); - } - -diff --git a/mm/swapfile.c b/mm/swapfile.c -index 17bc224..1677059 100644 ---- a/mm/swapfile.c -+++ 
b/mm/swapfile.c -@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex); - - static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); - /* Activity counter to indicate that a swapon or swapoff has occurred */ --static atomic_t proc_poll_event = ATOMIC_INIT(0); -+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0); - - static inline unsigned char swap_count(unsigned char ent) - { -@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) - } - filp_close(swap_file, NULL); - err = 0; -- atomic_inc(&proc_poll_event); -+ atomic_inc_unchecked(&proc_poll_event); - wake_up_interruptible(&proc_poll_wait); - - out_dput: -@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait) - - poll_wait(file, &proc_poll_wait, wait); - -- if (seq->poll_event != atomic_read(&proc_poll_event)) { -- seq->poll_event = atomic_read(&proc_poll_event); -+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) { -+ seq->poll_event = atomic_read_unchecked(&proc_poll_event); - return POLLIN | POLLRDNORM | POLLERR | POLLPRI; - } - -@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file) - return ret; - - seq = file->private_data; -- seq->poll_event = atomic_read(&proc_poll_event); -+ seq->poll_event = atomic_read_unchecked(&proc_poll_event); - return 0; - } - -@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) - (p->flags & SWP_DISCARDABLE) ? "D" : ""); - - mutex_unlock(&swapon_mutex); -- atomic_inc(&proc_poll_event); -+ atomic_inc_unchecked(&proc_poll_event); - wake_up_interruptible(&proc_poll_wait); - - if (S_ISREG(inode->i_mode)) -diff --git a/mm/util.c b/mm/util.c -index 88ea1bd..0f1dfdb 100644 ---- a/mm/util.c -+++ b/mm/util.c -@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user); - * allocated buffer. Use this if you don't want to free the buffer immediately - * like, for example, with RCU. - */ -+#undef __krealloc - void *__krealloc(const void *p, size_t new_size, gfp_t flags) - { - void *ret; -@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc); - * behaves exactly like kmalloc(). If @size is 0 and @p is not a - * %NULL pointer, the object pointed to is freed. 
- */ -+#undef krealloc - void *krealloc(const void *p, size_t new_size, gfp_t flags) - { - void *ret; -@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, - void arch_pick_mmap_layout(struct mm_struct *mm) - { - mm->mmap_base = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; - } -diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index 56faf31..862c072 100644 ---- a/mm/vmalloc.c -+++ b/mm/vmalloc.c -@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) - - pte = pte_offset_kernel(pmd, addr); - do { -- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); -- WARN_ON(!pte_none(ptent) && !pte_present(ptent)); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) { -+ BUG_ON(!pte_exec(*pte)); -+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); -+ continue; -+ } -+#endif -+ -+ { -+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); -+ WARN_ON(!pte_none(ptent) && !pte_present(ptent)); -+ } - } while (pte++, addr += PAGE_SIZE, addr != end); - } - -@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, - unsigned long end, pgprot_t prot, struct page **pages, int *nr) - { - pte_t *pte; -+ int ret = -ENOMEM; - - /* - * nr is a running index into the array which helps higher level -@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, - pte = pte_alloc_kernel(pmd, addr); - if (!pte) - return -ENOMEM; -+ -+ pax_open_kernel(); - do { - struct page *page = pages[*nr]; - -- if (WARN_ON(!pte_none(*pte))) -- return -EBUSY; -- if (WARN_ON(!page)) -- return -ENOMEM; -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (pgprot_val(prot) & _PAGE_NX) -+#endif -+ -+ if (WARN_ON(!pte_none(*pte))) { -+ ret = -EBUSY; -+ goto out; -+ } -+ if (WARN_ON(!page)) { -+ ret = -ENOMEM; -+ goto out; -+ } - set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); - (*nr)++; - } while (pte++, addr += PAGE_SIZE, addr != end); -- return 0; -+ ret = 0; -+out: -+ pax_close_kernel(); -+ return ret; - } - - static int vmap_pmd_range(pud_t *pud, unsigned long addr, -@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x) - * and fall back on vmalloc() if that fails. Others - * just put it in the vmalloc space. 
- */ --#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) -+#ifdef CONFIG_MODULES -+#ifdef MODULES_VADDR - unsigned long addr = (unsigned long)x; - if (addr >= MODULES_VADDR && addr < MODULES_END) - return 1; - #endif -+ -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END) -+ return 1; -+#endif -+ -+#endif -+ - return is_vmalloc_addr(x); - } - -@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) - - if (!pgd_none(*pgd)) { - pud_t *pud = pud_offset(pgd, addr); -+#ifdef CONFIG_X86 -+ if (!pud_large(*pud)) -+#endif - if (!pud_none(*pud)) { - pmd_t *pmd = pmd_offset(pud, addr); -+#ifdef CONFIG_X86 -+ if (!pmd_large(*pmd)) -+#endif - if (!pmd_none(*pmd)) { - pte_t *ptep, pte; - -@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, - struct vm_struct *area; - - BUG_ON(in_interrupt()); -+ -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) -+ if (flags & VM_KERNEXEC) { -+ if (start != VMALLOC_START || end != VMALLOC_END) -+ return NULL; -+ start = (unsigned long)MODULES_EXEC_VADDR; -+ end = (unsigned long)MODULES_EXEC_END; -+ } -+#endif -+ - if (flags & VM_IOREMAP) { - int bit = fls(size); - -@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count, - if (count > totalram_pages) - return NULL; - -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) -+ if (!(pgprot_val(prot) & _PAGE_NX)) -+ flags |= VM_KERNEXEC; -+#endif -+ - area = get_vm_area_caller((count << PAGE_SHIFT), flags, - __builtin_return_address(0)); - if (!area) -@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, - if (!size || (size >> PAGE_SHIFT) > totalram_pages) - return NULL; - -+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) -+ if (!(pgprot_val(prot) & _PAGE_NX)) -+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC, -+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller); -+ else -+#endif -+ - area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST, - start, end, node, gfp_mask, caller); - -@@ -1634,6 +1696,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, - return NULL; - - addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); -+ if (!addr) -+ return NULL; - - /* - * In this function, newly allocated vm_struct is not added -@@ -1672,6 +1736,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align, - gfp_mask, prot, node, caller); - } - -+#undef __vmalloc - void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) - { - return __vmalloc_node(size, 1, gfp_mask, prot, -1, -@@ -1695,6 +1760,7 @@ static inline void *__vmalloc_node_flags(unsigned long size, - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - */ -+#undef vmalloc - void *vmalloc(unsigned long size) - { - return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM); -@@ -1711,6 +1777,7 @@ EXPORT_SYMBOL(vmalloc); - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - */ -+#undef vzalloc - void *vzalloc(unsigned long size) - { - return __vmalloc_node_flags(size, -1, -@@ -1725,6 +1792,7 @@ EXPORT_SYMBOL(vzalloc); - * The resulting memory area is zeroed so it can be mapped to userspace - * without leaking data. 
- */ -+#undef vmalloc_user - void *vmalloc_user(unsigned long size) - { - struct vm_struct *area; -@@ -1752,6 +1820,7 @@ EXPORT_SYMBOL(vmalloc_user); - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - */ -+#undef vmalloc_node - void *vmalloc_node(unsigned long size, int node) - { - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, -@@ -1771,6 +1840,7 @@ EXPORT_SYMBOL(vmalloc_node); - * For tight control over page level allocator and protection flags - * use __vmalloc_node() instead. - */ -+#undef vzalloc_node - void *vzalloc_node(unsigned long size, int node) - { - return __vmalloc_node_flags(size, node, -@@ -1793,10 +1863,10 @@ EXPORT_SYMBOL(vzalloc_node); - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - */ -- -+#undef vmalloc_exec - void *vmalloc_exec(unsigned long size) - { -- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, -+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC, - -1, __builtin_return_address(0)); - } - -@@ -1815,6 +1885,7 @@ void *vmalloc_exec(unsigned long size) - * Allocate enough 32bit PA addressable pages to cover @size from the - * page level allocator and map them into contiguous kernel virtual space. - */ -+#undef vmalloc_32 - void *vmalloc_32(unsigned long size) - { - return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, -@@ -1829,6 +1900,7 @@ EXPORT_SYMBOL(vmalloc_32); - * The resulting memory area is 32bit addressable and zeroed so it can be - * mapped to userspace without leaking data. - */ -+#undef vmalloc_32_user - void *vmalloc_32_user(unsigned long size) - { - struct vm_struct *area; -@@ -2091,6 +2163,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, - unsigned long uaddr = vma->vm_start; - unsigned long usize = vma->vm_end - vma->vm_start; - -+ BUG_ON(vma->vm_mirror); -+ - if ((PAGE_SIZE-1) & (unsigned long)addr) - return -EINVAL; - -diff --git a/mm/vmstat.c b/mm/vmstat.c -index d52b13d..381d1ac 100644 ---- a/mm/vmstat.c -+++ b/mm/vmstat.c -@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu) - * - * vm_stat contains the global counters - */ --atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; -+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; - EXPORT_SYMBOL(vm_stat); - - #ifdef CONFIG_SMP -@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu) - v = p->vm_stat_diff[i]; - p->vm_stat_diff[i] = 0; - local_irq_restore(flags); -- atomic_long_add(v, &zone->vm_stat[i]); -+ atomic_long_add_unchecked(v, &zone->vm_stat[i]); - global_diff[i] += v; - #ifdef CONFIG_NUMA - /* 3 seconds idle till flush */ -@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu) - - for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) - if (global_diff[i]) -- atomic_long_add(global_diff[i], &vm_stat[i]); -+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); - } - - #endif -@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void) - start_cpu_timer(cpu); - #endif - #ifdef CONFIG_PROC_FS -- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); -- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); -- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); -- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); -+ { -+ mode_t gr_mode = S_IRUGO; -+#ifdef CONFIG_GRKERNSEC_PROC_ADD -+ gr_mode = S_IRUSR; -+#endif -+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); -+ proc_create("pagetypeinfo", 
gr_mode, NULL, &pagetypeinfo_file_ops); -+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP -+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations); -+#else -+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations); -+#endif -+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); -+ } - #endif - return 0; - } -diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c -index 8970ba1..e3361fe 100644 ---- a/net/8021q/vlan.c -+++ b/net/8021q/vlan.c -@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) - err = -EPERM; - if (!capable(CAP_NET_ADMIN)) - break; -- if ((args.u.name_type >= 0) && -- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { -+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { - struct vlan_net *vn; - - vn = net_generic(net, vlan_net_id); -diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c -index fdfdb57..38d368c 100644 ---- a/net/9p/trans_fd.c -+++ b/net/9p/trans_fd.c -@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len) - oldfs = get_fs(); - set_fs(get_ds()); - /* The cast to a user pointer is valid due to the set_fs() */ -- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); -+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos); - set_fs(oldfs); - - if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) -diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c -index e317583..3c8aeaf 100644 ---- a/net/9p/trans_virtio.c -+++ b/net/9p/trans_virtio.c -@@ -327,7 +327,7 @@ req_retry_pinned: - } else { - char *pbuf; - if (req->tc->pubuf) -- pbuf = (__force char *) req->tc->pubuf; -+ pbuf = (char __force_kernel *) req->tc->pubuf; - else - pbuf = req->tc->pkbuf; - outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, -@@ -357,7 +357,7 @@ req_retry_pinned: - } else { - char *pbuf; - if (req->tc->pubuf) -- pbuf = (__force char *) req->tc->pubuf; -+ pbuf = (char __force_kernel *) req->tc->pubuf; - else - pbuf = req->tc->pkbuf; - -diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c -index f41f026..fe76ea8 100644 ---- a/net/atm/atm_misc.c -+++ b/net/atm/atm_misc.c -@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize) - if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) - return 1; - atm_return(vcc, truesize); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - return 0; - } - EXPORT_SYMBOL(atm_charge); -@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, - } - } - atm_return(vcc, guess); -- atomic_inc(&vcc->stats->rx_drop); -+ atomic_inc_unchecked(&vcc->stats->rx_drop); - return NULL; - } - EXPORT_SYMBOL(atm_alloc_charge); -@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal); - - void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) - { --#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) -+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) - __SONET_ITEMS - #undef __HANDLE_ITEM - } -@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats); - - void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) - { --#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) -+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) - __SONET_ITEMS - #undef __HANDLE_ITEM - } -diff --git a/net/atm/lec.h b/net/atm/lec.h -index dfc0719..47c5322 100644 ---- a/net/atm/lec.h -+++ b/net/atm/lec.h -@@ -48,7 +48,7 @@ struct lane2_ops { - const u8 *tlvs, u32 sizeoftlvs); - void (*associate_indicator) (struct net_device 
*dev, const u8 *mac_addr, - const u8 *tlvs, u32 sizeoftlvs); --}; -+} __no_const; - - /* - * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType -diff --git a/net/atm/mpc.h b/net/atm/mpc.h -index 0919a88..a23d54e 100644 ---- a/net/atm/mpc.h -+++ b/net/atm/mpc.h -@@ -33,7 +33,7 @@ struct mpoa_client { - struct mpc_parameters parameters; /* parameters for this client */ - - const struct net_device_ops *old_ops; -- struct net_device_ops new_ops; -+ net_device_ops_no_const new_ops; - }; - - -diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c -index d1b2d9a..7cc2219 100644 ---- a/net/atm/mpoa_caches.c -+++ b/net/atm/mpoa_caches.c -@@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client) - struct timeval now; - struct k_message msg; - -+ pax_track_stack(); -+ - do_gettimeofday(&now); - - read_lock_bh(&client->ingress_lock); -diff --git a/net/atm/proc.c b/net/atm/proc.c -index 0d020de..011c7bb 100644 ---- a/net/atm/proc.c -+++ b/net/atm/proc.c -@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal, - const struct k_atm_aal_stats *stats) - { - seq_printf(seq, "%s ( %d %d %d %d %d )", aal, -- atomic_read(&stats->tx), atomic_read(&stats->tx_err), -- atomic_read(&stats->rx), atomic_read(&stats->rx_err), -- atomic_read(&stats->rx_drop)); -+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), -+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), -+ atomic_read_unchecked(&stats->rx_drop)); - } - - static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) -diff --git a/net/atm/resources.c b/net/atm/resources.c -index 23f45ce..c748f1a 100644 ---- a/net/atm/resources.c -+++ b/net/atm/resources.c -@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister); - static void copy_aal_stats(struct k_atm_aal_stats *from, - struct atm_aal_stats *to) - { --#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) -+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) - __AAL_STAT_ITEMS - #undef __HANDLE_ITEM - } -@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from, - static void subtract_aal_stats(struct k_atm_aal_stats *from, - struct atm_aal_stats *to) - { --#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) -+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) - __AAL_STAT_ITEMS - #undef __HANDLE_ITEM - } -diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c -index db7aacf..991e539 100644 ---- a/net/batman-adv/hard-interface.c -+++ b/net/batman-adv/hard-interface.c -@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, - hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; - dev_add_pack(&hard_iface->batman_adv_ptype); - -- atomic_set(&hard_iface->seqno, 1); -- atomic_set(&hard_iface->frag_seqno, 1); -+ atomic_set_unchecked(&hard_iface->seqno, 1); -+ atomic_set_unchecked(&hard_iface->frag_seqno, 1); - bat_info(hard_iface->soft_iface, "Adding interface: %s\n", - hard_iface->net_dev->name); - -diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c -index 0f32c81..82d1895 100644 ---- a/net/batman-adv/routing.c -+++ b/net/batman-adv/routing.c -@@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr, - return; - - /* could be changed by schedule_own_packet() */ -- if_incoming_seqno = atomic_read(&if_incoming->seqno); -+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno); - - has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 
1 : 0); - -diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c -index 58d1447..2a66c8c 100644 ---- a/net/batman-adv/send.c -+++ b/net/batman-adv/send.c -@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface) - - /* change sequence number to network order */ - batman_packet->seqno = -- htonl((uint32_t)atomic_read(&hard_iface->seqno)); -+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno)); - - batman_packet->ttvn = atomic_read(&bat_priv->ttvn); - batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc)); -@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface) - else - batman_packet->gw_flags = NO_FLAGS; - -- atomic_inc(&hard_iface->seqno); -+ atomic_inc_unchecked(&hard_iface->seqno); - - slide_own_bcast_window(hard_iface); - send_time = own_send_time(bat_priv); -diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c -index 05dd351..2ecd19b 100644 ---- a/net/batman-adv/soft-interface.c -+++ b/net/batman-adv/soft-interface.c -@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) - - /* set broadcast sequence number */ - bcast_packet->seqno = -- htonl(atomic_inc_return(&bat_priv->bcast_seqno)); -+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno)); - - add_bcast_packet_to_list(bat_priv, skb, 1); - -@@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name) - atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN); - - atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); -- atomic_set(&bat_priv->bcast_seqno, 1); -+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1); - atomic_set(&bat_priv->ttvn, 0); - atomic_set(&bat_priv->tt_local_changes, 0); - atomic_set(&bat_priv->tt_ogm_append_cnt, 0); -diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h -index 51a0db7..b8a62be 100644 ---- a/net/batman-adv/types.h -+++ b/net/batman-adv/types.h -@@ -38,8 +38,8 @@ struct hard_iface { - int16_t if_num; - char if_status; - struct net_device *net_dev; -- atomic_t seqno; -- atomic_t frag_seqno; -+ atomic_unchecked_t seqno; -+ atomic_unchecked_t frag_seqno; - unsigned char *packet_buff; - int packet_len; - struct kobject *hardif_obj; -@@ -153,7 +153,7 @@ struct bat_priv { - atomic_t orig_interval; /* uint */ - atomic_t hop_penalty; /* uint */ - atomic_t log_level; /* uint */ -- atomic_t bcast_seqno; -+ atomic_unchecked_t bcast_seqno; - atomic_t bcast_queue_left; - atomic_t batman_queue_left; - atomic_t ttvn; /* tranlation table version number */ -diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c -index 32b125f..f1447e0 100644 ---- a/net/batman-adv/unicast.c -+++ b/net/batman-adv/unicast.c -@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - frag1->flags = UNI_FRAG_HEAD | large_tail; - frag2->flags = large_tail; - -- seqno = atomic_add_return(2, &hard_iface->frag_seqno); -+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno); - frag1->seqno = htons(seqno - 1); - frag2->seqno = htons(seqno); - -diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c -index ea7f031..0615edc 100644 ---- a/net/bluetooth/hci_conn.c -+++ b/net/bluetooth/hci_conn.c -@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], - cp.handle = cpu_to_le16(conn->handle); - memcpy(cp.ltk, ltk, sizeof(cp.ltk)); - cp.ediv = ediv; -- memcpy(cp.rand, rand, sizeof(rand)); -+ memcpy(cp.rand, rand, sizeof(cp.rand)); - - hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); - } -@@ 
-234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]) - memset(&cp, 0, sizeof(cp)); - - cp.handle = cpu_to_le16(conn->handle); -- memcpy(cp.ltk, ltk, sizeof(ltk)); -+ memcpy(cp.ltk, ltk, sizeof(cp.ltk)); - - hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); - } -diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c -index e79ff75..215b57d 100644 ---- a/net/bridge/br_multicast.c -+++ b/net/bridge/br_multicast.c -@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, - nexthdr = ip6h->nexthdr; - offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); - -- if (offset < 0 || nexthdr != IPPROTO_ICMPV6) -+ if (nexthdr != IPPROTO_ICMPV6) - return 0; - - /* Okay, we found ICMPv6 header */ -diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c -index 5864cc4..94cab18 100644 ---- a/net/bridge/netfilter/ebtables.c -+++ b/net/bridge/netfilter/ebtables.c -@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) - tmp.valid_hooks = t->table->valid_hooks; - } - mutex_unlock(&ebt_mutex); -- if (copy_to_user(user, &tmp, *len) != 0){ -+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){ - BUGPRINT("c2u Didn't work\n"); - ret = -EFAULT; - break; -@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t, - int ret; - void __user *pos; - -+ pax_track_stack(); -+ - memset(&tinfo, 0, sizeof(tinfo)); - - if (cmd == EBT_SO_GET_ENTRIES) { -diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c -index a986280..13444a1 100644 ---- a/net/caif/caif_socket.c -+++ b/net/caif/caif_socket.c -@@ -48,19 +48,20 @@ static struct dentry *debugfsdir; - #ifdef CONFIG_DEBUG_FS - struct debug_fs_counter { - atomic_t caif_nr_socks; -- atomic_t caif_sock_create; -- atomic_t num_connect_req; -- atomic_t num_connect_resp; -- atomic_t num_connect_fail_resp; -- atomic_t num_disconnect; -- atomic_t num_remote_shutdown_ind; -- atomic_t num_tx_flow_off_ind; -- atomic_t num_tx_flow_on_ind; -- atomic_t num_rx_flow_off; -- atomic_t num_rx_flow_on; -+ atomic_unchecked_t caif_sock_create; -+ atomic_unchecked_t num_connect_req; -+ atomic_unchecked_t num_connect_resp; -+ atomic_unchecked_t num_connect_fail_resp; -+ atomic_unchecked_t num_disconnect; -+ atomic_unchecked_t num_remote_shutdown_ind; -+ atomic_unchecked_t num_tx_flow_off_ind; -+ atomic_unchecked_t num_tx_flow_on_ind; -+ atomic_unchecked_t num_rx_flow_off; -+ atomic_unchecked_t num_rx_flow_on; - }; - static struct debug_fs_counter cnt; - #define dbfs_atomic_inc(v) atomic_inc_return(v) -+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v) - #define dbfs_atomic_dec(v) atomic_dec_return(v) - #else - #define dbfs_atomic_inc(v) 0 -@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - atomic_read(&cf_sk->sk.sk_rmem_alloc), - sk_rcvbuf_lowwater(cf_sk)); - set_rx_flow_off(cf_sk); -- dbfs_atomic_inc(&cnt.num_rx_flow_off); -+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off); - caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); - } - -@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - set_rx_flow_off(cf_sk); - if (net_ratelimit()) - pr_debug("sending flow OFF due to rmem_schedule\n"); -- dbfs_atomic_inc(&cnt.num_rx_flow_off); -+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off); - caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); - } - skb->dev = NULL; -@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr, - switch (flow) 
{ - case CAIF_CTRLCMD_FLOW_ON_IND: - /* OK from modem to start sending again */ -- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); -+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind); - set_tx_flow_on(cf_sk); - cf_sk->sk.sk_state_change(&cf_sk->sk); - break; - - case CAIF_CTRLCMD_FLOW_OFF_IND: - /* Modem asks us to shut up */ -- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); -+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind); - set_tx_flow_off(cf_sk); - cf_sk->sk.sk_state_change(&cf_sk->sk); - break; -@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr, - /* We're now connected */ - caif_client_register_refcnt(&cf_sk->layer, - cfsk_hold, cfsk_put); -- dbfs_atomic_inc(&cnt.num_connect_resp); -+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp); - cf_sk->sk.sk_state = CAIF_CONNECTED; - set_tx_flow_on(cf_sk); - cf_sk->sk.sk_state_change(&cf_sk->sk); -@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr, - - case CAIF_CTRLCMD_INIT_FAIL_RSP: - /* Connect request failed */ -- dbfs_atomic_inc(&cnt.num_connect_fail_resp); -+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp); - cf_sk->sk.sk_err = ECONNREFUSED; - cf_sk->sk.sk_state = CAIF_DISCONNECTED; - cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; -@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr, - - case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: - /* Modem has closed this connection, or device is down. */ -- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); -+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind); - cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; - cf_sk->sk.sk_err = ECONNRESET; - set_rx_flow_on(cf_sk); -@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk) - return; - - if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { -- dbfs_atomic_inc(&cnt.num_rx_flow_on); -+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on); - set_rx_flow_on(cf_sk); - caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); - } -@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, - /*ifindex = id of the interface.*/ - cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; - -- dbfs_atomic_inc(&cnt.num_connect_req); -+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req); - cf_sk->layer.receive = caif_sktrecv_cb; - - err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, -@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock) - spin_unlock_bh(&sk->sk_receive_queue.lock); - sock->sk = NULL; - -- dbfs_atomic_inc(&cnt.num_disconnect); -+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect); - - WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); - if (cf_sk->debugfs_socket_dir != NULL) -@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, - cf_sk->conn_req.protocol = protocol; - /* Increase the number of sockets created. 
*/ - dbfs_atomic_inc(&cnt.caif_nr_socks); -- num = dbfs_atomic_inc(&cnt.caif_sock_create); -+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create); - #ifdef CONFIG_DEBUG_FS - if (!IS_ERR(debugfsdir)) { - -diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c -index e22671b..6598ea0 100644 ---- a/net/caif/cfctrl.c -+++ b/net/caif/cfctrl.c -@@ -9,6 +9,7 @@ - #include <linux/stddef.h> - #include <linux/spinlock.h> - #include <linux/slab.h> -+#include <linux/sched.h> - #include <net/caif/caif_layer.h> - #include <net/caif/cfpkt.h> - #include <net/caif/cfctrl.h> -@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void) - dev_info.id = 0xff; - memset(this, 0, sizeof(*this)); - cfsrvl_init(&this->serv, 0, &dev_info, false); -- atomic_set(&this->req_seq_no, 1); -- atomic_set(&this->rsp_seq_no, 1); -+ atomic_set_unchecked(&this->req_seq_no, 1); -+ atomic_set_unchecked(&this->rsp_seq_no, 1); - this->serv.layer.receive = cfctrl_recv; - sprintf(this->serv.layer.name, "ctrl"); - this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; -@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl, - struct cfctrl_request_info *req) - { - spin_lock_bh(&ctrl->info_list_lock); -- atomic_inc(&ctrl->req_seq_no); -- req->sequence_no = atomic_read(&ctrl->req_seq_no); -+ atomic_inc_unchecked(&ctrl->req_seq_no); -+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no); - list_add_tail(&req->list, &ctrl->list); - spin_unlock_bh(&ctrl->info_list_lock); - } -@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, - if (p != first) - pr_warn("Requests are not received in order\n"); - -- atomic_set(&ctrl->rsp_seq_no, -+ atomic_set_unchecked(&ctrl->rsp_seq_no, - p->sequence_no); - list_del(&p->list); - goto out; -@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) - struct cfctrl *cfctrl = container_obj(layer); - struct cfctrl_request_info rsp, *req; - -+ pax_track_stack(); - - cfpkt_extr_head(pkt, &cmdrsp, 1); - cmd = cmdrsp & CFCTRL_CMD_MASK; -diff --git a/net/compat.c b/net/compat.c -index c578d93..257fab7 100644 ---- a/net/compat.c -+++ b/net/compat.c -@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) - __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || - __get_user(kmsg->msg_flags, &umsg->msg_flags)) - return -EFAULT; -- kmsg->msg_name = compat_ptr(tmp1); -- kmsg->msg_iov = compat_ptr(tmp2); -- kmsg->msg_control = compat_ptr(tmp3); -+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1); -+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2); -+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3); - return 0; - } - -@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, - - if (kern_msg->msg_namelen) { - if (mode == VERIFY_READ) { -- int err = move_addr_to_kernel(kern_msg->msg_name, -+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name, - kern_msg->msg_namelen, - kern_address); - if (err < 0) -@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, - kern_msg->msg_name = NULL; - - tot_len = iov_from_user_compat_to_kern(kern_iov, -- (struct compat_iovec __user *)kern_msg->msg_iov, -+ (struct compat_iovec __force_user *)kern_msg->msg_iov, - kern_msg->msg_iovlen); - if (tot_len >= 0) - kern_msg->msg_iov = kern_iov; -@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, - - #define CMSG_COMPAT_FIRSTHDR(msg) \ - (((msg)->msg_controllen) >= sizeof(struct 
compat_cmsghdr) ? \ -- (struct compat_cmsghdr __user *)((msg)->msg_control) : \ -+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \ - (struct compat_cmsghdr __user *)NULL) - - #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \ - ((ucmlen) >= sizeof(struct compat_cmsghdr) && \ - (ucmlen) <= (unsigned long) \ - ((mhdr)->msg_controllen - \ -- ((char *)(ucmsg) - (char *)(mhdr)->msg_control))) -+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control))) - - static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg, - struct compat_cmsghdr __user *cmsg, int cmsg_len) - { - char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len); -- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) > -+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) > - msg->msg_controllen) - return NULL; - return (struct compat_cmsghdr __user *)ptr; -@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat - { - struct compat_timeval ctv; - struct compat_timespec cts[3]; -- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; -+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; - struct compat_cmsghdr cmhdr; - int cmlen; - -@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat - - void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) - { -- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; -+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; - int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); - int fdnum = scm->fp->count; - struct file **fp = scm->fp->fp; -@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, - return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); -- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); -+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime)); - set_fs(old_fs); - - return err; -@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, - len = sizeof(ktime); - old_fs = get_fs(); - set_fs(KERNEL_DS); -- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len); -+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len); - set_fs(old_fs); - - if (!err) { -@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, - case MCAST_JOIN_GROUP: - case MCAST_LEAVE_GROUP: - { -- struct compat_group_req __user *gr32 = (void *)optval; -+ struct compat_group_req __user *gr32 = (void __user *)optval; - struct group_req __user *kgr = - compat_alloc_user_space(sizeof(struct group_req)); - u32 interface; -@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, - case MCAST_BLOCK_SOURCE: - case MCAST_UNBLOCK_SOURCE: - { -- struct compat_group_source_req __user *gsr32 = (void *)optval; -+ struct compat_group_source_req __user *gsr32 = (void __user *)optval; - struct group_source_req __user *kgsr = compat_alloc_user_space( - sizeof(struct group_source_req)); - u32 interface; -@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, - } - case MCAST_MSFILTER: - { -- struct compat_group_filter __user *gf32 = (void *)optval; -+ struct compat_group_filter __user *gf32 = (void 
__user *)optval; - struct group_filter __user *kgf; - u32 interface, fmode, numsrc; - -@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, - char __user *optval, int __user *optlen, - int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) - { -- struct compat_group_filter __user *gf32 = (void *)optval; -+ struct compat_group_filter __user *gf32 = (void __user *)optval; - struct group_filter __user *kgf; - int __user *koptlen; - u32 interface, fmode, numsrc; -diff --git a/net/core/datagram.c b/net/core/datagram.c -index 18ac112..fe95ed9 100644 ---- a/net/core/datagram.c -+++ b/net/core/datagram.c -@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) - } - - kfree_skb(skb); -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - sk_mem_reclaim_partial(sk); - - return err; -diff --git a/net/core/dev.c b/net/core/dev.c -index ae5cf2d..2c950a1 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name) - if (no_module && capable(CAP_NET_ADMIN)) - no_module = request_module("netdev-%s", name); - if (no_module && capable(CAP_SYS_MODULE)) { -+#ifdef CONFIG_GRKERNSEC_MODHARDEN -+ ___request_module(true, "grsec_modharden_netdev", "%s", name); -+#else - if (!request_module("%s", name)) - pr_err("Loading kernel module for a network device " - "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " - "instead\n", name); -+#endif - } - } - EXPORT_SYMBOL(dev_load); -@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) - - struct dev_gso_cb { - void (*destructor)(struct sk_buff *skb); --}; -+} __no_const; - - #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) - -@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb) - } - EXPORT_SYMBOL(netif_rx_ni); - --static void net_tx_action(struct softirq_action *h) -+static void net_tx_action(void) - { - struct softnet_data *sd = &__get_cpu_var(softnet_data); - -@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi) - } - EXPORT_SYMBOL(netif_napi_del); - --static void net_rx_action(struct softirq_action *h) -+static void net_rx_action(void) - { - struct softnet_data *sd = &__get_cpu_var(softnet_data); - unsigned long time_limit = jiffies + 2; -diff --git a/net/core/flow.c b/net/core/flow.c -index 555a456..de48421 100644 ---- a/net/core/flow.c -+++ b/net/core/flow.c -@@ -61,7 +61,7 @@ struct flow_cache { - struct timer_list rnd_timer; - }; - --atomic_t flow_cache_genid = ATOMIC_INIT(0); -+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0); - EXPORT_SYMBOL(flow_cache_genid); - static struct flow_cache flow_cache_global; - static struct kmem_cache *flow_cachep __read_mostly; -@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg) - - static int flow_entry_valid(struct flow_cache_entry *fle) - { -- if (atomic_read(&flow_cache_genid) != fle->genid) -+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid) - return 0; - if (fle->object && !fle->object->ops->check(fle->object)) - return 0; -@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, - hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); - fcp->hash_count++; - } -- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { -+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) { - flo = fle->object; - if (!flo) - goto ret_object; -@@ -280,7 +280,7 @@ 
nocache: - } - flo = resolver(net, key, family, dir, flo, ctx); - if (fle) { -- fle->genid = atomic_read(&flow_cache_genid); -+ fle->genid = atomic_read_unchecked(&flow_cache_genid); - if (!IS_ERR(flo)) - fle->object = flo; - else -diff --git a/net/core/iovec.c b/net/core/iovec.c -index c40f27e..7f49254 100644 ---- a/net/core/iovec.c -+++ b/net/core/iovec.c -@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, - if (m->msg_namelen) { - if (mode == VERIFY_READ) { - void __user *namep; -- namep = (void __user __force *) m->msg_name; -+ namep = (void __force_user *) m->msg_name; - err = move_addr_to_kernel(namep, m->msg_namelen, - address); - if (err < 0) -@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, - } - - size = m->msg_iovlen * sizeof(struct iovec); -- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) -+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size)) - return -EFAULT; - - m->msg_iov = iov; -diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c -index 99d9e95..209bae2 100644 ---- a/net/core/rtnetlink.c -+++ b/net/core/rtnetlink.c -@@ -57,7 +57,7 @@ struct rtnl_link { - rtnl_doit_func doit; - rtnl_dumpit_func dumpit; - rtnl_calcit_func calcit; --}; -+} __no_const; - - static DEFINE_MUTEX(rtnl_mutex); - static u16 min_ifinfo_dump_size; -diff --git a/net/core/scm.c b/net/core/scm.c -index 811b53f..5d6c343 100644 ---- a/net/core/scm.c -+++ b/net/core/scm.c -@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send); - int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) - { - struct cmsghdr __user *cm -- = (__force struct cmsghdr __user *)msg->msg_control; -+ = (struct cmsghdr __force_user *)msg->msg_control; - struct cmsghdr cmhdr; - int cmlen = CMSG_LEN(len); - int err; -@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) - err = -EFAULT; - if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) - goto out; -- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) -+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr))) - goto out; - cmlen = CMSG_SPACE(len); - if (msg->msg_controllen < cmlen) -@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg); - void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) - { - struct cmsghdr __user *cm -- = (__force struct cmsghdr __user*)msg->msg_control; -+ = (struct cmsghdr __force_user *)msg->msg_control; - - int fdmax = 0; - int fdnum = scm->fp->count; -@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) - if (fdnum < fdmax) - fdmax = fdnum; - -- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; -+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax; - i++, cmfptr++) - { - int new_fd; -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 387703f..035abcf 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, - struct sock *sk = skb->sk; - int ret = 0; - -+ pax_track_stack(); -+ - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - -diff --git a/net/core/sock.c b/net/core/sock.c -index 11d67b3..df26d4b 100644 ---- a/net/core/sock.c -+++ b/net/core/sock.c -@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - */ - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) { -- 
atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - trace_sock_rcvqueue_full(sk, skb); - return -ENOMEM; - } -@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - return err; - - if (!sk_rmem_schedule(sk, skb->truesize)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - return -ENOBUFS; - } - -@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - skb_dst_force(skb); - - spin_lock_irqsave(&list->lock, flags); -- skb->dropcount = atomic_read(&sk->sk_drops); -+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); - __skb_queue_tail(list, skb); - spin_unlock_irqrestore(&list->lock, flags); - -@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) - skb->dev = NULL; - - if (sk_rcvqueues_full(sk, skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - goto discard_and_relse; - } - if (nested) -@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) - mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - } else if (sk_add_backlog(sk, skb)) { - bh_unlock_sock(sk); -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - goto discard_and_relse; - } - -@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, - if (len > sizeof(peercred)) - len = sizeof(peercred); - cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); -- if (copy_to_user(optval, &peercred, len)) -+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len)) - return -EFAULT; - goto lenout; - } -@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, - return -ENOTCONN; - if (lv < len) - return -EINVAL; -- if (copy_to_user(optval, address, len)) -+ if (len > sizeof(address) || copy_to_user(optval, address, len)) - return -EFAULT; - goto lenout; - } -@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, - - if (len > lv) - len = lv; -- if (copy_to_user(optval, &v, len)) -+ if (len > sizeof(v) || copy_to_user(optval, &v, len)) - return -EFAULT; - lenout: - if (put_user(len, optlen)) -@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) - */ - smp_wmb(); - atomic_set(&sk->sk_refcnt, 1); -- atomic_set(&sk->sk_drops, 0); -+ atomic_set_unchecked(&sk->sk_drops, 0); - } - EXPORT_SYMBOL(sock_init_data); - -diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c -index 02e75d1..9a57a7c 100644 ---- a/net/decnet/sysctl_net_decnet.c -+++ b/net/decnet/sysctl_net_decnet.c -@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write, - - if (len > *lenp) len = *lenp; - -- if (copy_to_user(buffer, addr, len)) -+ if (len > sizeof addr || copy_to_user(buffer, addr, len)) - return -EFAULT; - - *lenp = len; -@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write, - - if (len > *lenp) len = *lenp; - -- if (copy_to_user(buffer, devname, len)) -+ if (len > sizeof devname || copy_to_user(buffer, devname, len)) - return -EFAULT; - - *lenp = len; -diff --git a/net/econet/Kconfig b/net/econet/Kconfig -index 39a2d29..f39c0fe 100644 ---- a/net/econet/Kconfig -+++ b/net/econet/Kconfig -@@ -4,7 +4,7 @@ - - config ECONET - tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)" -- depends on EXPERIMENTAL && INET -+ depends on EXPERIMENTAL && INET && BROKEN - ---help--- - Econet is a fairly old and slow networking protocol mainly used by - Acorn computers to 
access file and print servers. It uses native -diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c -index 92fc5f6..b790d91 100644 ---- a/net/ipv4/fib_frontend.c -+++ b/net/ipv4/fib_frontend.c -@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, - #ifdef CONFIG_IP_ROUTE_MULTIPATH - fib_sync_up(dev); - #endif -- atomic_inc(&net->ipv4.dev_addr_genid); -+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); - rt_cache_flush(dev_net(dev), -1); - break; - case NETDEV_DOWN: - fib_del_ifaddr(ifa, NULL); -- atomic_inc(&net->ipv4.dev_addr_genid); -+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); - if (ifa->ifa_dev->ifa_list == NULL) { - /* Last address was deleted from this interface. - * Disable IP. -@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo - #ifdef CONFIG_IP_ROUTE_MULTIPATH - fib_sync_up(dev); - #endif -- atomic_inc(&net->ipv4.dev_addr_genid); -+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); - rt_cache_flush(dev_net(dev), -1); - break; - case NETDEV_DOWN: -diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c -index 80106d8..232e898 100644 ---- a/net/ipv4/fib_semantics.c -+++ b/net/ipv4/fib_semantics.c -@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) - nh->nh_saddr = inet_select_addr(nh->nh_dev, - nh->nh_gw, - nh->nh_parent->fib_scope); -- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); -+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid); - - return nh->nh_saddr; - } -diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c -index 389a2e6..ac1c1de 100644 ---- a/net/ipv4/inet_diag.c -+++ b/net/ipv4/inet_diag.c -@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk, - r->idiag_retrans = 0; - - r->id.idiag_if = sk->sk_bound_dev_if; -+ -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ r->id.idiag_cookie[0] = 0; -+ r->id.idiag_cookie[1] = 0; -+#else - r->id.idiag_cookie[0] = (u32)(unsigned long)sk; - r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); -+#endif - - r->id.idiag_sport = inet->inet_sport; - r->id.idiag_dport = inet->inet_dport; -@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, - r->idiag_family = tw->tw_family; - r->idiag_retrans = 0; - r->id.idiag_if = tw->tw_bound_dev_if; -+ -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ r->id.idiag_cookie[0] = 0; -+ r->id.idiag_cookie[1] = 0; -+#else - r->id.idiag_cookie[0] = (u32)(unsigned long)tw; - r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); -+#endif -+ - r->id.idiag_sport = tw->tw_sport; - r->id.idiag_dport = tw->tw_dport; - r->id.idiag_src[0] = tw->tw_rcv_saddr; -@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, - if (sk == NULL) - goto unlock; - -+#ifndef CONFIG_GRKERNSEC_HIDESYM - err = -ESTALE; - if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE || - req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) && - ((u32)(unsigned long)sk != req->id.idiag_cookie[0] || - (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1])) - goto out; -+#endif - - err = -ENOMEM; - rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + -@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, - r->idiag_retrans = req->retrans; - - r->id.idiag_if = sk->sk_bound_dev_if; -+ -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ r->id.idiag_cookie[0] = 0; -+ r->id.idiag_cookie[1] = 0; -+#else - r->id.idiag_cookie[0] = (u32)(unsigned long)req; - 
r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1); -+#endif - - tmo = req->expires - jiffies; - if (tmo < 0) -diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c -index 984ec65..97ac518 100644 ---- a/net/ipv4/inet_hashtables.c -+++ b/net/ipv4/inet_hashtables.c -@@ -18,12 +18,15 @@ - #include <linux/sched.h> - #include <linux/slab.h> - #include <linux/wait.h> -+#include <linux/security.h> - - #include <net/inet_connection_sock.h> - #include <net/inet_hashtables.h> - #include <net/secure_seq.h> - #include <net/ip.h> - -+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); -+ - /* - * Allocate and initialize a new local port bind bucket. - * The bindhash mutex for snum's hash chain must be held here. -@@ -530,6 +533,8 @@ ok: - twrefcnt += inet_twsk_bind_unhash(tw, hinfo); - spin_unlock(&head->lock); - -+ gr_update_task_in_ip_table(current, inet_sk(sk)); -+ - if (tw) { - inet_twsk_deschedule(tw, death_row); - while (twrefcnt) { -diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c -index 86f13c67..0bce60f 100644 ---- a/net/ipv4/inetpeer.c -+++ b/net/ipv4/inetpeer.c -@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create) - unsigned int sequence; - int invalidated, gccnt = 0; - -+ pax_track_stack(); -+ - /* Attempt a lockless lookup first. - * Because of a concurrent writer, we might not find an existing entry. - */ -@@ -436,8 +438,8 @@ relookup: - if (p) { - p->daddr = *daddr; - atomic_set(&p->refcnt, 1); -- atomic_set(&p->rid, 0); -- atomic_set(&p->ip_id_count, -+ atomic_set_unchecked(&p->rid, 0); -+ atomic_set_unchecked(&p->ip_id_count, - (daddr->family == AF_INET) ? - secure_ip_id(daddr->addr.a4) : - secure_ipv6_id(daddr->addr.a6)); -diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c -index 0e0ab98..2ed7dd5 100644 ---- a/net/ipv4/ip_fragment.c -+++ b/net/ipv4/ip_fragment.c -@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp) - return 0; - - start = qp->rid; -- end = atomic_inc_return(&peer->rid); -+ end = atomic_inc_return_unchecked(&peer->rid); - qp->rid = end; - - rc = qp->q.fragments && (end - start) > max; -diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c -index 8905e92..0b179fb 100644 ---- a/net/ipv4/ip_sockglue.c -+++ b/net/ipv4/ip_sockglue.c -@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - int val; - int len; - -+ pax_track_stack(); -+ - if (level != SOL_IP) - return -EOPNOTSUPP; - -@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - len = min_t(unsigned int, len, opt->optlen); - if (put_user(len, optlen)) - return -EFAULT; -- if (copy_to_user(optval, opt->__data, len)) -+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) || -+ copy_to_user(optval, opt->__data, len)) - return -EFAULT; - return 0; - } -@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - if (sk->sk_type != SOCK_STREAM) - return -ENOPROTOOPT; - -- msg.msg_control = optval; -+ msg.msg_control = (void __force_kernel *)optval; - msg.msg_controllen = len; - msg.msg_flags = flags; - -diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c -index 472a8c4..6507cd4 100644 ---- a/net/ipv4/ipconfig.c -+++ b/net/ipv4/ipconfig.c -@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg) - - mm_segment_t oldfs = get_fs(); - set_fs(get_ds()); -- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg); -+ 
res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); - set_fs(oldfs); - return res; - } -@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) - - mm_segment_t oldfs = get_fs(); - set_fs(get_ds()); -- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg); -+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); - set_fs(oldfs); - return res; - } -@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) - - mm_segment_t oldfs = get_fs(); - set_fs(get_ds()); -- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg); -+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg); - set_fs(oldfs); - return res; - } -diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c -index 076b7c8..9c8d038 100644 ---- a/net/ipv4/netfilter/nf_nat_snmp_basic.c -+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c -@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, - - *len = 0; - -- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); -+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC); - if (*octets == NULL) { - if (net_ratelimit()) - pr_notice("OOM in bsalg (%d)\n", __LINE__); -diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c -index 39b403f..8e6a0a8 100644 ---- a/net/ipv4/ping.c -+++ b/net/ipv4/ping.c -@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f, - sk_rmem_alloc_get(sp), - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), - atomic_read(&sp->sk_refcnt), sp, -- atomic_read(&sp->sk_drops), len); -+ atomic_read_unchecked(&sp->sk_drops), len); - } - - static int ping_seq_show(struct seq_file *seq, void *v) -diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c -index 61714bd..c9cee6d 100644 ---- a/net/ipv4/raw.c -+++ b/net/ipv4/raw.c -@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) - int raw_rcv(struct sock *sk, struct sk_buff *skb) - { - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - kfree_skb(skb); - return NET_RX_DROP; - } -@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk) - - static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) - { -+ struct icmp_filter filter; -+ - if (optlen > sizeof(struct icmp_filter)) - optlen = sizeof(struct icmp_filter); -- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) -+ if (copy_from_user(&filter, optval, optlen)) - return -EFAULT; -+ raw_sk(sk)->filter = filter; - return 0; - } - - static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) - { - int len, ret = -EFAULT; -+ struct icmp_filter filter; - - if (get_user(len, optlen)) - goto out; -@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o - if (len > sizeof(struct icmp_filter)) - len = sizeof(struct icmp_filter); - ret = -EFAULT; -- if (put_user(len, optlen) || -- copy_to_user(optval, &raw_sk(sk)->filter, len)) -+ filter = raw_sk(sk)->filter; -+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len)) - goto out; - ret = 0; - out: return ret; -@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) - sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), -- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); -+ atomic_read(&sp->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, 
-+#else -+ sp, -+#endif -+ atomic_read_unchecked(&sp->sk_drops)); - } - - static int raw_seq_show(struct seq_file *seq, void *v) -diff --git a/net/ipv4/route.c b/net/ipv4/route.c -index 05ac666c..82384a7 100644 ---- a/net/ipv4/route.c -+++ b/net/ipv4/route.c -@@ -309,7 +309,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, - - static inline int rt_genid(struct net *net) - { -- return atomic_read(&net->ipv4.rt_genid); -+ return atomic_read_unchecked(&net->ipv4.rt_genid); - } - - #ifdef CONFIG_PROC_FS -@@ -842,7 +842,7 @@ static void rt_cache_invalidate(struct net *net) - unsigned char shuffle; - - get_random_bytes(&shuffle, sizeof(shuffle)); -- atomic_add(shuffle + 1U, &net->ipv4.rt_genid); -+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid); - redirect_genid++; - } - -@@ -2920,7 +2920,7 @@ static int rt_fill_info(struct net *net, - error = rt->dst.error; - if (peer) { - inet_peer_refcheck(rt->peer); -- id = atomic_read(&peer->ip_id_count) & 0xffff; -+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff; - if (peer->tcp_ts_stamp) { - ts = peer->tcp_ts; - tsage = get_seconds() - peer->tcp_ts_stamp; -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index 46febca..98b73a4 100644 ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, - int val; - int err = 0; - -+ pax_track_stack(); -+ - /* These are data/string values, all the others are ints */ - switch (optname) { - case TCP_CONGESTION: { -@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level, - struct tcp_sock *tp = tcp_sk(sk); - int val, len; - -+ pax_track_stack(); -+ - if (get_user(len, optlen)) - return -EFAULT; - -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index 7963e03..c44f5d0 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly; - int sysctl_tcp_low_latency __read_mostly; - EXPORT_SYMBOL(sysctl_tcp_low_latency); - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_enable_blackhole; -+#endif - - #ifdef CONFIG_TCP_MD5SIG - static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, -@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) - return 0; - - reset: -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole) -+#endif - tcp_v4_send_reset(rsk, skb); - discard: - kfree_skb(skb); -@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb) - TCP_SKB_CB(skb)->sacked = 0; - - sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); -- if (!sk) -+ if (!sk) { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ ret = 1; -+#endif - goto no_tcp_socket; -- -+ } - process: -- if (sk->sk_state == TCP_TIME_WAIT) -+ if (sk->sk_state == TCP_TIME_WAIT) { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ ret = 2; -+#endif - goto do_time_wait; -+ } - - if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); -@@ -1739,6 +1752,10 @@ no_tcp_socket: - bad_packet: - TCP_INC_STATS_BH(net, TCP_MIB_INERRS); - } else { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole || (ret == 1 && -+ (skb->dev->flags & IFF_LOOPBACK))) -+#endif - tcp_v4_send_reset(NULL, skb); - } - -@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, - 0, /* non standard timer */ - 0, /* open_requests have no inode */ - atomic_read(&sk->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else - req, -+#endif - len); - } - -@@ -2453,7 +2474,12 @@ static 
void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) - sock_i_uid(sk), - icsk->icsk_probes_out, - sock_i_ino(sk), -- atomic_read(&sk->sk_refcnt), sk, -+ atomic_read(&sk->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sk, -+#endif - jiffies_to_clock_t(icsk->icsk_rto), - jiffies_to_clock_t(icsk->icsk_ack.ato), - (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, -@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw, - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", - i, src, srcp, dest, destp, tw->tw_substate, 0, 0, - 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, -- atomic_read(&tw->tw_refcnt), tw, len); -+ atomic_read(&tw->tw_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ tw, -+#endif -+ len); - } - - #define TMPSZ 150 -diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c -index 0ce3d06..e182e59 100644 ---- a/net/ipv4/tcp_minisocks.c -+++ b/net/ipv4/tcp_minisocks.c -@@ -27,6 +27,10 @@ - #include <net/inet_common.h> - #include <net/xfrm.h> - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_enable_blackhole; -+#endif -+ - int sysctl_tcp_syncookies __read_mostly = 1; - EXPORT_SYMBOL(sysctl_tcp_syncookies); - -@@ -750,6 +754,10 @@ listen_overflow: - - embryonic_reset: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); -+ -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole) -+#endif - if (!(flg & TCP_FLAG_RST)) - req->rsk_ops->send_reset(sk, skb); - -diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c -index 882e0b0..2eba47f 100644 ---- a/net/ipv4/tcp_output.c -+++ b/net/ipv4/tcp_output.c -@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, - int mss; - int s_data_desired = 0; - -+ pax_track_stack(); -+ - if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) - s_data_desired = cvp->s_data_desired; - skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); -diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c -index 85ee7eb..53277ab 100644 ---- a/net/ipv4/tcp_probe.c -+++ b/net/ipv4/tcp_probe.c -@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, - if (cnt + width >= len) - break; - -- if (copy_to_user(buf + cnt, tbuf, width)) -+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width)) - return -EFAULT; - cnt += width; - } -diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c -index ecd44b0..b32fba6 100644 ---- a/net/ipv4/tcp_timer.c -+++ b/net/ipv4/tcp_timer.c -@@ -22,6 +22,10 @@ - #include <linux/gfp.h> - #include <net/tcp.h> - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_lastack_retries; -+#endif -+ - int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; - int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; - int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; -@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk) - } - } - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if ((sk->sk_state == TCP_LAST_ACK) && -+ (grsec_lastack_retries > 0) && -+ (grsec_lastack_retries < retry_until)) -+ retry_until = grsec_lastack_retries; -+#endif -+ - if (retransmits_timed_out(sk, retry_until, - syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { - /* Has it gone just too far? 
*/ -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index 1b5a193..bd354b0 100644 ---- a/net/ipv4/udp.c -+++ b/net/ipv4/udp.c -@@ -86,6 +86,7 @@ - #include <linux/types.h> - #include <linux/fcntl.h> - #include <linux/module.h> -+#include <linux/security.h> - #include <linux/socket.h> - #include <linux/sockios.h> - #include <linux/igmp.h> -@@ -108,6 +109,10 @@ - #include <trace/events/udp.h> - #include "udp_impl.h" - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_enable_blackhole; -+#endif -+ - struct udp_table udp_table __read_mostly; - EXPORT_SYMBOL(udp_table); - -@@ -565,6 +570,9 @@ found: - return s; - } - -+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); -+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); -+ - /* - * This routine is called by the ICMP module when it gets some - * sort of error condition. If err < 0 then the socket should -@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - dport = usin->sin_port; - if (dport == 0) - return -EINVAL; -+ -+ err = gr_search_udp_sendmsg(sk, usin); -+ if (err) -+ return err; - } else { - if (sk->sk_state != TCP_ESTABLISHED) - return -EDESTADDRREQ; -+ -+ err = gr_search_udp_sendmsg(sk, NULL); -+ if (err) -+ return err; -+ - daddr = inet->inet_daddr; - dport = inet->inet_dport; - /* Open fast path for connected socket. -@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk) - udp_lib_checksum_complete(skb)) { - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, - IS_UDPLITE(sk)); -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - __skb_unlink(skb, rcvq); - __skb_queue_tail(&list_kill, skb); - } -@@ -1185,6 +1202,10 @@ try_again: - if (!skb) - goto out; - -+ err = gr_search_udp_recvmsg(sk, skb); -+ if (err) -+ goto out_free; -+ - ulen = skb->len - sizeof(struct udphdr); - if (len > ulen) - len = ulen; -@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) - - drop: - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - kfree_skb(skb); - return -1; - } -@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count, - skb1 = (i == final) ? 
skb : skb_clone(skb, GFP_ATOMIC); - - if (!skb1) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, - IS_UDPLITE(sk)); - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, - goto csum_error; - - UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) -+#endif - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); - - /* -@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, - sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), -- atomic_read(&sp->sk_refcnt), sp, -- atomic_read(&sp->sk_drops), len); -+ atomic_read(&sp->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sp, -+#endif -+ atomic_read_unchecked(&sp->sk_drops), len); - } - - int udp4_seq_show(struct seq_file *seq, void *v) -diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c -index 12368c5..fbf899f 100644 ---- a/net/ipv6/addrconf.c -+++ b/net/ipv6/addrconf.c -@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg) - p.iph.ihl = 5; - p.iph.protocol = IPPROTO_IPV6; - p.iph.ttl = 64; -- ifr.ifr_ifru.ifru_data = (__force void __user *)&p; -+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p; - - if (ops->ndo_do_ioctl) { - mm_segment_t oldfs = get_fs(); -diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c -index 8a58e8c..8b5e631 100644 ---- a/net/ipv6/inet6_connection_sock.c -+++ b/net/ipv6/inet6_connection_sock.c -@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, - #ifdef CONFIG_XFRM - { - struct rt6_info *rt = (struct rt6_info *)dst; -- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); -+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid); - } - #endif - } -@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) - #ifdef CONFIG_XFRM - if (dst) { - struct rt6_info *rt = (struct rt6_info *)dst; -- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { -+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) { - __sk_dst_reset(sk); - dst = NULL; - } -diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c -index 2fbda5f..26ed683 100644 ---- a/net/ipv6/ipv6_sockglue.c -+++ b/net/ipv6/ipv6_sockglue.c -@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, - int val, valbool; - int retv = -ENOPROTOOPT; - -+ pax_track_stack(); -+ - if (optval == NULL) - val=0; - else { -@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - int len; - int val; - -+ pax_track_stack(); -+ - if (ip6_mroute_opt(optname)) - return ip6_mroute_getsockopt(sk, optname, optval, optlen); - -@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - if (sk->sk_type != SOCK_STREAM) - return -ENOPROTOOPT; - -- msg.msg_control = optval; -+ msg.msg_control = (void __force_kernel *)optval; - msg.msg_controllen = len; - msg.msg_flags = flags; - -diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c -index 343852e..c92bd15 100644 ---- a/net/ipv6/raw.c -+++ b/net/ipv6/raw.c -@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) - { - if ((raw6_sk(sk)->checksum || 
rcu_dereference_raw(sk->sk_filter)) && - skb_checksum_complete(skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - kfree_skb(skb); - return NET_RX_DROP; - } -@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) - struct raw6_sock *rp = raw6_sk(sk); - - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - kfree_skb(skb); - return NET_RX_DROP; - } -@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) - - if (inet->hdrincl) { - if (skb_checksum_complete(skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - kfree_skb(skb); - return NET_RX_DROP; - } -@@ -601,7 +601,7 @@ out: - return err; - } - --static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, -+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, - struct flowi6 *fl6, struct dst_entry **dstp, - unsigned int flags) - { -@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, - u16 proto; - int err; - -+ pax_track_stack(); -+ - /* Rough check on arithmetic overflow, - better check is made in ip6_append_data(). - */ -@@ -909,12 +911,15 @@ do_confirm: - static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, - char __user *optval, int optlen) - { -+ struct icmp6_filter filter; -+ - switch (optname) { - case ICMPV6_FILTER: - if (optlen > sizeof(struct icmp6_filter)) - optlen = sizeof(struct icmp6_filter); -- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) -+ if (copy_from_user(&filter, optval, optlen)) - return -EFAULT; -+ raw6_sk(sk)->filter = filter; - return 0; - default: - return -ENOPROTOOPT; -@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) - { - int len; -+ struct icmp6_filter filter; - - switch (optname) { - case ICMPV6_FILTER: -@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, - len = sizeof(struct icmp6_filter); - if (put_user(len, optlen)) - return -EFAULT; -- if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) -+ filter = raw6_sk(sk)->filter; -+ if (len > sizeof filter || copy_to_user(optval, &filter, len)) - return -EFAULT; - return 0; - default: -@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) - 0, 0L, 0, - sock_i_uid(sp), 0, - sock_i_ino(sp), -- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); -+ atomic_read(&sp->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sp, -+#endif -+ atomic_read_unchecked(&sp->sk_drops)); - } - - static int raw6_seq_show(struct seq_file *seq, void *v) -diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c -index 7b8fc57..c6185da 100644 ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, - } - #endif - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_enable_blackhole; -+#endif -+ - static void tcp_v6_hash(struct sock *sk) - { - if (sk->sk_state != TCP_CLOSE) { -@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) - return 0; - - reset: -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole) -+#endif - tcp_v6_send_reset(sk, skb); - discard: - if (opt_skb) -@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb) - TCP_SKB_CB(skb)->sacked = 0; - - sk = __inet6_lookup_skb(&tcp_hashinfo, skb, 
th->source, th->dest); -- if (!sk) -+ if (!sk) { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ ret = 1; -+#endif - goto no_tcp_socket; -+ } - - process: -- if (sk->sk_state == TCP_TIME_WAIT) -+ if (sk->sk_state == TCP_TIME_WAIT) { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ ret = 2; -+#endif - goto do_time_wait; -+ } - - if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { - NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); -@@ -1779,6 +1794,10 @@ no_tcp_socket: - bad_packet: - TCP_INC_STATS_BH(net, TCP_MIB_INERRS); - } else { -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole || (ret == 1 && -+ (skb->dev->flags & IFF_LOOPBACK))) -+#endif - tcp_v6_send_reset(NULL, skb); - } - -@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq, - uid, - 0, /* non standard timer */ - 0, /* open_requests have no inode */ -- 0, req); -+ 0, -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL -+#else -+ req -+#endif -+ ); - } - - static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) -@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) - sock_i_uid(sp), - icsk->icsk_probes_out, - sock_i_ino(sp), -- atomic_read(&sp->sk_refcnt), sp, -+ atomic_read(&sp->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sp, -+#endif - jiffies_to_clock_t(icsk->icsk_rto), - jiffies_to_clock_t(icsk->icsk_ack.ato), - (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, -@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq, - dest->s6_addr32[2], dest->s6_addr32[3], destp, - tw->tw_substate, 0, 0, - 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, -- atomic_read(&tw->tw_refcnt), tw); -+ atomic_read(&tw->tw_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL -+#else -+ tw -+#endif -+ ); - } - - static int tcp6_seq_show(struct seq_file *seq, void *v) -diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c -index bb95e8e..ae0ee80 100644 ---- a/net/ipv6/udp.c -+++ b/net/ipv6/udp.c -@@ -50,6 +50,10 @@ - #include <linux/seq_file.h> - #include "udp_impl.h" - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+extern int grsec_enable_blackhole; -+#endif -+ - int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) - { - const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; -@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) - - return 0; - drop: -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - drop_no_sk_drops_inc: - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); - kfree_skb(skb); -@@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count, - continue; - } - drop: -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); - UDP6_INC_STATS_BH(sock_net(sk), -@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, - UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, - proto == IPPROTO_UDPLITE); - -+#ifdef CONFIG_GRKERNSEC_BLACKHOLE -+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) -+#endif - icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); - - kfree_skb(skb); -@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, - if (!sock_owned_by_user(sk)) - udpv6_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb)) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - bh_unlock_sock(sk); - sock_put(sk); - goto discard; -@@ -1406,8 +1413,13 @@ static void 
udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket - 0, 0L, 0, - sock_i_uid(sp), 0, - sock_i_ino(sp), -- atomic_read(&sp->sk_refcnt), sp, -- atomic_read(&sp->sk_drops)); -+ atomic_read(&sp->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sp, -+#endif -+ atomic_read_unchecked(&sp->sk_drops)); - } - - int udp6_seq_show(struct seq_file *seq, void *v) -diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c -index b3cc8b3..baa02d0 100644 ---- a/net/irda/ircomm/ircomm_tty.c -+++ b/net/irda/ircomm/ircomm_tty.c -@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, - add_wait_queue(&self->open_wait, &wait); - - IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", -- __FILE__,__LINE__, tty->driver->name, self->open_count ); -+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); - - /* As far as I can see, we protect open_count - Jean II */ - spin_lock_irqsave(&self->spinlock, flags); - if (!tty_hung_up_p(filp)) { - extra_count = 1; -- self->open_count--; -+ local_dec(&self->open_count); - } - spin_unlock_irqrestore(&self->spinlock, flags); -- self->blocked_open++; -+ local_inc(&self->blocked_open); - - while (1) { - if (tty->termios->c_cflag & CBAUD) { -@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, - } - - IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", -- __FILE__,__LINE__, tty->driver->name, self->open_count ); -+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); - - schedule(); - } -@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, - if (extra_count) { - /* ++ is not atomic, so this should be protected - Jean II */ - spin_lock_irqsave(&self->spinlock, flags); -- self->open_count++; -+ local_inc(&self->open_count); - spin_unlock_irqrestore(&self->spinlock, flags); - } -- self->blocked_open--; -+ local_dec(&self->blocked_open); - - IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", -- __FILE__,__LINE__, tty->driver->name, self->open_count); -+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count)); - - if (!retval) - self->flags |= ASYNC_NORMAL_ACTIVE; -@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) - } - /* ++ is not atomic, so this should be protected - Jean II */ - spin_lock_irqsave(&self->spinlock, flags); -- self->open_count++; -+ local_inc(&self->open_count); - - tty->driver_data = self; - self->tty = tty; - spin_unlock_irqrestore(&self->spinlock, flags); - - IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, -- self->line, self->open_count); -+ self->line, local_read(&self->open_count)); - - /* Not really used by us, but lets do it anyway */ - self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; -@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) - return; - } - -- if ((tty->count == 1) && (self->open_count != 1)) { -+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) { - /* - * Uh, oh. tty->count is 1, which means that the tty - * structure will be freed. 
state->count should always -@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) - */ - IRDA_DEBUG(0, "%s(), bad serial port count; " - "tty->count is 1, state->count is %d\n", __func__ , -- self->open_count); -- self->open_count = 1; -+ local_read(&self->open_count)); -+ local_set(&self->open_count, 1); - } - -- if (--self->open_count < 0) { -+ if (local_dec_return(&self->open_count) < 0) { - IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", -- __func__, self->line, self->open_count); -- self->open_count = 0; -+ __func__, self->line, local_read(&self->open_count)); -+ local_set(&self->open_count, 0); - } -- if (self->open_count) { -+ if (local_read(&self->open_count)) { - spin_unlock_irqrestore(&self->spinlock, flags); - - IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); -@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) - tty->closing = 0; - self->tty = NULL; - -- if (self->blocked_open) { -+ if (local_read(&self->blocked_open)) { - if (self->close_delay) - schedule_timeout_interruptible(self->close_delay); - wake_up_interruptible(&self->open_wait); -@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) - spin_lock_irqsave(&self->spinlock, flags); - self->flags &= ~ASYNC_NORMAL_ACTIVE; - self->tty = NULL; -- self->open_count = 0; -+ local_set(&self->open_count, 0); - spin_unlock_irqrestore(&self->spinlock, flags); - - wake_up_interruptible(&self->open_wait); -@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) - seq_putc(m, '\n'); - - seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); -- seq_printf(m, "Open count: %d\n", self->open_count); -+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count)); - seq_printf(m, "Max data size: %d\n", self->max_data_size); - seq_printf(m, "Max header size: %d\n", self->max_header_size); - -diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c -index e2013e4..edfc1e3 100644 ---- a/net/iucv/af_iucv.c -+++ b/net/iucv/af_iucv.c -@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk) - - write_lock_bh(&iucv_sk_list.lock); - -- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); -+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); - while (__iucv_get_sock_by_name(name)) { - sprintf(name, "%08x", -- atomic_inc_return(&iucv_sk_list.autobind_name)); -+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); - } - - write_unlock_bh(&iucv_sk_list.lock); -diff --git a/net/key/af_key.c b/net/key/af_key.c -index 1e733e9..c84de2f 100644 ---- a/net/key/af_key.c -+++ b/net/key/af_key.c -@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, - struct xfrm_migrate m[XFRM_MAX_DEPTH]; - struct xfrm_kmaddress k; - -+ pax_track_stack(); -+ - if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1], - ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) || - !ext_hdrs[SADB_X_EXT_POLICY - 1]) { -@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc - static u32 get_acqseq(void) - { - u32 res; -- static atomic_t acqseq; -+ static atomic_unchecked_t acqseq; - - do { -- res = atomic_inc_return(&acqseq); -+ res = atomic_inc_return_unchecked(&acqseq); - } while (!res); - return res; - } -diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c -index 956b7e4..f01d328 100644 ---- a/net/lapb/lapb_iface.c -+++ b/net/lapb/lapb_iface.c -@@ -158,7 +158,7 @@ int 
lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks - goto out; - - lapb->dev = dev; -- lapb->callbacks = *callbacks; -+ lapb->callbacks = callbacks; - - __lapb_insert_cb(lapb); - -@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb) - - void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) - { -- if (lapb->callbacks.connect_confirmation) -- lapb->callbacks.connect_confirmation(lapb->dev, reason); -+ if (lapb->callbacks->connect_confirmation) -+ lapb->callbacks->connect_confirmation(lapb->dev, reason); - } - - void lapb_connect_indication(struct lapb_cb *lapb, int reason) - { -- if (lapb->callbacks.connect_indication) -- lapb->callbacks.connect_indication(lapb->dev, reason); -+ if (lapb->callbacks->connect_indication) -+ lapb->callbacks->connect_indication(lapb->dev, reason); - } - - void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) - { -- if (lapb->callbacks.disconnect_confirmation) -- lapb->callbacks.disconnect_confirmation(lapb->dev, reason); -+ if (lapb->callbacks->disconnect_confirmation) -+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason); - } - - void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) - { -- if (lapb->callbacks.disconnect_indication) -- lapb->callbacks.disconnect_indication(lapb->dev, reason); -+ if (lapb->callbacks->disconnect_indication) -+ lapb->callbacks->disconnect_indication(lapb->dev, reason); - } - - int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) - { -- if (lapb->callbacks.data_indication) -- return lapb->callbacks.data_indication(lapb->dev, skb); -+ if (lapb->callbacks->data_indication) -+ return lapb->callbacks->data_indication(lapb->dev, skb); - - kfree_skb(skb); - return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ -@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) - { - int used = 0; - -- if (lapb->callbacks.data_transmit) { -- lapb->callbacks.data_transmit(lapb->dev, skb); -+ if (lapb->callbacks->data_transmit) { -+ lapb->callbacks->data_transmit(lapb->dev, skb); - used = 1; - } - -diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c -index a01d213..6a1f1ab 100644 ---- a/net/mac80211/debugfs_sta.c -+++ b/net/mac80211/debugfs_sta.c -@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, - struct tid_ampdu_rx *tid_rx; - struct tid_ampdu_tx *tid_tx; - -+ pax_track_stack(); -+ - rcu_read_lock(); - - p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", -@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, - struct sta_info *sta = file->private_data; - struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; - -+ pax_track_stack(); -+ - p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", - htc->ht_supported ? 
"" : "not "); - if (htc->ht_supported) { -diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h -index 9fab144..7f0fc14 100644 ---- a/net/mac80211/ieee80211_i.h -+++ b/net/mac80211/ieee80211_i.h -@@ -27,6 +27,7 @@ - #include <net/ieee80211_radiotap.h> - #include <net/cfg80211.h> - #include <net/mac80211.h> -+#include <asm/local.h> - #include "key.h" - #include "sta_info.h" - -@@ -754,7 +755,7 @@ struct ieee80211_local { - /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ - spinlock_t queue_stop_reason_lock; - -- int open_count; -+ local_t open_count; - int monitors, cooked_mntrs; - /* number of interfaces with corresponding FIF_ flags */ - int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll, -diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c -index 556e7e6..120dcaf 100644 ---- a/net/mac80211/iface.c -+++ b/net/mac80211/iface.c -@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) - break; - } - -- if (local->open_count == 0) { -+ if (local_read(&local->open_count) == 0) { - res = drv_start(local); - if (res) - goto err_del_bss; -@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) - memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); - - if (!is_valid_ether_addr(dev->dev_addr)) { -- if (!local->open_count) -+ if (!local_read(&local->open_count)) - drv_stop(local); - return -EADDRNOTAVAIL; - } -@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) - mutex_unlock(&local->mtx); - - if (coming_up) -- local->open_count++; -+ local_inc(&local->open_count); - - if (hw_reconf_flags) { - ieee80211_hw_config(local, hw_reconf_flags); -@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) - err_del_interface: - drv_remove_interface(local, &sdata->vif); - err_stop: -- if (!local->open_count) -+ if (!local_read(&local->open_count)) - drv_stop(local); - err_del_bss: - sdata->bss = NULL; -@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, - } - - if (going_down) -- local->open_count--; -+ local_dec(&local->open_count); - - switch (sdata->vif.type) { - case NL80211_IFTYPE_AP_VLAN: -@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, - - ieee80211_recalc_ps(local, -1); - -- if (local->open_count == 0) { -+ if (local_read(&local->open_count) == 0) { - if (local->ops->napi_poll) - napi_disable(&local->napi); - ieee80211_clear_tx_pending(local); -diff --git a/net/mac80211/main.c b/net/mac80211/main.c -index 3d90dad..36884d5 100644 ---- a/net/mac80211/main.c -+++ b/net/mac80211/main.c -@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) - local->hw.conf.power_level = power; - } - -- if (changed && local->open_count) { -+ if (changed && local_read(&local->open_count)) { - ret = drv_config(local, changed); - /* - * Goal: -diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c -index 0f48368..d48e688 100644 ---- a/net/mac80211/mlme.c -+++ b/net/mac80211/mlme.c -@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, - bool have_higher_than_11mbit = false; - u16 ap_ht_cap_flags; - -+ pax_track_stack(); -+ - /* AssocResp and ReassocResp have identical structure */ - - aid = le16_to_cpu(mgmt->u.assoc_resp.aid); -diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c -index 6326d34..7225f61 100644 ---- a/net/mac80211/pm.c -+++ b/net/mac80211/pm.c -@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct 
ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) - struct ieee80211_sub_if_data *sdata; - struct sta_info *sta; - -- if (!local->open_count) -+ if (!local_read(&local->open_count)) - goto suspend; - - ieee80211_scan_cancel(local); -@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) - cancel_work_sync(&local->dynamic_ps_enable_work); - del_timer_sync(&local->dynamic_ps_timer); - -- local->wowlan = wowlan && local->open_count; -+ local->wowlan = wowlan && local_read(&local->open_count); - if (local->wowlan) { - int err = drv_suspend(local, wowlan); - if (err < 0) { -@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) - } - - /* stop hardware - this must stop RX */ -- if (local->open_count) -+ if (local_read(&local->open_count)) - ieee80211_stop_device(local); - - suspend: -diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c -index 3d5a2cb..b17ad48 100644 ---- a/net/mac80211/rate.c -+++ b/net/mac80211/rate.c -@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, - - ASSERT_RTNL(); - -- if (local->open_count) -+ if (local_read(&local->open_count)) - return -EBUSY; - - if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { -diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c -index 4851e9e..d860e05 100644 ---- a/net/mac80211/rc80211_pid_debugfs.c -+++ b/net/mac80211/rc80211_pid_debugfs.c -@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, - - spin_unlock_irqrestore(&events->lock, status); - -- if (copy_to_user(buf, pb, p)) -+ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) - return -EFAULT; - - return p; -diff --git a/net/mac80211/util.c b/net/mac80211/util.c -index fd031e8..84fbfcf 100644 ---- a/net/mac80211/util.c -+++ b/net/mac80211/util.c -@@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) - drv_set_coverage_class(local, hw->wiphy->coverage_class); - - /* everything else happens only if HW was up & running */ -- if (!local->open_count) -+ if (!local_read(&local->open_count)) - goto wake_up; - - /* -diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig -index 32bff6d..d0cf986 100644 ---- a/net/netfilter/Kconfig -+++ b/net/netfilter/Kconfig -@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP - - To compile it as a module, choose M here. If unsure, say N. - -+config NETFILTER_XT_MATCH_GRADM -+ tristate '"gradm" match support' -+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED -+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC -+ ---help--- -+ The gradm match allows to match on grsecurity RBAC being enabled. -+ It is useful when iptables rules are applied early on bootup to -+ prevent connections to the machine (except from a trusted host) -+ while the RBAC system is disabled. 
-+ - config NETFILTER_XT_MATCH_HASHLIMIT - tristate '"hashlimit" match support' - depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) -diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile -index 1a02853..5d8c22e 100644 ---- a/net/netfilter/Makefile -+++ b/net/netfilter/Makefile -@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o - obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o - obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o - obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o -+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o - obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o - obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o - obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o -diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c -index 12571fb..fb73976 100644 ---- a/net/netfilter/ipvs/ip_vs_conn.c -+++ b/net/netfilter/ipvs/ip_vs_conn.c -@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) - /* Increase the refcnt counter of the dest */ - atomic_inc(&dest->refcnt); - -- conn_flags = atomic_read(&dest->conn_flags); -+ conn_flags = atomic_read_unchecked(&dest->conn_flags); - if (cp->protocol != IPPROTO_UDP) - conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; - /* Bind with the destination and its corresponding transmitter */ -@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, - atomic_set(&cp->refcnt, 1); - - atomic_set(&cp->n_control, 0); -- atomic_set(&cp->in_pkts, 0); -+ atomic_set_unchecked(&cp->in_pkts, 0); - - atomic_inc(&ipvs->conn_count); - if (flags & IP_VS_CONN_F_NO_CPORT) -@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp) - - /* Don't drop the entry if its number of incoming packets is not - located in [0, 8] */ -- i = atomic_read(&cp->in_pkts); -+ i = atomic_read_unchecked(&cp->in_pkts); - if (i > 8 || i < 0) return 0; - - if (!todrop_rate[i]) return 0; -diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c -index 4f77bb1..5d0bc26 100644 ---- a/net/netfilter/ipvs/ip_vs_core.c -+++ b/net/netfilter/ipvs/ip_vs_core.c -@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, - ret = cp->packet_xmit(skb, cp, pd->pp); - /* do not touch skb anymore */ - -- atomic_inc(&cp->in_pkts); -+ atomic_inc_unchecked(&cp->in_pkts); - ip_vs_conn_put(cp); - return ret; - } -@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) - if (cp->flags & IP_VS_CONN_F_ONE_PACKET) - pkts = sysctl_sync_threshold(ipvs); - else -- pkts = atomic_add_return(1, &cp->in_pkts); -+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); - - if ((ipvs->sync_state & IP_VS_STATE_MASTER) && - cp->protocol == IPPROTO_SCTP) { -diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c -index e3be48b..d658c8c 100644 ---- a/net/netfilter/ipvs/ip_vs_ctl.c -+++ b/net/netfilter/ipvs/ip_vs_ctl.c -@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, - ip_vs_rs_hash(ipvs, dest); - write_unlock_bh(&ipvs->rs_lock); - } -- atomic_set(&dest->conn_flags, conn_flags); -+ atomic_set_unchecked(&dest->conn_flags, conn_flags); - - /* bind the service */ - if (!dest->svc) { -@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) - " %-7s %-6d %-10d %-10d\n", - &dest->addr.in6, - ntohs(dest->port), -- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), -+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), - atomic_read(&dest->weight), - 
atomic_read(&dest->activeconns), - atomic_read(&dest->inactconns)); -@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) - "%-7s %-6d %-10d %-10d\n", - ntohl(dest->addr.ip), - ntohs(dest->port), -- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), -+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), - atomic_read(&dest->weight), - atomic_read(&dest->activeconns), - atomic_read(&dest->inactconns)); -@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) - struct ip_vs_dest_user_kern udest; - struct netns_ipvs *ipvs = net_ipvs(net); - -+ pax_track_stack(); -+ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - -@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, - - entry.addr = dest->addr.ip; - entry.port = dest->port; -- entry.conn_flags = atomic_read(&dest->conn_flags); -+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags); - entry.weight = atomic_read(&dest->weight); - entry.u_threshold = dest->u_threshold; - entry.l_threshold = dest->l_threshold; -@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) - NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); - - NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, -- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); -+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); -diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c -index 3cdd479..116afa8 100644 ---- a/net/netfilter/ipvs/ip_vs_sync.c -+++ b/net/netfilter/ipvs/ip_vs_sync.c -@@ -649,7 +649,7 @@ control: - * i.e only increment in_pkts for Templates. 
- */ - if (cp->flags & IP_VS_CONN_F_TEMPLATE) { -- int pkts = atomic_add_return(1, &cp->in_pkts); -+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts); - - if (pkts % sysctl_sync_period(ipvs) != 1) - return; -@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, - - if (opt) - memcpy(&cp->in_seq, opt, sizeof(*opt)); -- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); -+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs)); - cp->state = state; - cp->old_state = cp->state; - /* -diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c -index ee319a4..8a285ee 100644 ---- a/net/netfilter/ipvs/ip_vs_xmit.c -+++ b/net/netfilter/ipvs/ip_vs_xmit.c -@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, - else - rc = NF_ACCEPT; - /* do not touch skb anymore */ -- atomic_inc(&cp->in_pkts); -+ atomic_inc_unchecked(&cp->in_pkts); - goto out; - } - -@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, - else - rc = NF_ACCEPT; - /* do not touch skb anymore */ -- atomic_inc(&cp->in_pkts); -+ atomic_inc_unchecked(&cp->in_pkts); - goto out; - } - -diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c -index 2d8158a..5dca296 100644 ---- a/net/netfilter/nfnetlink_log.c -+++ b/net/netfilter/nfnetlink_log.c -@@ -70,7 +70,7 @@ struct nfulnl_instance { - }; - - static DEFINE_SPINLOCK(instances_lock); --static atomic_t global_seq; -+static atomic_unchecked_t global_seq; - - #define INSTANCE_BUCKETS 16 - static struct hlist_head instance_table[INSTANCE_BUCKETS]; -@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst, - /* global sequence number */ - if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) - NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, -- htonl(atomic_inc_return(&global_seq))); -+ htonl(atomic_inc_return_unchecked(&global_seq))); - - if (data_len) { - struct nlattr *nla; -diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c -new file mode 100644 -index 0000000..6905327 ---- /dev/null -+++ b/net/netfilter/xt_gradm.c -@@ -0,0 +1,51 @@ -+/* -+ * gradm match for netfilter -+ * Copyright © Zbigniew Krzystolik, 2010 -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License; either version -+ * 2 or 3 as published by the Free Software Foundation. 
-+ */ -+#include <linux/module.h> -+#include <linux/moduleparam.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter/x_tables.h> -+#include <linux/grsecurity.h> -+#include <linux/netfilter/xt_gradm.h> -+ -+static bool -+gradm_mt(const struct sk_buff *skb, struct xt_action_param *par) -+{ -+ const struct xt_gradm_mtinfo *info = par->matchinfo; -+ bool retval = false; -+ if (gr_acl_is_enabled()) -+ retval = true; -+ return retval ^ info->invflags; -+} -+ -+static struct xt_match gradm_mt_reg __read_mostly = { -+ .name = "gradm", -+ .revision = 0, -+ .family = NFPROTO_UNSPEC, -+ .match = gradm_mt, -+ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)), -+ .me = THIS_MODULE, -+}; -+ -+static int __init gradm_mt_init(void) -+{ -+ return xt_register_match(&gradm_mt_reg); -+} -+ -+static void __exit gradm_mt_exit(void) -+{ -+ xt_unregister_match(&gradm_mt_reg); -+} -+ -+module_init(gradm_mt_init); -+module_exit(gradm_mt_exit); -+MODULE_AUTHOR("Zbigniew Krzystolik zbyniu@destrukcja.pl"); -+MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("ipt_gradm"); -+MODULE_ALIAS("ip6t_gradm"); -diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c -index 42ecb71..8d687c0 100644 ---- a/net/netfilter/xt_statistic.c -+++ b/net/netfilter/xt_statistic.c -@@ -18,7 +18,7 @@ - #include <linux/netfilter/x_tables.h> - - struct xt_statistic_priv { -- atomic_t count; -+ atomic_unchecked_t count; - } ____cacheline_aligned_in_smp; - - MODULE_LICENSE("GPL"); -@@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) - break; - case XT_STATISTIC_MODE_NTH: - do { -- oval = atomic_read(&info->master->count); -+ oval = atomic_read_unchecked(&info->master->count); - nval = (oval == info->u.nth.every) ? 
0 : oval + 1; -- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval); -+ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval); - if (nval == 0) - ret = !ret; - break; -@@ -63,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par) - info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); - if (info->master == NULL) - return -ENOMEM; -- atomic_set(&info->master->count, info->u.nth.count); -+ atomic_set_unchecked(&info->master->count, info->u.nth.count); - - return 0; - } -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c -index 0a4db02..604f748 100644 ---- a/net/netlink/af_netlink.c -+++ b/net/netlink/af_netlink.c -@@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk) - sk->sk_error_report(sk); - } - } -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - } - - static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) -@@ -2000,7 +2000,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v) - sk_wmem_alloc_get(s), - nlk->cb, - atomic_read(&s->sk_refcnt), -- atomic_read(&s->sk_drops), -+ atomic_read_unchecked(&s->sk_drops), - sock_i_ino(s) - ); - -diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c -index 732152f..60bb09e 100644 ---- a/net/netrom/af_netrom.c -+++ b/net/netrom/af_netrom.c -@@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, - struct sock *sk = sock->sk; - struct nr_sock *nr = nr_sk(sk); - -+ memset(sax, 0, sizeof(*sax)); - lock_sock(sk); - if (peer != 0) { - if (sk->sk_state != TCP_ESTABLISHED) { -@@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, - *uaddr_len = sizeof(struct full_sockaddr_ax25); - } else { - sax->fsa_ax25.sax25_family = AF_NETROM; -- sax->fsa_ax25.sax25_ndigis = 0; - sax->fsa_ax25.sax25_call = nr->source_addr; - *uaddr_len = sizeof(struct sockaddr_ax25); - } -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index fabb4fa..e146b73 100644 ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -954,7 +954,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, - - spin_lock(&sk->sk_receive_queue.lock); - po->stats.tp_packets++; -- skb->dropcount = atomic_read(&sk->sk_drops); -+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); - __skb_queue_tail(&sk->sk_receive_queue, skb); - spin_unlock(&sk->sk_receive_queue.lock); - sk->sk_data_ready(sk, skb->len); -@@ -963,7 +963,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, - drop_n_acct: - spin_lock(&sk->sk_receive_queue.lock); - po->stats.tp_drops++; -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - spin_unlock(&sk->sk_receive_queue.lock); - - drop_n_restore: -@@ -2479,7 +2479,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, - case PACKET_HDRLEN: - if (len > sizeof(int)) - len = sizeof(int); -- if (copy_from_user(&val, optval, len)) -+ if (len > sizeof(val) || copy_from_user(&val, optval, len)) - return -EFAULT; - switch (val) { - case TPACKET_V1: -@@ -2526,7 +2526,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, - - if (put_user(len, optlen)) - return -EFAULT; -- if (copy_to_user(optval, data, len)) -+ if (len > sizeof(st) || copy_to_user(optval, data, len)) - return -EFAULT; - return 0; - } -diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c -index c6fffd9..a7ffa0c 100644 ---- a/net/phonet/af_phonet.c -+++ b/net/phonet/af_phonet.c -@@ -41,7 +41,7 @@ static struct 
phonet_protocol *phonet_proto_get(unsigned int protocol) - { - struct phonet_protocol *pp; - -- if (protocol >= PHONET_NPROTO) -+ if (protocol < 0 || protocol >= PHONET_NPROTO) - return NULL; - - rcu_read_lock(); -@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol, - { - int err = 0; - -- if (protocol >= PHONET_NPROTO) -+ if (protocol < 0 || protocol >= PHONET_NPROTO) - return -EINVAL; - - err = proto_register(pp->prot, 1); -diff --git a/net/phonet/pep.c b/net/phonet/pep.c -index f17fd84..edffce8 100644 ---- a/net/phonet/pep.c -+++ b/net/phonet/pep.c -@@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) - - case PNS_PEP_CTRL_REQ: - if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - break; - } - __skb_pull(skb, 4); -@@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) - } - - if (pn->rx_credits == 0) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - err = -ENOBUFS; - break; - } -@@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) - } - - if (pn->rx_credits == 0) { -- atomic_inc(&sk->sk_drops); -+ atomic_inc_unchecked(&sk->sk_drops); - err = NET_RX_DROP; - break; - } -diff --git a/net/phonet/socket.c b/net/phonet/socket.c -index ab07711..9d4ac5d 100644 ---- a/net/phonet/socket.c -+++ b/net/phonet/socket.c -@@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) - pn->resource, sk->sk_state, - sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), - sock_i_uid(sk), sock_i_ino(sk), -- atomic_read(&sk->sk_refcnt), sk, -- atomic_read(&sk->sk_drops), &len); -+ atomic_read(&sk->sk_refcnt), -+#ifdef CONFIG_GRKERNSEC_HIDESYM -+ NULL, -+#else -+ sk, -+#endif -+ atomic_read_unchecked(&sk->sk_drops), &len); - } - seq_printf(seq, "%*s\n", 127 - len, ""); - return 0; -diff --git a/net/rds/cong.c b/net/rds/cong.c -index 6daaa49..fbf6af5 100644 ---- a/net/rds/cong.c -+++ b/net/rds/cong.c -@@ -77,7 +77,7 @@ - * finds that the saved generation number is smaller than the global generation - * number, it wakes up the process. 
- */ --static atomic_t rds_cong_generation = ATOMIC_INIT(0); -+static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0); - - /* - * Congestion monitoring -@@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) - rdsdebug("waking map %p for %pI4\n", - map, &map->m_addr); - rds_stats_inc(s_cong_update_received); -- atomic_inc(&rds_cong_generation); -+ atomic_inc_unchecked(&rds_cong_generation); - if (waitqueue_active(&map->m_waitq)) - wake_up(&map->m_waitq); - if (waitqueue_active(&rds_poll_waitq)) -@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated); - - int rds_cong_updated_since(unsigned long *recent) - { -- unsigned long gen = atomic_read(&rds_cong_generation); -+ unsigned long gen = atomic_read_unchecked(&rds_cong_generation); - - if (likely(*recent == gen)) - return 0; -diff --git a/net/rds/ib.h b/net/rds/ib.h -index edfaaaf..8c89879 100644 ---- a/net/rds/ib.h -+++ b/net/rds/ib.h -@@ -128,7 +128,7 @@ struct rds_ib_connection { - /* sending acks */ - unsigned long i_ack_flags; - #ifdef KERNEL_HAS_ATOMIC64 -- atomic64_t i_ack_next; /* next ACK to send */ -+ atomic64_unchecked_t i_ack_next; /* next ACK to send */ - #else - spinlock_t i_ack_lock; /* protect i_ack_next */ - u64 i_ack_next; /* next ACK to send */ -diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c -index cd67026..0b9a54a 100644 ---- a/net/rds/ib_cm.c -+++ b/net/rds/ib_cm.c -@@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn) - /* Clear the ACK state */ - clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); - #ifdef KERNEL_HAS_ATOMIC64 -- atomic64_set(&ic->i_ack_next, 0); -+ atomic64_set_unchecked(&ic->i_ack_next, 0); - #else - ic->i_ack_next = 0; - #endif -diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c -index e29e0ca..fa3a6a3 100644 ---- a/net/rds/ib_recv.c -+++ b/net/rds/ib_recv.c -@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) - static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, - int ack_required) - { -- atomic64_set(&ic->i_ack_next, seq); -+ atomic64_set_unchecked(&ic->i_ack_next, seq); - if (ack_required) { - smp_mb__before_clear_bit(); - set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); -@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) - clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); - smp_mb__after_clear_bit(); - -- return atomic64_read(&ic->i_ack_next); -+ return atomic64_read_unchecked(&ic->i_ack_next); - } - #endif - -diff --git a/net/rds/iw.h b/net/rds/iw.h -index 04ce3b1..48119a6 100644 ---- a/net/rds/iw.h -+++ b/net/rds/iw.h -@@ -134,7 +134,7 @@ struct rds_iw_connection { - /* sending acks */ - unsigned long i_ack_flags; - #ifdef KERNEL_HAS_ATOMIC64 -- atomic64_t i_ack_next; /* next ACK to send */ -+ atomic64_unchecked_t i_ack_next; /* next ACK to send */ - #else - spinlock_t i_ack_lock; /* protect i_ack_next */ - u64 i_ack_next; /* next ACK to send */ -diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c -index 9556d28..f046d0e 100644 ---- a/net/rds/iw_cm.c -+++ b/net/rds/iw_cm.c -@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn) - /* Clear the ACK state */ - clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); - #ifdef KERNEL_HAS_ATOMIC64 -- atomic64_set(&ic->i_ack_next, 0); -+ atomic64_set_unchecked(&ic->i_ack_next, 0); - #else - ic->i_ack_next = 0; - #endif -diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c -index 4e1de17..d121708 100644 ---- a/net/rds/iw_rdma.c -+++ b/net/rds/iw_rdma.c -@@ -184,6 +184,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, 
struct rdma_cm_id *cm_i - struct rdma_cm_id *pcm_id; - int rc; - -+ pax_track_stack(); -+ - src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; - dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; - -diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c -index 5e57347..3916042 100644 ---- a/net/rds/iw_recv.c -+++ b/net/rds/iw_recv.c -@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic) - static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq, - int ack_required) - { -- atomic64_set(&ic->i_ack_next, seq); -+ atomic64_set_unchecked(&ic->i_ack_next, seq); - if (ack_required) { - smp_mb__before_clear_bit(); - set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); -@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic) - clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); - smp_mb__after_clear_bit(); - -- return atomic64_read(&ic->i_ack_next); -+ return atomic64_read_unchecked(&ic->i_ack_next); - } - #endif - -diff --git a/net/rds/tcp.c b/net/rds/tcp.c -index 8e0a320..ee8e38f 100644 ---- a/net/rds/tcp.c -+++ b/net/rds/tcp.c -@@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock) - int val = 1; - - set_fs(KERNEL_DS); -- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, -+ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val, - sizeof(val)); - set_fs(oldfs); - } -diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c -index 1b4fd68..2234175 100644 ---- a/net/rds/tcp_send.c -+++ b/net/rds/tcp_send.c -@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val) - - oldfs = get_fs(); - set_fs(KERNEL_DS); -- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, -+ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val, - sizeof(val)); - set_fs(oldfs); - } -diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c -index 74c064c..fdec26f 100644 ---- a/net/rxrpc/af_rxrpc.c -+++ b/net/rxrpc/af_rxrpc.c -@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops; - __be32 rxrpc_epoch; - - /* current debugging ID */ --atomic_t rxrpc_debug_id; -+atomic_unchecked_t rxrpc_debug_id; - - /* count of skbs currently in use */ - atomic_t rxrpc_n_skbs; -diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c -index f99cfce..3682692 100644 ---- a/net/rxrpc/ar-ack.c -+++ b/net/rxrpc/ar-ack.c -@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call) - - _enter("{%d,%d,%d,%d},", - call->acks_hard, call->acks_unacked, -- atomic_read(&call->sequence), -+ atomic_read_unchecked(&call->sequence), - CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); - - stop = 0; -@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call) - - /* each Tx packet has a new serial number */ - sp->hdr.serial = -- htonl(atomic_inc_return(&call->conn->serial)); -+ htonl(atomic_inc_return_unchecked(&call->conn->serial)); - - hdr = (struct rxrpc_header *) txb->head; - hdr->serial = sp->hdr.serial; -@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) - */ - static void rxrpc_clear_tx_window(struct rxrpc_call *call) - { -- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); -+ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence)); - } - - /* -@@ -629,7 +629,7 @@ process_further: - - latest = ntohl(sp->hdr.serial); - hard = ntohl(ack.firstPacket); -- tx = atomic_read(&call->sequence); -+ tx = atomic_read_unchecked(&call->sequence); - - _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", - latest, -@@ 
-842,6 +842,8 @@ void rxrpc_process_call(struct work_struct *work) - u32 abort_code = RX_PROTOCOL_ERROR; - u8 *acks = NULL; - -+ pax_track_stack(); -+ - //printk("\n--------------------\n"); - _enter("{%d,%s,%lx} [%lu]", - call->debug_id, rxrpc_call_states[call->state], call->events, -@@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_struct *work) - goto maybe_reschedule; - - send_ACK_with_skew: -- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - -+ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) - - ntohl(ack.serial)); - send_ACK: - mtu = call->conn->trans->peer->if_mtu; -@@ -1173,7 +1175,7 @@ send_ACK: - ackinfo.rxMTU = htonl(5692); - ackinfo.jumbo_max = htonl(4); - -- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); -+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); - _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", - ntohl(hdr.serial), - ntohs(ack.maxSkew), -@@ -1191,7 +1193,7 @@ send_ACK: - send_message: - _debug("send message"); - -- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); -+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); - _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); - send_message_2: - -diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c -index bf656c2..48f9d27 100644 ---- a/net/rxrpc/ar-call.c -+++ b/net/rxrpc/ar-call.c -@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) - spin_lock_init(&call->lock); - rwlock_init(&call->state_lock); - atomic_set(&call->usage, 1); -- call->debug_id = atomic_inc_return(&rxrpc_debug_id); -+ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; - - memset(&call->sock_node, 0xed, sizeof(call->sock_node)); -diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c -index 4106ca9..a338d7a 100644 ---- a/net/rxrpc/ar-connection.c -+++ b/net/rxrpc/ar-connection.c -@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) - rwlock_init(&conn->lock); - spin_lock_init(&conn->state_lock); - atomic_set(&conn->usage, 1); -- conn->debug_id = atomic_inc_return(&rxrpc_debug_id); -+ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); - conn->avail_calls = RXRPC_MAXCALLS; - conn->size_align = 4; - conn->header_size = sizeof(struct rxrpc_header); -diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c -index e7ed43a..6afa140 100644 ---- a/net/rxrpc/ar-connevent.c -+++ b/net/rxrpc/ar-connevent.c -@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, - - len = iov[0].iov_len + iov[1].iov_len; - -- hdr.serial = htonl(atomic_inc_return(&conn->serial)); -+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); - _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); - - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); -diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c -index 1a2b0633..e8d1382 100644 ---- a/net/rxrpc/ar-input.c -+++ b/net/rxrpc/ar-input.c -@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) - /* track the latest serial number on this connection for ACK packet - * information */ - serial = ntohl(sp->hdr.serial); -- hi_serial = atomic_read(&call->conn->hi_serial); -+ hi_serial = atomic_read_unchecked(&call->conn->hi_serial); - while (serial > hi_serial) -- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, -+ hi_serial = 
atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial, - serial); - - /* request ACK generation for any ACK or DATA packet that requests -diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h -index 8e22bd3..f66d1c0 100644 ---- a/net/rxrpc/ar-internal.h -+++ b/net/rxrpc/ar-internal.h -@@ -272,8 +272,8 @@ struct rxrpc_connection { - int error; /* error code for local abort */ - int debug_id; /* debug ID for printks */ - unsigned call_counter; /* call ID counter */ -- atomic_t serial; /* packet serial number counter */ -- atomic_t hi_serial; /* highest serial number received */ -+ atomic_unchecked_t serial; /* packet serial number counter */ -+ atomic_unchecked_t hi_serial; /* highest serial number received */ - u8 avail_calls; /* number of calls available */ - u8 size_align; /* data size alignment (for security) */ - u8 header_size; /* rxrpc + security header size */ -@@ -346,7 +346,7 @@ struct rxrpc_call { - spinlock_t lock; - rwlock_t state_lock; /* lock for state transition */ - atomic_t usage; -- atomic_t sequence; /* Tx data packet sequence counter */ -+ atomic_unchecked_t sequence; /* Tx data packet sequence counter */ - u32 abort_code; /* local/remote abort code */ - enum { /* current state of call */ - RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ -@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) - */ - extern atomic_t rxrpc_n_skbs; - extern __be32 rxrpc_epoch; --extern atomic_t rxrpc_debug_id; -+extern atomic_unchecked_t rxrpc_debug_id; - extern struct workqueue_struct *rxrpc_workqueue; - - /* -diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c -index 87f7135..74d3703 100644 ---- a/net/rxrpc/ar-local.c -+++ b/net/rxrpc/ar-local.c -@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) - spin_lock_init(&local->lock); - rwlock_init(&local->services_lock); - atomic_set(&local->usage, 1); -- local->debug_id = atomic_inc_return(&rxrpc_debug_id); -+ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); - memcpy(&local->srx, srx, sizeof(*srx)); - } - -diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c -index 5f22e26..e5bd20f 100644 ---- a/net/rxrpc/ar-output.c -+++ b/net/rxrpc/ar-output.c -@@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb *iocb, - sp->hdr.cid = call->cid; - sp->hdr.callNumber = call->call_id; - sp->hdr.seq = -- htonl(atomic_inc_return(&call->sequence)); -+ htonl(atomic_inc_return_unchecked(&call->sequence)); - sp->hdr.serial = -- htonl(atomic_inc_return(&conn->serial)); -+ htonl(atomic_inc_return_unchecked(&conn->serial)); - sp->hdr.type = RXRPC_PACKET_TYPE_DATA; - sp->hdr.userStatus = 0; - sp->hdr.securityIndex = conn->security_ix; -diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c -index 2754f09..b20e38f 100644 ---- a/net/rxrpc/ar-peer.c -+++ b/net/rxrpc/ar-peer.c -@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, - INIT_LIST_HEAD(&peer->error_targets); - spin_lock_init(&peer->lock); - atomic_set(&peer->usage, 1); -- peer->debug_id = atomic_inc_return(&rxrpc_debug_id); -+ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); - memcpy(&peer->srx, srx, sizeof(*srx)); - - rxrpc_assess_MTU_size(peer); -diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c -index 38047f7..9f48511 100644 ---- a/net/rxrpc/ar-proc.c -+++ b/net/rxrpc/ar-proc.c -@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) - atomic_read(&conn->usage), - 
rxrpc_conn_states[conn->state], - key_serial(conn->key), -- atomic_read(&conn->serial), -- atomic_read(&conn->hi_serial)); -+ atomic_read_unchecked(&conn->serial), -+ atomic_read_unchecked(&conn->hi_serial)); - - return 0; - } -diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c -index 92df566..87ec1bf 100644 ---- a/net/rxrpc/ar-transport.c -+++ b/net/rxrpc/ar-transport.c -@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, - spin_lock_init(&trans->client_lock); - rwlock_init(&trans->conn_lock); - atomic_set(&trans->usage, 1); -- trans->debug_id = atomic_inc_return(&rxrpc_debug_id); -+ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); - - if (peer->srx.transport.family == AF_INET) { - switch (peer->srx.transport_type) { -diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c -index 7635107..5000b71 100644 ---- a/net/rxrpc/rxkad.c -+++ b/net/rxrpc/rxkad.c -@@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, - u16 check; - int nsg; - -+ pax_track_stack(); -+ - sp = rxrpc_skb(skb); - - _enter(""); -@@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, - u16 check; - int nsg; - -+ pax_track_stack(); -+ - _enter(""); - - sp = rxrpc_skb(skb); -@@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) - - len = iov[0].iov_len + iov[1].iov_len; - -- hdr.serial = htonl(atomic_inc_return(&conn->serial)); -+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); - _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); - - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); -@@ -660,7 +664,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, - - len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; - -- hdr->serial = htonl(atomic_inc_return(&conn->serial)); -+ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial)); - _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); - - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); -diff --git a/net/sctp/auth.c b/net/sctp/auth.c -index 865e68f..bf81204 100644 ---- a/net/sctp/auth.c -+++ b/net/sctp/auth.c -@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) - struct sctp_auth_bytes *key; - - /* Verify that we are not going to overflow INT_MAX */ -- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) -+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes))) - return NULL; - - /* Allocate the shared key */ -diff --git a/net/sctp/proc.c b/net/sctp/proc.c -index 05a6ce2..c8bf836 100644 ---- a/net/sctp/proc.c -+++ b/net/sctp/proc.c -@@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) - seq_printf(seq, - "%8pK %8pK %-3d %-3d %-2d %-4d " - "%4d %8d %8d %7d %5lu %-5d %5d ", -- assoc, sk, sctp_sk(sk)->type, sk->sk_state, -+ assoc, sk, -+ sctp_sk(sk)->type, sk->sk_state, - assoc->state, hash, - assoc->assoc_id, - assoc->sndbuf_used, -diff --git a/net/sctp/socket.c b/net/sctp/socket.c -index 836aa63..d779d7b 100644 ---- a/net/sctp/socket.c -+++ b/net/sctp/socket.c -@@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, - addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; - if (space_left < addrlen) - return -ENOMEM; -- if (copy_to_user(to, &temp, addrlen)) -+ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen)) - return -EFAULT; - to += addrlen; - cnt++; -diff --git a/net/socket.c b/net/socket.c -index 
ffe92ca..8057b85 100644 ---- a/net/socket.c -+++ b/net/socket.c -@@ -88,6 +88,7 @@ - #include <linux/nsproxy.h> - #include <linux/magic.h> - #include <linux/slab.h> -+#include <linux/in.h> - - #include <asm/uaccess.h> - #include <asm/unistd.h> -@@ -105,6 +106,8 @@ - #include <linux/sockios.h> - #include <linux/atalk.h> - -+#include <linux/grsock.h> -+ - static int sock_no_open(struct inode *irrelevant, struct file *dontcare); - static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos); -@@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type, - &sockfs_dentry_operations, SOCKFS_MAGIC); - } - --static struct vfsmount *sock_mnt __read_mostly; -+struct vfsmount *sock_mnt __read_mostly; - - static struct file_system_type sock_fs_type = { - .name = "sockfs", -@@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol, - return -EAFNOSUPPORT; - if (type < 0 || type >= SOCK_MAX) - return -EINVAL; -+ if (protocol < 0) -+ return -EINVAL; - - /* Compatibility. - -@@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) - if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) - flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; - -+ if(!gr_search_socket(family, type, protocol)) { -+ retval = -EACCES; -+ goto out; -+ } -+ -+ if (gr_handle_sock_all(family, type, protocol)) { -+ retval = -EACCES; -+ goto out; -+ } -+ - retval = sock_create(family, type, protocol, &sock); - if (retval < 0) - goto out; -@@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) - if (sock) { - err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); - if (err >= 0) { -+ if (gr_handle_sock_server((struct sockaddr *)&address)) { -+ err = -EACCES; -+ goto error; -+ } -+ err = gr_search_bind(sock, (struct sockaddr_in *)&address); -+ if (err) -+ goto error; -+ - err = security_socket_bind(sock, - (struct sockaddr *)&address, - addrlen); -@@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) - (struct sockaddr *) - &address, addrlen); - } -+error: - fput_light(sock->file, fput_needed); - } - return err; -@@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) - if ((unsigned)backlog > somaxconn) - backlog = somaxconn; - -+ if (gr_handle_sock_server_other(sock->sk)) { -+ err = -EPERM; -+ goto error; -+ } -+ -+ err = gr_search_listen(sock); -+ if (err) -+ goto error; -+ - err = security_socket_listen(sock, backlog); - if (!err) - err = sock->ops->listen(sock, backlog); - -+error: - fput_light(sock->file, fput_needed); - } - return err; -@@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, - newsock->type = sock->type; - newsock->ops = sock->ops; - -+ if (gr_handle_sock_server_other(sock->sk)) { -+ err = -EPERM; -+ sock_release(newsock); -+ goto out_put; -+ } -+ -+ err = gr_search_accept(sock); -+ if (err) { -+ sock_release(newsock); -+ goto out_put; -+ } -+ - /* - * We don't need try_module_get here, as the listening socket (sock) - * has the protocol module (sock->ops->owner) held. 
-@@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, - fd_install(newfd, newfile); - err = newfd; - -+ gr_attach_curr_ip(newsock->sk); -+ - out_put: - fput_light(sock->file, fput_needed); - out: -@@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, - int, addrlen) - { - struct socket *sock; -+ struct sockaddr *sck; - struct sockaddr_storage address; - int err, fput_needed; - -@@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, - if (err < 0) - goto out_put; - -+ sck = (struct sockaddr *)&address; -+ -+ if (gr_handle_sock_client(sck)) { -+ err = -EACCES; -+ goto out_put; -+ } -+ -+ err = gr_search_connect(sock, (struct sockaddr_in *)sck); -+ if (err) -+ goto out_put; -+ - err = - security_socket_connect(sock, (struct sockaddr *)&address, addrlen); - if (err) -@@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, - unsigned char *ctl_buf = ctl; - int err, ctl_len, iov_size, total_len; - -+ pax_track_stack(); -+ - err = -EFAULT; - if (MSG_CMSG_COMPAT & flags) { - if (get_compat_msghdr(msg_sys, msg_compat)) -@@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, - * checking falls down on this. - */ - if (copy_from_user(ctl_buf, -- (void __user __force *)msg_sys->msg_control, -+ (void __force_user *)msg_sys->msg_control, - ctl_len)) - goto out_freectl; - msg_sys->msg_control = ctl_buf; -@@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, - * kernel msghdr to use the kernel address space) - */ - -- uaddr = (__force void __user *)msg_sys->msg_name; -+ uaddr = (void __force_user *)msg_sys->msg_name; - uaddr_len = COMPAT_NAMELEN(msg); - if (MSG_CMSG_COMPAT & flags) { - err = verify_compat_iovec(msg_sys, iov, -@@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) - } - - ifr = compat_alloc_user_space(buf_size); -- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); -+ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); - - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) - return -EFAULT; -@@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); - - if (copy_in_user(rxnfc, compat_rxnfc, -- (void *)(&rxnfc->fs.m_ext + 1) - -- (void *)rxnfc) || -+ (void __user *)(&rxnfc->fs.m_ext + 1) - -+ (void __user *)rxnfc) || - copy_in_user(&rxnfc->fs.ring_cookie, - &compat_rxnfc->fs.ring_cookie, -- (void *)(&rxnfc->fs.location + 1) - -- (void *)&rxnfc->fs.ring_cookie) || -+ (void __user *)(&rxnfc->fs.location + 1) - -+ (void __user *)&rxnfc->fs.ring_cookie) || - copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, - sizeof(rxnfc->rule_cnt))) - return -EFAULT; -@@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) - - if (convert_out) { - if (copy_in_user(compat_rxnfc, rxnfc, -- (const void *)(&rxnfc->fs.m_ext + 1) - -- (const void *)rxnfc) || -+ (const void __user *)(&rxnfc->fs.m_ext + 1) - -+ (const void __user *)rxnfc) || - copy_in_user(&compat_rxnfc->fs.ring_cookie, - &rxnfc->fs.ring_cookie, -- (const void *)(&rxnfc->fs.location + 1) - -- (const void *)&rxnfc->fs.ring_cookie) || -+ (const void __user *)(&rxnfc->fs.location + 1) - -+ (const void __user *)&rxnfc->fs.ring_cookie) || - copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, - 
sizeof(rxnfc->rule_cnt))) - return -EFAULT; -@@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, - old_fs = get_fs(); - set_fs(KERNEL_DS); - err = dev_ioctl(net, cmd, -- (struct ifreq __user __force *) &kifr); -+ (struct ifreq __force_user *) &kifr); - set_fs(old_fs); - - return err; -@@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, - - old_fs = get_fs(); - set_fs(KERNEL_DS); -- err = dev_ioctl(net, cmd, (void __user __force *)&ifr); -+ err = dev_ioctl(net, cmd, (void __force_user *)&ifr); - set_fs(old_fs); - - if (cmd == SIOCGIFMAP && !err) { -@@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, - ret |= __get_user(rtdev, &(ur4->rt_dev)); - if (rtdev) { - ret |= copy_from_user(devname, compat_ptr(rtdev), 15); -- r4.rt_dev = (char __user __force *)devname; -+ r4.rt_dev = (char __force_user *)devname; - devname[15] = 0; - } else - r4.rt_dev = NULL; -@@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, - int __user *uoptlen; - int err; - -- uoptval = (char __user __force *) optval; -- uoptlen = (int __user __force *) optlen; -+ uoptval = (char __force_user *) optval; -+ uoptlen = (int __force_user *) optlen; - - set_fs(KERNEL_DS); - if (level == SOL_SOCKET) -@@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, - char __user *uoptval; - int err; - -- uoptval = (char __user __force *) optval; -+ uoptval = (char __force_user *) optval; - - set_fs(KERNEL_DS); - if (level == SOL_SOCKET) -diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c -index d12ffa5..0b5a6e2 100644 ---- a/net/sunrpc/sched.c -+++ b/net/sunrpc/sched.c -@@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word) - #ifdef RPC_DEBUG - static void rpc_task_set_debuginfo(struct rpc_task *task) - { -- static atomic_t rpc_pid; -+ static atomic_unchecked_t rpc_pid; - -- task->tk_pid = atomic_inc_return(&rpc_pid); -+ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); - } - #else - static inline void rpc_task_set_debuginfo(struct rpc_task *task) -diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c -index 767d494..fe17e9d 100644 ---- a/net/sunrpc/svcsock.c -+++ b/net/sunrpc/svcsock.c -@@ -394,7 +394,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, - int buflen, unsigned int base) - { - size_t save_iovlen; -- void __user *save_iovbase; -+ void *save_iovbase; - unsigned int i; - int ret; - -diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c -index 09af4fa..77110a9 100644 ---- a/net/sunrpc/xprtrdma/svc_rdma.c -+++ b/net/sunrpc/xprtrdma/svc_rdma.c -@@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; - static unsigned int min_max_inline = 4096; - static unsigned int max_max_inline = 65536; - --atomic_t rdma_stat_recv; --atomic_t rdma_stat_read; --atomic_t rdma_stat_write; --atomic_t rdma_stat_sq_starve; --atomic_t rdma_stat_rq_starve; --atomic_t rdma_stat_rq_poll; --atomic_t rdma_stat_rq_prod; --atomic_t rdma_stat_sq_poll; --atomic_t rdma_stat_sq_prod; -+atomic_unchecked_t rdma_stat_recv; -+atomic_unchecked_t rdma_stat_read; -+atomic_unchecked_t rdma_stat_write; -+atomic_unchecked_t rdma_stat_sq_starve; -+atomic_unchecked_t rdma_stat_rq_starve; -+atomic_unchecked_t rdma_stat_rq_poll; -+atomic_unchecked_t rdma_stat_rq_prod; -+atomic_unchecked_t rdma_stat_sq_poll; -+atomic_unchecked_t rdma_stat_sq_prod; - - /* Temporary NFS request map and context caches */ - struct kmem_cache *svc_rdma_map_cachep; -@@ 
-109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write, - len -= *ppos; - if (len > *lenp) - len = *lenp; -- if (len && copy_to_user(buffer, str_buf, len)) -+ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len))) - return -EFAULT; - *lenp = len; - *ppos += len; -@@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = { - { - .procname = "rdma_stat_read", - .data = &rdma_stat_read, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_recv", - .data = &rdma_stat_recv, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_write", - .data = &rdma_stat_write, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_sq_starve", - .data = &rdma_stat_sq_starve, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_rq_starve", - .data = &rdma_stat_rq_starve, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_rq_poll", - .data = &rdma_stat_rq_poll, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_rq_prod", - .data = &rdma_stat_rq_prod, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_sq_poll", - .data = &rdma_stat_sq_poll, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, - { - .procname = "rdma_stat_sq_prod", - .data = &rdma_stat_sq_prod, -- .maxlen = sizeof(atomic_t), -+ .maxlen = sizeof(atomic_unchecked_t), - .mode = 0644, - .proc_handler = read_reset_stat, - }, -diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c -index df67211..c354b13 100644 ---- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c -+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c -@@ -499,7 +499,7 @@ next_sge: - svc_rdma_put_context(ctxt, 0); - goto out; - } -- atomic_inc(&rdma_stat_read); -+ atomic_inc_unchecked(&rdma_stat_read); - - if (read_wr.num_sge < chl_map->ch[ch_no].count) { - chl_map->ch[ch_no].count -= read_wr.num_sge; -@@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) - dto_q); - list_del_init(&ctxt->dto_q); - } else { -- atomic_inc(&rdma_stat_rq_starve); -+ atomic_inc_unchecked(&rdma_stat_rq_starve); - clear_bit(XPT_DATA, &xprt->xpt_flags); - ctxt = NULL; - } -@@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) - dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", - ctxt, rdma_xprt, rqstp, ctxt->wc_status); - BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); -- atomic_inc(&rdma_stat_recv); -+ atomic_inc_unchecked(&rdma_stat_recv); - - /* Build up the XDR from the receive buffers. 
*/ - rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); -diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c -index 249a835..fb2794b 100644 ---- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c -+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c -@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, - write_wr.wr.rdma.remote_addr = to; - - /* Post It */ -- atomic_inc(&rdma_stat_write); -+ atomic_inc_unchecked(&rdma_stat_write); - if (svc_rdma_send(xprt, &write_wr)) - goto err; - return 0; -diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c -index a385430..32254ea 100644 ---- a/net/sunrpc/xprtrdma/svc_rdma_transport.c -+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c -@@ -299,7 +299,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) - return; - - ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); -- atomic_inc(&rdma_stat_rq_poll); -+ atomic_inc_unchecked(&rdma_stat_rq_poll); - - while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { - ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; -@@ -321,7 +321,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) - } - - if (ctxt) -- atomic_inc(&rdma_stat_rq_prod); -+ atomic_inc_unchecked(&rdma_stat_rq_prod); - - set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); - /* -@@ -393,7 +393,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) - return; - - ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); -- atomic_inc(&rdma_stat_sq_poll); -+ atomic_inc_unchecked(&rdma_stat_sq_poll); - while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { - if (wc.status != IB_WC_SUCCESS) - /* Close the transport */ -@@ -411,7 +411,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) - } - - if (ctxt) -- atomic_inc(&rdma_stat_sq_prod); -+ atomic_inc_unchecked(&rdma_stat_sq_prod); - } - - static void sq_comp_handler(struct ib_cq *cq, void *cq_context) -@@ -1273,7 +1273,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) - spin_lock_bh(&xprt->sc_lock); - if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { - spin_unlock_bh(&xprt->sc_lock); -- atomic_inc(&rdma_stat_sq_starve); -+ atomic_inc_unchecked(&rdma_stat_sq_starve); - - /* See if we can opportunistically reap SQ WR to make room */ - sq_cq_reap(xprt); -diff --git a/net/sysctl_net.c b/net/sysctl_net.c -index ca84212..3aa338f 100644 ---- a/net/sysctl_net.c -+++ b/net/sysctl_net.c -@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root, - struct ctl_table *table) - { - /* Allow network administrator to have same access as root. 
*/ -- if (capable(CAP_NET_ADMIN)) { -+ if (capable_nolog(CAP_NET_ADMIN)) { - int mode = (table->mode >> 6) & 7; - return (mode << 6) | (mode << 3) | mode; - } -diff --git a/net/tipc/link.c b/net/tipc/link.c -index f89570c..016cf63 100644 ---- a/net/tipc/link.c -+++ b/net/tipc/link.c -@@ -1170,7 +1170,7 @@ static int link_send_sections_long(struct tipc_port *sender, - struct tipc_msg fragm_hdr; - struct sk_buff *buf, *buf_chain, *prev; - u32 fragm_crs, fragm_rest, hsz, sect_rest; -- const unchar *sect_crs; -+ const unchar __user *sect_crs; - int curr_sect; - u32 fragm_no; - -@@ -1214,7 +1214,7 @@ again: - - if (!sect_rest) { - sect_rest = msg_sect[++curr_sect].iov_len; -- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base; -+ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base; - } - - if (sect_rest < fragm_rest) -@@ -1233,7 +1233,7 @@ error: - } - } else - skb_copy_to_linear_data_offset(buf, fragm_crs, -- sect_crs, sz); -+ (const void __force_kernel *)sect_crs, sz); - sect_crs += sz; - sect_rest -= sz; - fragm_crs += sz; -diff --git a/net/tipc/msg.c b/net/tipc/msg.c -index 83d5096..dcba497 100644 ---- a/net/tipc/msg.c -+++ b/net/tipc/msg.c -@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, - msg_sect[cnt].iov_len); - else - skb_copy_to_linear_data_offset(*buf, pos, -- msg_sect[cnt].iov_base, -+ (const void __force_kernel *)msg_sect[cnt].iov_base, - msg_sect[cnt].iov_len); - pos += msg_sect[cnt].iov_len; - } -diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c -index 6cf7268..7a488ce 100644 ---- a/net/tipc/subscr.c -+++ b/net/tipc/subscr.c -@@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub, - { - struct iovec msg_sect; - -- msg_sect.iov_base = (void *)&sub->evt; -+ msg_sect.iov_base = (void __force_user *)&sub->evt; - msg_sect.iov_len = sizeof(struct tipc_event); - - sub->evt.event = htohl(event, sub->swap); -diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c -index ec68e1c..fdd792f 100644 ---- a/net/unix/af_unix.c -+++ b/net/unix/af_unix.c -@@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net, - err = -ECONNREFUSED; - if (!S_ISSOCK(inode->i_mode)) - goto put_fail; -+ -+ if (!gr_acl_handle_unix(path.dentry, path.mnt)) { -+ err = -EACCES; -+ goto put_fail; -+ } -+ - u = unix_find_socket_byinode(inode); - if (!u) - goto put_fail; -@@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net, - if (u) { - struct dentry *dentry; - dentry = unix_sk(u)->dentry; -+ -+ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) { -+ err = -EPERM; -+ sock_put(u); -+ goto fail; -+ } -+ - if (dentry) - touch_atime(unix_sk(u)->mnt, dentry); - } else -@@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) - err = security_path_mknod(&path, dentry, mode, 0); - if (err) - goto out_mknod_drop_write; -+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { -+ err = -EACCES; -+ goto out_mknod_drop_write; -+ } - err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); - out_mknod_drop_write: - mnt_drop_write(path.mnt); - if (err) - goto out_mknod_dput; -+ -+ gr_handle_create(dentry, path.mnt); -+ - mutex_unlock(&path.dentry->d_inode->i_mutex); - dput(path.dentry); - path.dentry = dentry; -diff --git a/net/wireless/core.h b/net/wireless/core.h -index 8672e02..48782dd 100644 ---- a/net/wireless/core.h -+++ b/net/wireless/core.h -@@ -27,7 +27,7 @@ struct cfg80211_registered_device { - struct mutex mtx; - - /* rfkill support */ -- struct rfkill_ops 
rfkill_ops; -+ rfkill_ops_no_const rfkill_ops; - struct rfkill *rfkill; - struct work_struct rfkill_sync; - -diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c -index fdbc23c..212d53e 100644 ---- a/net/wireless/wext-core.c -+++ b/net/wireless/wext-core.c -@@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, - */ - - /* Support for very large requests */ -- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && -- (user_length > descr->max_tokens)) { -+ if (user_length > descr->max_tokens) { - /* Allow userspace to GET more than max so - * we can support any size GET requests. - * There is still a limit : -ENOMEM. -@@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, - } - } - -- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { -- /* -- * If this is a GET, but not NOMAX, it means that the extra -- * data is not bounded by userspace, but by max_tokens. Thus -- * set the length to max_tokens. This matches the extra data -- * allocation. -- * The driver should fill it with the number of tokens it -- * provided, and it may check iwp->length rather than having -- * knowledge of max_tokens. If the driver doesn't change the -- * iwp->length, this ioctl just copies back max_token tokens -- * filled with zeroes. Hopefully the driver isn't claiming -- * them to be valid data. -- */ -- iwp->length = descr->max_tokens; -- } -- - err = handler(dev, info, (union iwreq_data *) iwp, extra); - - iwp->length += essid_compat; -diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c -index 552df27..8e7f238 100644 ---- a/net/xfrm/xfrm_policy.c -+++ b/net/xfrm/xfrm_policy.c -@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) - { - policy->walk.dead = 1; - -- atomic_inc(&policy->genid); -+ atomic_inc_unchecked(&policy->genid); - - if (del_timer(&policy->timer)) - xfrm_pol_put(policy); -@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) - hlist_add_head(&policy->bydst, chain); - xfrm_pol_hold(policy); - net->xfrm.policy_count[dir]++; -- atomic_inc(&flow_cache_genid); -+ atomic_inc_unchecked(&flow_cache_genid); - if (delpol) - __xfrm_policy_unlink(delpol, dir); - policy->index = delpol ? 
delpol->index : xfrm_gen_index(net, dir); -@@ -1530,7 +1530,7 @@ free_dst: - goto out; - } - --static int inline -+static inline int - xfrm_dst_alloc_copy(void **target, const void *src, int size) - { - if (!*target) { -@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size) - return 0; - } - --static int inline -+static inline int - xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) - { - #ifdef CONFIG_XFRM_SUB_POLICY -@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) - #endif - } - --static int inline -+static inline int - xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) - { - #ifdef CONFIG_XFRM_SUB_POLICY -@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, - - xdst->num_pols = num_pols; - memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); -- xdst->policy_genid = atomic_read(&pols[0]->genid); -+ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid); - - return xdst; - } -@@ -2335,7 +2335,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first) - if (xdst->xfrm_genid != dst->xfrm->genid) - return 0; - if (xdst->num_pols > 0 && -- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) -+ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid)) - return 0; - - mtu = dst_mtu(dst->child); -@@ -2870,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, - sizeof(pol->xfrm_vec[i].saddr)); - pol->xfrm_vec[i].encap_family = mp->new_family; - /* flush bundles */ -- atomic_inc(&pol->genid); -+ atomic_inc_unchecked(&pol->genid); - } - } - -diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c -index 0256b8a..9341ef6 100644 ---- a/net/xfrm/xfrm_user.c -+++ b/net/xfrm/xfrm_user.c -@@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) - struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; - int i; - -+ pax_track_stack(); -+ - if (xp->xfrm_nr == 0) - return 0; - -@@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, - int err; - int n = 0; - -+ pax_track_stack(); -+ - if (attrs[XFRMA_MIGRATE] == NULL) - return -EINVAL; - -diff --git a/scripts/Makefile.build b/scripts/Makefile.build -index a0fd502..a8e6e83 100644 ---- a/scripts/Makefile.build -+++ b/scripts/Makefile.build -@@ -109,7 +109,7 @@ endif - endif - - # Do not include host rules unless needed --ifneq ($(hostprogs-y)$(hostprogs-m),) -+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),) - include scripts/Makefile.host - endif - -diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean -index 686cb0d..9d653bf 100644 ---- a/scripts/Makefile.clean -+++ b/scripts/Makefile.clean -@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn)) - __clean-files := $(extra-y) $(always) \ - $(targets) $(clean-files) \ - $(host-progs) \ -- $(hostprogs-y) $(hostprogs-m) $(hostprogs-) -+ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \ -+ $(hostlibs-y) $(hostlibs-m) $(hostlibs-) - - __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) - -diff --git a/scripts/Makefile.host b/scripts/Makefile.host -index 1ac414f..a1c1451 100644 ---- a/scripts/Makefile.host -+++ b/scripts/Makefile.host -@@ -31,6 +31,7 @@ - # Note: Shared libraries consisting of C++ files are not supported - - __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m)) -+__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m)) - - # C code - # Executables compiled from a single .c file -@@ -54,6 
+55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs))) - # Shared libaries (only .c supported) - # Shared libraries (.so) - all .so files referenced in "xxx-objs" - host-cshlib := $(sort $(filter %.so, $(host-cobjs))) -+host-cshlib += $(sort $(filter %.so, $(__hostlibs))) - # Remove .so files from "xxx-objs" - host-cobjs := $(filter-out %.so,$(host-cobjs)) - -diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c -index 291228e..6c55203 100644 ---- a/scripts/basic/fixdep.c -+++ b/scripts/basic/fixdep.c -@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz) - /* - * Lookup a value in the configuration string. - */ --static int is_defined_config(const char *name, int len, unsigned int hash) -+static int is_defined_config(const char *name, unsigned int len, unsigned int hash) - { - struct item *aux; - -@@ -211,10 +211,10 @@ static void clear_config(void) - /* - * Record the use of a CONFIG_* word. - */ --static void use_config(const char *m, int slen) -+static void use_config(const char *m, unsigned int slen) - { - unsigned int hash = strhash(m, slen); -- int c, i; -+ unsigned int c, i; - - if (is_defined_config(m, slen, hash)) - return; -@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen) - - static void parse_config_file(const char *map, size_t len) - { -- const int *end = (const int *) (map + len); -+ const unsigned int *end = (const unsigned int *) (map + len); - /* start at +1, so that p can never be < map */ -- const int *m = (const int *) map + 1; -+ const unsigned int *m = (const unsigned int *) map + 1; - const char *p, *q; - - for (; m < end; m++) { -@@ -405,7 +405,7 @@ static void print_deps(void) - static void traps(void) - { - static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; -- int *p = (int *)test; -+ unsigned int *p = (unsigned int *)test; - - if (*p != INT_CONF) { - fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? 
%#x\n", -diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh -new file mode 100644 -index 0000000..8729101 ---- /dev/null -+++ b/scripts/gcc-plugin.sh -@@ -0,0 +1,2 @@ -+#!/bin/sh -+echo -e "#include "gcc-plugin.h"\n#include "tree.h"\n#include "tm.h"\n#include "rtl.h"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y" -diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c -index e26e2fb..f84937b 100644 ---- a/scripts/mod/file2alias.c -+++ b/scripts/mod/file2alias.c -@@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id, - unsigned long size, unsigned long id_size, - void *symval) - { -- int i; -+ unsigned int i; - - if (size % id_size || size < id_size) { - if (cross_build != 0) -@@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id, - /* USB is special because the bcdDevice can be matched against a numeric range */ - /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ - static void do_usb_entry(struct usb_device_id *id, -- unsigned int bcdDevice_initial, int bcdDevice_initial_digits, -+ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits, - unsigned char range_lo, unsigned char range_hi, - unsigned char max, struct module *mod) - { -@@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) - { - unsigned int devlo, devhi; - unsigned char chi, clo, max; -- int ndigits; -+ unsigned int ndigits; - - id->match_flags = TO_NATIVE(id->match_flags); - id->idVendor = TO_NATIVE(id->idVendor); -@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size, - for (i = 0; i < count; i++) { - const char *id = (char *)devs[i].id; - char acpi_id[sizeof(devs[0].id)]; -- int j; -+ unsigned int j; - - buf_printf(&mod->dev_table_buf, - "MODULE_ALIAS("pnp:d%s*");\n", id); -@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, - - for (j = 0; j < PNP_MAX_DEVICES; j++) { - const char *id = (char *)card->devs[j].id; -- int i2, j2; -+ unsigned int i2, j2; - int dup = 0; - - if (!id[0]) -@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, - /* add an individual alias for every device entry */ - if (!dup) { - char acpi_id[sizeof(card->devs[0].id)]; -- int k; -+ unsigned int k; - - buf_printf(&mod->dev_table_buf, - "MODULE_ALIAS("pnp:d%s*");\n", id); -@@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, const char *s) - static int do_dmi_entry(const char *filename, struct dmi_system_id *id, - char *alias) - { -- int i, j; -+ unsigned int i, j; - - sprintf(alias, "dmi*"); - -diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c -index a509ff8..5822633 100644 ---- a/scripts/mod/modpost.c -+++ b/scripts/mod/modpost.c -@@ -919,6 +919,7 @@ enum mismatch { - ANY_INIT_TO_ANY_EXIT, - ANY_EXIT_TO_ANY_INIT, - EXPORT_TO_INIT_EXIT, -+ DATA_TO_TEXT - }; - - struct sectioncheck { -@@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = { - .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, - .mismatch = EXPORT_TO_INIT_EXIT, - .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, -+}, -+/* Do not reference code from writable data */ -+{ -+ .fromsec = { DATA_SECTIONS, NULL }, -+ .tosec = { TEXT_SECTIONS, NULL }, -+ .mismatch = DATA_TO_TEXT - } - }; - -@@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, - continue; - if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) - continue; -- if 
(sym->st_value == addr) -- return sym; - /* Find a symbol nearby - addr are maybe negative */ - d = sym->st_value - addr; -+ if (d == 0) -+ return sym; - if (d < 0) - d = addr - sym->st_value; - if (d < distance) { -@@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname, - tosym, prl_to, prl_to, tosym); - free(prl_to); - break; -+ case DATA_TO_TEXT: -+/* -+ fprintf(stderr, -+ "The variable %s references\n" -+ "the %s %s%s%s\n", -+ fromsym, to, sec2annotation(tosec), tosym, to_p); -+*/ -+ break; - } - fprintf(stderr, "\n"); - } -@@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf, - static void check_sec_ref(struct module *mod, const char *modname, - struct elf_info *elf) - { -- int i; -+ unsigned int i; - Elf_Shdr *sechdrs = elf->sechdrs; - - /* Walk through all sections */ -@@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf, - va_end(ap); - } - --void buf_write(struct buffer *buf, const char *s, int len) -+void buf_write(struct buffer *buf, const char *s, unsigned int len) - { - if (buf->size - buf->pos < len) { - buf->size += len + SZ; -@@ -1966,7 +1981,7 @@ static void write_if_changed(struct buffer *b, const char *fname) - if (fstat(fileno(file), &st) < 0) - goto close_write; - -- if (st.st_size != b->pos) -+ if (st.st_size != (off_t)b->pos) - goto close_write; - - tmp = NOFAIL(malloc(b->pos)); -diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h -index 2031119..b5433af 100644 ---- a/scripts/mod/modpost.h -+++ b/scripts/mod/modpost.h -@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr); - - struct buffer { - char *p; -- int pos; -- int size; -+ unsigned int pos; -+ unsigned int size; - }; - - void __attribute__((format(printf, 2, 3))) - buf_printf(struct buffer *buf, const char *fmt, ...); - - void --buf_write(struct buffer *buf, const char *s, int len); -+buf_write(struct buffer *buf, const char *s, unsigned int len); - - struct module { - struct module *next; -diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c -index 9dfcd6d..099068e 100644 ---- a/scripts/mod/sumversion.c -+++ b/scripts/mod/sumversion.c -@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum, - goto out; - } - -- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { -+ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) { - warn("writing sum in %s failed: %s\n", - filename, strerror(errno)); - goto out; -diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c -index 5c11312..72742b5 100644 ---- a/scripts/pnmtologo.c -+++ b/scripts/pnmtologo.c -@@ -237,14 +237,14 @@ static void write_header(void) - fprintf(out, " * Linux logo %s\n", logoname); - fputs(" */\n\n", out); - fputs("#include <linux/linux_logo.h>\n\n", out); -- fprintf(out, "static unsigned char %s_data[] __initdata = {\n", -+ fprintf(out, "static unsigned char %s_data[] = {\n", - logoname); - } - - static void write_footer(void) - { - fputs("\n};\n\n", out); -- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); -+ fprintf(out, "const struct linux_logo %s = {\n", logoname); - fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); - fprintf(out, "\t.width\t\t= %d,\n", logo_width); - fprintf(out, "\t.height\t\t= %d,\n", logo_height); -@@ -374,7 +374,7 @@ static void write_logo_clut224(void) - fputs("\n};\n\n", out); - - /* write logo clut */ -- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", -+ fprintf(out, "static unsigned char %s_clut[] = {\n", - 
logoname); - write_hex_cnt = 0; - for (i = 0; i < logo_clutsize; i++) { -diff --git a/security/Kconfig b/security/Kconfig -index e0f08b5..7388edd 100644 ---- a/security/Kconfig -+++ b/security/Kconfig -@@ -4,6 +4,586 @@ - - menu "Security options" - -+source grsecurity/Kconfig -+ -+menu "PaX" -+ -+ config ARCH_TRACK_EXEC_LIMIT -+ bool -+ -+ config PAX_KERNEXEC_PLUGIN -+ bool -+ -+ config PAX_PER_CPU_PGD -+ bool -+ -+ config TASK_SIZE_MAX_SHIFT -+ int -+ depends on X86_64 -+ default 47 if !PAX_PER_CPU_PGD -+ default 42 if PAX_PER_CPU_PGD -+ -+ config PAX_ENABLE_PAE -+ bool -+ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM)) -+ -+config PAX -+ bool "Enable various PaX features" -+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) -+ help -+ This allows you to enable various PaX features. PaX adds -+ intrusion prevention mechanisms to the kernel that reduce -+ the risks posed by exploitable memory corruption bugs. -+ -+menu "PaX Control" -+ depends on PAX -+ -+config PAX_SOFTMODE -+ bool 'Support soft mode' -+ select PAX_PT_PAX_FLAGS -+ help -+ Enabling this option will allow you to run PaX in soft mode, that -+ is, PaX features will not be enforced by default, only on executables -+ marked explicitly. You must also enable PT_PAX_FLAGS support as it -+ is the only way to mark executables for soft mode use. -+ -+ Soft mode can be activated by using the "pax_softmode=1" kernel command -+ line option on boot. Furthermore you can control various PaX features -+ at runtime via the entries in /proc/sys/kernel/pax. -+ -+config PAX_EI_PAX -+ bool 'Use legacy ELF header marking' -+ help -+ Enabling this option will allow you to control PaX features on -+ a per executable basis via the 'chpax' utility available at -+ http://pax.grsecurity.net/. The control flags will be read from -+ an otherwise reserved part of the ELF header. This marking has -+ numerous drawbacks (no support for soft-mode, toolchain does not -+ know about the non-standard use of the ELF header) therefore it -+ has been deprecated in favour of PT_PAX_FLAGS support. -+ -+ Note that if you enable PT_PAX_FLAGS marking support as well, -+ the PT_PAX_FLAG marks will override the legacy EI_PAX marks. -+ -+config PAX_PT_PAX_FLAGS -+ bool 'Use ELF program header marking' -+ help -+ Enabling this option will allow you to control PaX features on -+ a per executable basis via the 'paxctl' utility available at -+ http://pax.grsecurity.net/. The control flags will be read from -+ a PaX specific ELF program header (PT_PAX_FLAGS). This marking -+ has the benefits of supporting both soft mode and being fully -+ integrated into the toolchain (the binutils patch is available -+ from http://pax.grsecurity.net). -+ -+ If your toolchain does not support PT_PAX_FLAGS markings, -+ you can create one in most cases with 'paxctl -C'. -+ -+ Note that if you enable the legacy EI_PAX marking support as well, -+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. -+ -+choice -+ prompt 'MAC system integration' -+ default PAX_HAVE_ACL_FLAGS -+ help -+ Mandatory Access Control systems have the option of controlling -+ PaX flags on a per executable basis, choose the method supported -+ by your particular system. -+ -+ - "none": if your MAC system does not interact with PaX, -+ - "direct": if your MAC system defines pax_set_initial_flags() itself, -+ - "hook": if your MAC system uses the pax_set_initial_flags_func callback. -+ -+ NOTE: this option is for developers/integrators only. 
-+ -+ config PAX_NO_ACL_FLAGS -+ bool 'none' -+ -+ config PAX_HAVE_ACL_FLAGS -+ bool 'direct' -+ -+ config PAX_HOOK_ACL_FLAGS -+ bool 'hook' -+endchoice -+ -+endmenu -+ -+menu "Non-executable pages" -+ depends on PAX -+ -+config PAX_NOEXEC -+ bool "Enforce non-executable pages" -+ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86) -+ help -+ By design some architectures do not allow for protecting memory -+ pages against execution or even if they do, Linux does not make -+ use of this feature. In practice this means that if a page is -+ readable (such as the stack or heap) it is also executable. -+ -+ There is a well known exploit technique that makes use of this -+ fact and a common programming mistake where an attacker can -+ introduce code of his choice somewhere in the attacked program's -+ memory (typically the stack or the heap) and then execute it. -+ -+ If the attacked program was running with different (typically -+ higher) privileges than that of the attacker, then he can elevate -+ his own privilege level (e.g. get a root shell, write to files for -+ which he does not have write access to, etc). -+ -+ Enabling this option will let you choose from various features -+ that prevent the injection and execution of 'foreign' code in -+ a program. -+ -+ This will also break programs that rely on the old behaviour and -+ expect that dynamically allocated memory via the malloc() family -+ of functions is executable (which it is not). Notable examples -+ are the XFree86 4.x server, the java runtime and wine. -+ -+config PAX_PAGEEXEC -+ bool "Paging based non-executable pages" -+ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7) -+ select S390_SWITCH_AMODE if S390 -+ select S390_EXEC_PROTECT if S390 -+ select ARCH_TRACK_EXEC_LIMIT if X86_32 -+ help -+ This implementation is based on the paging feature of the CPU. -+ On i386 without hardware non-executable bit support there is a -+ variable but usually low performance impact, however on Intel's -+ P4 core based CPUs it is very high so you should not enable this -+ for kernels meant to be used on such CPUs. -+ -+ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 -+ with hardware non-executable bit support there is no performance -+ impact, on ppc the impact is negligible. -+ -+ Note that several architectures require various emulations due to -+ badly designed userland ABIs, this will cause a performance impact -+ but will disappear as soon as userland is fixed. For example, ppc -+ userland MUST have been built with secure-plt by a recent toolchain. -+ -+config PAX_SEGMEXEC -+ bool "Segmentation based non-executable pages" -+ depends on PAX_NOEXEC && X86_32 -+ help -+ This implementation is based on the segmentation feature of the -+ CPU and has a very small performance impact, however applications -+ will be limited to a 1.5 GB address space instead of the normal -+ 3 GB. -+ -+config PAX_EMUTRAMP -+ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86) -+ default y if PARISC -+ help -+ There are some programs and libraries that for one reason or -+ another attempt to execute special small code snippets from -+ non-executable memory pages. 
Most notable examples are the -+ signal handler return code generated by the kernel itself and -+ the GCC trampolines. -+ -+ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then -+ such programs will no longer work under your kernel. -+ -+ As a remedy you can say Y here and use the 'chpax' or 'paxctl' -+ utilities to enable trampoline emulation for the affected programs -+ yet still have the protection provided by the non-executable pages. -+ -+ On parisc you MUST enable this option and EMUSIGRT as well, otherwise -+ your system will not even boot. -+ -+ Alternatively you can say N here and use the 'chpax' or 'paxctl' -+ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC -+ for the affected files. -+ -+ NOTE: enabling this feature *may* open up a loophole in the -+ protection provided by non-executable pages that an attacker -+ could abuse. Therefore the best solution is to not have any -+ files on your system that would require this option. This can -+ be achieved by not using libc5 (which relies on the kernel -+ signal handler return code) and not using or rewriting programs -+ that make use of the nested function implementation of GCC. -+ Skilled users can just fix GCC itself so that it implements -+ nested function calls in a way that does not interfere with PaX. -+ -+config PAX_EMUSIGRT -+ bool "Automatically emulate sigreturn trampolines" -+ depends on PAX_EMUTRAMP && PARISC -+ default y -+ help -+ Enabling this option will have the kernel automatically detect -+ and emulate signal return trampolines executing on the stack -+ that would otherwise lead to task termination. -+ -+ This solution is intended as a temporary one for users with -+ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, -+ Modula-3 runtime, etc) or executables linked to such, basically -+ everything that does not specify its own SA_RESTORER function in -+ normal executable memory like glibc 2.1+ does. -+ -+ On parisc you MUST enable this option, otherwise your system will -+ not even boot. -+ -+ NOTE: this feature cannot be disabled on a per executable basis -+ and since it *does* open up a loophole in the protection provided -+ by non-executable pages, the best solution is to not have any -+ files on your system that would require this option. -+ -+config PAX_MPROTECT -+ bool "Restrict mprotect()" -+ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) -+ help -+ Enabling this option will prevent programs from -+ - changing the executable status of memory pages that were -+ not originally created as executable, -+ - making read-only executable pages writable again, -+ - creating executable pages from anonymous memory, -+ - making read-only-after-relocations (RELRO) data pages writable again. -+ -+ You should say Y here to complete the protection provided by -+ the enforcement of non-executable pages. -+ -+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control -+ this feature on a per file basis. -+ -+config PAX_MPROTECT_COMPAT -+ bool "Use legacy/compat protection demoting (read help)" -+ depends on PAX_MPROTECT -+ default n -+ help -+ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects -+ by sending the proper error code to the application. For some broken -+ userland, this can cause problems with Python or other applications. The -+ current implementation however allows for applications like clamav to -+ detect if JIT compilation/execution is allowed and to fall back gracefully -+ to an interpreter-based mode if it does not. 
While we encourage everyone -+ to use the current implementation as-is and push upstream to fix broken -+ userland (note that the RWX logging option can assist with this), in some -+ environments this may not be possible. Having to disable MPROTECT -+ completely on certain binaries reduces the security benefit of PaX, -+ so this option is provided for those environments to revert to the old -+ behavior. -+ -+config PAX_ELFRELOCS -+ bool "Allow ELF text relocations (read help)" -+ depends on PAX_MPROTECT -+ default n -+ help -+ Non-executable pages and mprotect() restrictions are effective -+ in preventing the introduction of new executable code into an -+ attacked task's address space. There remain only two venues -+ for this kind of attack: if the attacker can execute already -+ existing code in the attacked task then he can either have it -+ create and mmap() a file containing his code or have it mmap() -+ an already existing ELF library that does not have position -+ independent code in it and use mprotect() on it to make it -+ writable and copy his code there. While protecting against -+ the former approach is beyond PaX, the latter can be prevented -+ by having only PIC ELF libraries on one's system (which do not -+ need to relocate their code). If you are sure this is your case, -+ as is the case with all modern Linux distributions, then leave -+ this option disabled. You should say 'n' here. -+ -+config PAX_ETEXECRELOCS -+ bool "Allow ELF ET_EXEC text relocations" -+ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC) -+ select PAX_ELFRELOCS -+ default y -+ help -+ On some architectures there are incorrectly created applications -+ that require text relocations and would not work without enabling -+ this option. If you are an alpha, ia64 or parisc user, you should -+ enable this option and disable it once you have made sure that -+ none of your applications need it. -+ -+config PAX_EMUPLT -+ bool "Automatically emulate ELF PLT" -+ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC) -+ default y -+ help -+ Enabling this option will have the kernel automatically detect -+ and emulate the Procedure Linkage Table entries in ELF files. -+ On some architectures such entries are in writable memory, and -+ become non-executable leading to task termination. Therefore -+ it is mandatory that you enable this option on alpha, parisc, -+ sparc and sparc64, otherwise your system would not even boot. -+ -+ NOTE: this feature *does* open up a loophole in the protection -+ provided by the non-executable pages, therefore the proper -+ solution is to modify the toolchain to produce a PLT that does -+ not need to be writable. -+ -+config PAX_DLRESOLVE -+ bool 'Emulate old glibc resolver stub' -+ depends on PAX_EMUPLT && SPARC -+ default n -+ help -+ This option is needed if userland has an old glibc (before 2.4) -+ that puts a 'save' instruction into the runtime generated resolver -+ stub that needs special emulation. -+ -+config PAX_KERNEXEC -+ bool "Enforce non-executable kernel pages" -+ depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN -+ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE) -+ select PAX_KERNEXEC_PLUGIN if X86_64 -+ help -+ This is the kernel land equivalent of PAGEEXEC and MPROTECT, -+ that is, enabling this option will make it harder to inject -+ and execute 'foreign' code in kernel memory itself. -+ -+ Note that on x86_64 kernels there is a known regression when -+ this feature and KVM/VMX are both enabled in the host kernel. 
-+ -+choice -+ prompt "Return Address Instrumentation Method" -+ default PAX_KERNEXEC_PLUGIN_METHOD_BTS -+ depends on PAX_KERNEXEC_PLUGIN -+ help -+ Select the method used to instrument function pointer dereferences. -+ Note that binary modules cannot be instrumented by this approach. -+ -+ config PAX_KERNEXEC_PLUGIN_METHOD_BTS -+ bool "bts" -+ help -+ This method is compatible with binary only modules but has -+ a higher runtime overhead. -+ -+ config PAX_KERNEXEC_PLUGIN_METHOD_OR -+ bool "or" -+ depends on !PARAVIRT -+ help -+ This method is incompatible with binary only modules but has -+ a lower runtime overhead. -+endchoice -+ -+config PAX_KERNEXEC_PLUGIN_METHOD -+ string -+ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS -+ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR -+ default "" -+ -+config PAX_KERNEXEC_MODULE_TEXT -+ int "Minimum amount of memory reserved for module code" -+ default "4" -+ depends on PAX_KERNEXEC && X86_32 && MODULES -+ help -+ Due to implementation details the kernel must reserve a fixed -+ amount of memory for module code at compile time that cannot be -+ changed at runtime. Here you can specify the minimum amount -+ in MB that will be reserved. Due to the same implementation -+ details this size will always be rounded up to the next 2/4 MB -+ boundary (depends on PAE) so the actually available memory for -+ module code will usually be more than this minimum. -+ -+ The default 4 MB should be enough for most users but if you have -+ an excessive number of modules (e.g., most distribution configs -+ compile many drivers as modules) or use huge modules such as -+ nvidia's kernel driver, you will need to adjust this amount. -+ A good rule of thumb is to look at your currently loaded kernel -+ modules and add up their sizes. -+ -+endmenu -+ -+menu "Address Space Layout Randomization" -+ depends on PAX -+ -+config PAX_ASLR -+ bool "Address Space Layout Randomization" -+ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS -+ help -+ Many if not most exploit techniques rely on the knowledge of -+ certain addresses in the attacked program. The following options -+ will allow the kernel to apply a certain amount of randomization -+ to specific parts of the program thereby forcing an attacker to -+ guess them in most cases. Any failed guess will most likely crash -+ the attacked program which allows the kernel to detect such attempts -+ and react on them. PaX itself provides no reaction mechanisms, -+ instead it is strongly encouraged that you make use of Nergal's -+ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's -+ (http://www.grsecurity.net/) built-in crash detection features or -+ develop one yourself. -+ -+ By saying Y here you can choose to randomize the following areas: -+ - top of the task's kernel stack -+ - top of the task's userland stack -+ - base address for mmap() requests that do not specify one -+ (this includes all libraries) -+ - base address of the main executable -+ -+ It is strongly recommended to say Y here as address space layout -+ randomization has negligible impact on performance yet it provides -+ a very effective protection. -+ -+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control -+ this feature on a per file basis. -+ -+config PAX_RANDKSTACK -+ bool "Randomize kernel stack base" -+ depends on X86_TSC && X86 -+ help -+ By saying Y here the kernel will randomize every task's kernel -+ stack on every system call. 
This will not only force an attacker -+ to guess it but also prevent him from making use of possible -+ leaked information about it. -+ -+ Since the kernel stack is a rather scarce resource, randomization -+ may cause unexpected stack overflows, therefore you should very -+ carefully test your system. Note that once enabled in the kernel -+ configuration, this feature cannot be disabled on a per file basis. -+ -+config PAX_RANDUSTACK -+ bool "Randomize user stack base" -+ depends on PAX_ASLR -+ help -+ By saying Y here the kernel will randomize every task's userland -+ stack. The randomization is done in two steps where the second -+ one may apply a big amount of shift to the top of the stack and -+ cause problems for programs that want to use lots of memory (more -+ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). -+ For this reason the second step can be controlled by 'chpax' or -+ 'paxctl' on a per file basis. -+ -+config PAX_RANDMMAP -+ bool "Randomize mmap() base" -+ depends on PAX_ASLR -+ help -+ By saying Y here the kernel will use a randomized base address for -+ mmap() requests that do not specify one themselves. As a result -+ all dynamically loaded libraries will appear at random addresses -+ and therefore be harder to exploit by a technique where an attacker -+ attempts to execute library code for his purposes (e.g. spawn a -+ shell from an exploited program that is running at an elevated -+ privilege level). -+ -+ Furthermore, if a program is relinked as a dynamic ELF file, its -+ base address will be randomized as well, completing the full -+ randomization of the address space layout. Attacking such programs -+ becomes a guess game. You can find an example of doing this at -+ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at -+ http://www.grsecurity.net/grsec-gcc-specs.tar.gz . -+ -+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this -+ feature on a per file basis. -+ -+endmenu -+ -+menu "Miscellaneous hardening features" -+ -+config PAX_MEMORY_SANITIZE -+ bool "Sanitize all freed memory" -+ help -+ By saying Y here the kernel will erase memory pages as soon as they -+ are freed. This in turn reduces the lifetime of data stored in the -+ pages, making it less likely that sensitive information such as -+ passwords, cryptographic secrets, etc stay in memory for too long. -+ -+ This is especially useful for programs whose runtime is short, long -+ lived processes and the kernel itself benefit from this as long as -+ they operate on whole memory pages and ensure timely freeing of pages -+ that may hold sensitive information. -+ -+ The tradeoff is performance impact, on a single CPU system kernel -+ compilation sees a 3% slowdown, other systems and workloads may vary -+ and you are advised to test this feature on your expected workload -+ before deploying it. -+ -+ Note that this feature does not protect data stored in live pages, -+ e.g., process memory swapped to disk may stay there for a long time. -+ -+config PAX_MEMORY_STACKLEAK -+ bool "Sanitize kernel stack" -+ depends on X86 -+ help -+ By saying Y here the kernel will erase the kernel stack before it -+ returns from a system call. This in turn reduces the information -+ that a kernel stack leak bug can reveal. -+ -+ Note that such a bug can still leak information that was put on -+ the stack by the current system call (the one eventually triggering -+ the bug) but traces of earlier system calls on the kernel stack -+ cannot leak anymore. 
-+ -+ The tradeoff is performance impact: on a single CPU system kernel -+ compilation sees a 1% slowdown, other systems and workloads may vary -+ and you are advised to test this feature on your expected workload -+ before deploying it. -+ -+ Note: full support for this feature requires gcc with plugin support -+ so make sure your compiler is at least gcc 4.5.0 (cross compilation -+ is not supported). Using older gcc versions means that functions -+ with large enough stack frames may leave uninitialized memory behind -+ that may be exposed to a later syscall leaking the stack. -+ -+config PAX_MEMORY_UDEREF -+ bool "Prevent invalid userland pointer dereference" -+ depends on X86 && !UML_X86 && !XEN -+ select PAX_PER_CPU_PGD if X86_64 -+ help -+ By saying Y here the kernel will be prevented from dereferencing -+ userland pointers in contexts where the kernel expects only kernel -+ pointers. This is both a useful runtime debugging feature and a -+ security measure that prevents exploiting a class of kernel bugs. -+ -+ The tradeoff is that some virtualization solutions may experience -+ a huge slowdown and therefore you should not enable this feature -+ for kernels meant to run in such environments. Whether a given VM -+ solution is affected or not is best determined by simply trying it -+ out, the performance impact will be obvious right on boot as this -+ mechanism engages from very early on. A good rule of thumb is that -+ VMs running on CPUs without hardware virtualization support (i.e., -+ the majority of IA-32 CPUs) will likely experience the slowdown. -+ -+config PAX_REFCOUNT -+ bool "Prevent various kernel object reference counter overflows" -+ depends on GRKERNSEC && (X86 || SPARC64) -+ help -+ By saying Y here the kernel will detect and prevent overflowing -+ various (but not all) kinds of object reference counters. Such -+ overflows can normally occur due to bugs only and are often, if -+ not always, exploitable. -+ -+ The tradeoff is that data structures protected by an overflowed -+ refcount will never be freed and therefore will leak memory. Note -+ that this leak also happens even without this protection but in -+ that case the overflow can eventually trigger the freeing of the -+ data structure while it is still being used elsewhere, resulting -+ in the exploitable situation that this feature prevents. -+ -+ Since this has a negligible performance impact, you should enable -+ this feature. -+ -+config PAX_USERCOPY -+ bool "Harden heap object copies between kernel and userland" -+ depends on X86 || PPC || SPARC || ARM -+ depends on GRKERNSEC && (SLAB || SLUB || SLOB) -+ help -+ By saying Y here the kernel will enforce the size of heap objects -+ when they are copied in either direction between the kernel and -+ userland, even if only a part of the heap object is copied. -+ -+ Specifically, this checking prevents information leaking from the -+ kernel heap during kernel to userland copies (if the kernel heap -+ object is otherwise fully initialized) and prevents kernel heap -+ overflows during userland to kernel copies. -+ -+ Note that the current implementation provides the strictest bounds -+ checks for the SLUB allocator. -+ -+ Enabling this option also enables per-slab cache protection against -+ data in a given cache being copied into/out of via userland -+ accessors. Though the whitelist of regions will be reduced over -+ time, it notably protects important data structures like task structs. 
-+ -+ If frame pointers are enabled on x86, this option will also restrict -+ copies into and out of the kernel stack to local variables within a -+ single frame. -+ -+ Since this has a negligible performance impact, you should enable -+ this feature. -+ -+endmenu -+ -+endmenu -+ - config KEYS - bool "Enable access key retention support" - help -@@ -167,7 +747,7 @@ config INTEL_TXT - config LSM_MMAP_MIN_ADDR - int "Low address space for LSM to protect from user allocation" - depends on SECURITY && SECURITY_SELINUX -- default 32768 if ARM -+ default 32768 if ALPHA || ARM || PARISC || SPARC32 - default 65536 - help - This is the portion of low virtual memory which should be protected -diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c -index 3783202..1852837 100644 ---- a/security/apparmor/lsm.c -+++ b/security/apparmor/lsm.c -@@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, - return error; - } - --static struct security_operations apparmor_ops = { -+static struct security_operations apparmor_ops __read_only = { - .name = "apparmor", - - .ptrace_access_check = apparmor_ptrace_access_check, -diff --git a/security/commoncap.c b/security/commoncap.c -index a93b3b7..4410df9 100644 ---- a/security/commoncap.c -+++ b/security/commoncap.c -@@ -28,6 +28,7 @@ - #include <linux/prctl.h> - #include <linux/securebits.h> - #include <linux/user_namespace.h> -+#include <net/sock.h> - - /* - * If a non-root user executes a setuid-root binary in -@@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb) - - int cap_netlink_recv(struct sk_buff *skb, int cap) - { -- if (!cap_raised(current_cap(), cap)) -+ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap)) - return -EPERM; - return 0; - } -@@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm) - { - const struct cred *cred = current_cred(); - -+ if (gr_acl_enable_at_secure()) -+ return 1; -+ - if (cred->uid != 0) { - if (bprm->cap_effective) - return 1; -diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h -index 08408bd..67e6e78 100644 ---- a/security/integrity/ima/ima.h -+++ b/security/integrity/ima/ima.h -@@ -85,8 +85,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, - extern spinlock_t ima_queue_lock; - - struct ima_h_table { -- atomic_long_t len; /* number of stored measurements in the list */ -- atomic_long_t violations; -+ atomic_long_unchecked_t len; /* number of stored measurements in the list */ -+ atomic_long_unchecked_t violations; - struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; - }; - extern struct ima_h_table ima_htable; -diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c -index da36d2c..e1e1965 100644 ---- a/security/integrity/ima/ima_api.c -+++ b/security/integrity/ima/ima_api.c -@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, - int result; - - /* can overflow, only indicator */ -- atomic_long_inc(&ima_htable.violations); -+ atomic_long_inc_unchecked(&ima_htable.violations); - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { -diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c -index ef21b96..d53e674 100644 ---- a/security/integrity/ima/ima_fs.c -+++ b/security/integrity/ima/ima_fs.c -@@ -28,12 +28,12 @@ - static int valid_policy = 1; - #define TMPBUFLEN 12 - static ssize_t ima_show_htable_value(char __user *buf, size_t count, -- loff_t *ppos, atomic_long_t *val) -+ loff_t *ppos, 
atomic_long_unchecked_t *val) - { - char tmpbuf[TMPBUFLEN]; - ssize_t len; - -- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); -+ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); - return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); - } - -diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c -index 8e28f04..d5951b1 100644 ---- a/security/integrity/ima/ima_queue.c -+++ b/security/integrity/ima/ima_queue.c -@@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) - INIT_LIST_HEAD(&qe->later); - list_add_tail_rcu(&qe->later, &ima_measurements); - -- atomic_long_inc(&ima_htable.len); -+ atomic_long_inc_unchecked(&ima_htable.len); - key = ima_hash_key(entry->digest); - hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); - return 0; -diff --git a/security/keys/compat.c b/security/keys/compat.c -index 338b510..a235861 100644 ---- a/security/keys/compat.c -+++ b/security/keys/compat.c -@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov( - if (ret == 0) - goto no_payload_free; - -- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); -+ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); - - if (iov != iovstack) - kfree(iov); -diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c -index eca5191..da9c7f0 100644 ---- a/security/keys/keyctl.c -+++ b/security/keys/keyctl.c -@@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key) - /* - * Copy the iovec data from userspace - */ --static long copy_from_user_iovec(void *buffer, const struct iovec *iov, -+static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov, - unsigned ioc) - { - for (; ioc > 0; ioc--) { -@@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov, - * If successful, 0 will be returned. 
- */ - long keyctl_instantiate_key_common(key_serial_t id, -- const struct iovec *payload_iov, -+ const struct iovec __user *payload_iov, - unsigned ioc, - size_t plen, - key_serial_t ringid) -@@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id, - [0].iov_len = plen - }; - -- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid); -+ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid); - } - - return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); -@@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id, - if (ret == 0) - goto no_payload_free; - -- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); -+ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); - - if (iov != iovstack) - kfree(iov); -diff --git a/security/keys/keyring.c b/security/keys/keyring.c -index 30e242f..ec111ab 100644 ---- a/security/keys/keyring.c -+++ b/security/keys/keyring.c -@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring, - ret = -EFAULT; - - for (loop = 0; loop < klist->nkeys; loop++) { -+ key_serial_t serial; - key = klist->keys[loop]; -+ serial = key->serial; - - tmp = sizeof(key_serial_t); - if (tmp > buflen) - tmp = buflen; - -- if (copy_to_user(buffer, -- &key->serial, -- tmp) != 0) -+ if (copy_to_user(buffer, &serial, tmp)) - goto error; - - buflen -= tmp; -diff --git a/security/min_addr.c b/security/min_addr.c -index f728728..6457a0c 100644 ---- a/security/min_addr.c -+++ b/security/min_addr.c -@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; - */ - static void update_mmap_min_addr(void) - { -+#ifndef SPARC - #ifdef CONFIG_LSM_MMAP_MIN_ADDR - if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) - mmap_min_addr = dac_mmap_min_addr; -@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void) - #else - mmap_min_addr = dac_mmap_min_addr; - #endif -+#endif - } - - /* -diff --git a/security/security.c b/security/security.c -index d9e1533..91427f2 100644 ---- a/security/security.c -+++ b/security/security.c -@@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = - /* things that live in capability.c */ - extern void __init security_fixup_ops(struct security_operations *ops); - --static struct security_operations *security_ops; --static struct security_operations default_security_ops = { -+static struct security_operations *security_ops __read_only; -+static struct security_operations default_security_ops __read_only = { - .name = "default", - }; - -@@ -67,7 +67,9 @@ int __init security_init(void) - - void reset_security_ops(void) - { -+ pax_open_kernel(); - security_ops = &default_security_ops; -+ pax_close_kernel(); - } - - /* Save user chosen LSM */ -diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c -index 266a229..61bd553 100644 ---- a/security/selinux/hooks.c -+++ b/security/selinux/hooks.c -@@ -93,7 +93,6 @@ - #define NUM_SEL_MNT_OPTS 5 - - extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); --extern struct security_operations *security_ops; - - /* SECMARK reference count */ - atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); -@@ -5455,7 +5454,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) - - #endif - --static struct security_operations selinux_ops = { -+static struct security_operations selinux_ops __read_only = { - .name = "selinux", - - .ptrace_access_check = selinux_ptrace_access_check, -diff --git 
a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h -index b43813c..74be837 100644 ---- a/security/selinux/include/xfrm.h -+++ b/security/selinux/include/xfrm.h -@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); - - static inline void selinux_xfrm_notify_policyload(void) - { -- atomic_inc(&flow_cache_genid); -+ atomic_inc_unchecked(&flow_cache_genid); - } - #else - static inline int selinux_xfrm_enabled(void) -diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c -index f6917bc..8e8713e 100644 ---- a/security/selinux/ss/services.c -+++ b/security/selinux/ss/services.c -@@ -1814,6 +1814,8 @@ int security_load_policy(void *data, size_t len) - int rc = 0; - struct policy_file file = { data, len }, *fp = &file; - -+ pax_track_stack(); -+ - if (!ss_initialized) { - avtab_cache_init(); - rc = policydb_read(&policydb, fp); -diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c -index b9c5e14..20ab779 100644 ---- a/security/smack/smack_lsm.c -+++ b/security/smack/smack_lsm.c -@@ -3393,7 +3393,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) - return 0; - } - --struct security_operations smack_ops = { -+struct security_operations smack_ops __read_only = { - .name = "smack", - - .ptrace_access_check = smack_ptrace_access_check, -diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c -index f776400..f95b158c 100644 ---- a/security/tomoyo/tomoyo.c -+++ b/security/tomoyo/tomoyo.c -@@ -446,7 +446,7 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path) - * tomoyo_security_ops is a "struct security_operations" which is used for - * registering TOMOYO. - */ --static struct security_operations tomoyo_security_ops = { -+static struct security_operations tomoyo_security_ops __read_only = { - .name = "tomoyo", - .cred_alloc_blank = tomoyo_cred_alloc_blank, - .cred_prepare = tomoyo_cred_prepare, -diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c -index 3687a6c..652565e 100644 ---- a/sound/aoa/codecs/onyx.c -+++ b/sound/aoa/codecs/onyx.c -@@ -54,7 +54,7 @@ struct onyx { - spdif_locked:1, - analog_locked:1, - original_mute:2; -- int open_count; -+ local_t open_count; - struct codec_info *codec_info; - - /* mutex serializes concurrent access to the device -@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii, - struct onyx *onyx = cii->codec_data; - - mutex_lock(&onyx->mutex); -- onyx->open_count++; -+ local_inc(&onyx->open_count); - mutex_unlock(&onyx->mutex); - - return 0; -@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii, - struct onyx *onyx = cii->codec_data; - - mutex_lock(&onyx->mutex); -- onyx->open_count--; -- if (!onyx->open_count) -+ if (local_dec_and_test(&onyx->open_count)) - onyx->spdif_locked = onyx->analog_locked = 0; - mutex_unlock(&onyx->mutex); - -diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h -index ffd2025..df062c9 100644 ---- a/sound/aoa/codecs/onyx.h -+++ b/sound/aoa/codecs/onyx.h -@@ -11,6 +11,7 @@ - #include <linux/i2c.h> - #include <asm/pmac_low_i2c.h> - #include <asm/prom.h> -+#include <asm/local.h> - - /* PCM3052 register definitions */ - -diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c -index 23c34a0..a2673a5 100644 ---- a/sound/core/oss/pcm_oss.c -+++ b/sound/core/oss/pcm_oss.c -@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const - if (in_kernel) { - mm_segment_t fs; - fs = 
snd_enter_user(); -- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); -+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); - snd_leave_user(fs); - } else { -- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); -+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); - } - if (ret != -EPIPE && ret != -ESTRPIPE) - break; -@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p - if (in_kernel) { - mm_segment_t fs; - fs = snd_enter_user(); -- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); -+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); - snd_leave_user(fs); - } else { -- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); -+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); - } - if (ret == -EPIPE) { - if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { -@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha - struct snd_pcm_plugin_channel *channels; - size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8; - if (!in_kernel) { -- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes)) -+ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes)) - return -EFAULT; - buf = runtime->oss.buffer; - } -@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha - } - } else { - tmp = snd_pcm_oss_write2(substream, -- (const char __force *)buf, -+ (const char __force_kernel *)buf, - runtime->oss.period_bytes, 0); - if (tmp <= 0) - goto err; -@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf, - struct snd_pcm_runtime *runtime = substream->runtime; - snd_pcm_sframes_t frames, frames1; - #ifdef CONFIG_SND_PCM_OSS_PLUGINS -- char __user *final_dst = (char __force __user *)buf; -+ char __user *final_dst = (char __force_user *)buf; - if (runtime->oss.plugin_first) { - struct snd_pcm_plugin_channel *channels; - size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8; -@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use - xfer += tmp; - runtime->oss.buffer_used -= tmp; - } else { -- tmp = snd_pcm_oss_read2(substream, (char __force *)buf, -+ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf, - runtime->oss.period_bytes, 0); - if (tmp <= 0) - goto err; -@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) - size1); - size1 /= runtime->channels; /* frames */ - fs = snd_enter_user(); -- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1); -+ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1); - snd_leave_user(fs); - } - } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) { -diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c -index 91cdf94..4085161 100644 ---- a/sound/core/pcm_compat.c -+++ b/sound/core/pcm_compat.c -@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, - int err; - - fs = snd_enter_user(); -- err = snd_pcm_delay(substream, &delay); -+ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay); - snd_leave_user(fs); - if (err < 0) - return err; 
-diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c -index 1c6be91..c761a59 100644 ---- a/sound/core/pcm_native.c -+++ b/sound/core/pcm_native.c -@@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, - switch (substream->stream) { - case SNDRV_PCM_STREAM_PLAYBACK: - result = snd_pcm_playback_ioctl1(NULL, substream, cmd, -- (void __user *)arg); -+ (void __force_user *)arg); - break; - case SNDRV_PCM_STREAM_CAPTURE: - result = snd_pcm_capture_ioctl1(NULL, substream, cmd, -- (void __user *)arg); -+ (void __force_user *)arg); - break; - default: - result = -EINVAL; -diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c -index 1f99767..14636533 100644 ---- a/sound/core/seq/seq_device.c -+++ b/sound/core/seq/seq_device.c -@@ -63,7 +63,7 @@ struct ops_list { - int argsize; /* argument size */ - - /* operators */ -- struct snd_seq_dev_ops ops; -+ struct snd_seq_dev_ops *ops; - - /* registred devices */ - struct list_head dev_list; /* list of devices */ -@@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, - - mutex_lock(&ops->reg_mutex); - /* copy driver operators */ -- ops->ops = *entry; -+ ops->ops = entry; - ops->driver |= DRIVER_LOADED; - ops->argsize = argsize; - -@@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops) - dev->name, ops->id, ops->argsize, dev->argsize); - return -EINVAL; - } -- if (ops->ops.init_device(dev) >= 0) { -+ if (ops->ops->init_device(dev) >= 0) { - dev->status = SNDRV_SEQ_DEVICE_REGISTERED; - ops->num_init_devices++; - } else { -@@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops) - dev->name, ops->id, ops->argsize, dev->argsize); - return -EINVAL; - } -- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) { -+ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) { - dev->status = SNDRV_SEQ_DEVICE_FREE; - dev->driver_data = NULL; - ops->num_init_devices--; -diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c -index 8539ab0..be8a121 100644 ---- a/sound/drivers/mts64.c -+++ b/sound/drivers/mts64.c -@@ -28,6 +28,7 @@ - #include <sound/initval.h> - #include <sound/rawmidi.h> - #include <sound/control.h> -+#include <asm/local.h> - - #define CARD_NAME "Miditerminal 4140" - #define DRIVER_NAME "MTS64" -@@ -66,7 +67,7 @@ struct mts64 { - struct pardevice *pardev; - int pardev_claimed; - -- int open_count; -+ local_t open_count; - int current_midi_output_port; - int current_midi_input_port; - u8 mode[MTS64_NUM_INPUT_PORTS]; -@@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) - { - struct mts64 *mts = substream->rmidi->private_data; - -- if (mts->open_count == 0) { -+ if (local_read(&mts->open_count) == 0) { - /* We don't need a spinlock here, because this is just called - if the device has not been opened before. 
- So there aren't any IRQs from the device */ -@@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) - - msleep(50); - } -- ++(mts->open_count); -+ local_inc(&mts->open_count); - - return 0; - } -@@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) - struct mts64 *mts = substream->rmidi->private_data; - unsigned long flags; - -- --(mts->open_count); -- if (mts->open_count == 0) { -+ if (local_dec_return(&mts->open_count) == 0) { - /* We need the spinlock_irqsave here because we can still - have IRQs at this point */ - spin_lock_irqsave(&mts->lock, flags); -@@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) - - msleep(500); - -- } else if (mts->open_count < 0) -- mts->open_count = 0; -+ } else if (local_read(&mts->open_count) < 0) -+ local_set(&mts->open_count, 0); - - return 0; - } -diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c -index f07e38d..7aae69a 100644 ---- a/sound/drivers/opl4/opl4_lib.c -+++ b/sound/drivers/opl4/opl4_lib.c -@@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch clemens@ladisch.de"); - MODULE_DESCRIPTION("OPL4 driver"); - MODULE_LICENSE("GPL"); - --static void inline snd_opl4_wait(struct snd_opl4 *opl4) -+static inline void snd_opl4_wait(struct snd_opl4 *opl4) - { - int timeout = 10; - while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0) -diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c -index f2b0ba2..429efc5 100644 ---- a/sound/drivers/portman2x4.c -+++ b/sound/drivers/portman2x4.c -@@ -47,6 +47,7 @@ - #include <sound/initval.h> - #include <sound/rawmidi.h> - #include <sound/control.h> -+#include <asm/local.h> - - #define CARD_NAME "Portman 2x4" - #define DRIVER_NAME "portman" -@@ -84,7 +85,7 @@ struct portman { - struct pardevice *pardev; - int pardev_claimed; - -- int open_count; -+ local_t open_count; - int mode[PORTMAN_NUM_INPUT_PORTS]; - struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; - }; -diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c -index 87657dd..a8268d4 100644 ---- a/sound/firewire/amdtp.c -+++ b/sound/firewire/amdtp.c -@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle) - ptr = s->pcm_buffer_pointer + data_blocks; - if (ptr >= pcm->runtime->buffer_size) - ptr -= pcm->runtime->buffer_size; -- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; -+ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr; - - s->pcm_period_pointer += data_blocks; - if (s->pcm_period_pointer >= pcm->runtime->period_size) { -@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start); - */ - void amdtp_out_stream_update(struct amdtp_out_stream *s) - { -- ACCESS_ONCE(s->source_node_id_field) = -+ ACCESS_ONCE_RW(s->source_node_id_field) = - (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; - } - EXPORT_SYMBOL(amdtp_out_stream_update); -diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h -index 537a9cb..8e8c8e9 100644 ---- a/sound/firewire/amdtp.h -+++ b/sound/firewire/amdtp.h -@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s) - static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s, - struct snd_pcm_substream *pcm) - { -- ACCESS_ONCE(s->pcm) = pcm; -+ ACCESS_ONCE_RW(s->pcm) = pcm; - } - - /** -diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c -index 4400308..261e9f3 100644 ---- a/sound/firewire/isight.c -+++ b/sound/firewire/isight.c -@@ -97,7 +97,7 
@@ static void isight_update_pointers(struct isight *isight, unsigned int count) - ptr += count; - if (ptr >= runtime->buffer_size) - ptr -= runtime->buffer_size; -- ACCESS_ONCE(isight->buffer_pointer) = ptr; -+ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr; - - isight->period_counter += count; - if (isight->period_counter >= runtime->period_size) { -@@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream, - if (err < 0) - return err; - -- ACCESS_ONCE(isight->pcm_active) = true; -+ ACCESS_ONCE_RW(isight->pcm_active) = true; - - return 0; - } -@@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream) - { - struct isight *isight = substream->private_data; - -- ACCESS_ONCE(isight->pcm_active) = false; -+ ACCESS_ONCE_RW(isight->pcm_active) = false; - - mutex_lock(&isight->mutex); - isight_stop_streaming(isight); -@@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd) - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: -- ACCESS_ONCE(isight->pcm_running) = true; -+ ACCESS_ONCE_RW(isight->pcm_running) = true; - break; - case SNDRV_PCM_TRIGGER_STOP: -- ACCESS_ONCE(isight->pcm_running) = false; -+ ACCESS_ONCE_RW(isight->pcm_running) = false; - break; - default: - return -EINVAL; -diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c -index fe79a16..4d9714e 100644 ---- a/sound/isa/cmi8330.c -+++ b/sound/isa/cmi8330.c -@@ -172,7 +172,7 @@ struct snd_cmi8330 { - - struct snd_pcm *pcm; - struct snd_cmi8330_stream { -- struct snd_pcm_ops ops; -+ snd_pcm_ops_no_const ops; - snd_pcm_open_callback_t open; - void *private_data; /* sb or wss */ - } streams[2]; -diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c -index 733b014..56ce96f 100644 ---- a/sound/oss/sb_audio.c -+++ b/sound/oss/sb_audio.c -@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev, - buf16 = (signed short *)(localbuf + localoffs); - while (c) - { -- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); -+ locallen = ((unsigned)c >= LBUFCOPYSIZE ? 
LBUFCOPYSIZE : c); - if (copy_from_user(lbuf8, - userbuf+useroffs + p, - locallen)) -diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c -index 09d4648..cf234c7 100644 ---- a/sound/oss/swarm_cs4297a.c -+++ b/sound/oss/swarm_cs4297a.c -@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void) - { - struct cs4297a_state *s; - u32 pwr, id; -- mm_segment_t fs; - int rval; - #ifndef CONFIG_BCM_CS4297A_CSWARM - u64 cfg; -@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void) - if (!rval) { - char *sb1250_duart_present; - -+#if 0 -+ mm_segment_t fs; - fs = get_fs(); - set_fs(KERNEL_DS); --#if 0 - val = SOUND_MASK_LINE; - mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val); - for (i = 0; i < ARRAY_SIZE(initvol); i++) { - val = initvol[i].vol; - mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val); - } -+ set_fs(fs); - // cs4297a_write_ac97(s, 0x18, 0x0808); - #else - // cs4297a_write_ac97(s, 0x5e, 0x180); - cs4297a_write_ac97(s, 0x02, 0x0808); - cs4297a_write_ac97(s, 0x18, 0x0808); - #endif -- set_fs(fs); - - list_add(&s->list, &cs4297a_devs); - -diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h -index 755f2b0..5c12361 100644 ---- a/sound/pci/hda/hda_codec.h -+++ b/sound/pci/hda/hda_codec.h -@@ -611,7 +611,7 @@ struct hda_bus_ops { - /* notify power-up/down from codec to controller */ - void (*pm_notify)(struct hda_bus *bus); - #endif --}; -+} __no_const; - - /* template to pass to the bus constructor */ - struct hda_bus_template { -@@ -713,6 +713,7 @@ struct hda_codec_ops { - #endif - void (*reboot_notify)(struct hda_codec *codec); - }; -+typedef struct hda_codec_ops __no_const hda_codec_ops_no_const; - - /* record for amp information cache */ - struct hda_cache_head { -@@ -743,7 +744,7 @@ struct hda_pcm_ops { - struct snd_pcm_substream *substream); - int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec, - struct snd_pcm_substream *substream); --}; -+} __no_const; - - /* PCM information for each substream */ - struct hda_pcm_stream { -@@ -801,7 +802,7 @@ struct hda_codec { - const char *modelname; /* model name for preset */ - - /* set by patch */ -- struct hda_codec_ops patch_ops; -+ hda_codec_ops_no_const patch_ops; - - /* PCM to create, set by patch_ops.build_pcms callback */ - unsigned int num_pcms; -diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h -index 0da778a..bc38b84 100644 ---- a/sound/pci/ice1712/ice1712.h -+++ b/sound/pci/ice1712/ice1712.h -@@ -269,7 +269,7 @@ struct snd_ak4xxx_private { - unsigned int mask_flags; /* total mask bits */ - struct snd_akm4xxx_ops { - void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate); -- } ops; -+ } __no_const ops; - }; - - struct snd_ice1712_spdif { -@@ -285,7 +285,7 @@ struct snd_ice1712_spdif { - int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); - void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); - int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); -- } ops; -+ } __no_const ops; - }; - - -diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c -index f3260e6..4a285d8 100644 ---- a/sound/pci/ymfpci/ymfpci_main.c -+++ b/sound/pci/ymfpci/ymfpci_main.c -@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) - if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) - break; - } -- if (atomic_read(&chip->interrupt_sleep_count)) { -- atomic_set(&chip->interrupt_sleep_count, 0); -+ if 
(atomic_read_unchecked(&chip->interrupt_sleep_count)) { -+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); - wake_up(&chip->interrupt_sleep); - } - __end: -@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) - continue; - init_waitqueue_entry(&wait, current); - add_wait_queue(&chip->interrupt_sleep, &wait); -- atomic_inc(&chip->interrupt_sleep_count); -+ atomic_inc_unchecked(&chip->interrupt_sleep_count); - schedule_timeout_uninterruptible(msecs_to_jiffies(50)); - remove_wait_queue(&chip->interrupt_sleep, &wait); - } -@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) - snd_ymfpci_writel(chip, YDSXGR_MODE, mode); - spin_unlock(&chip->reg_lock); - -- if (atomic_read(&chip->interrupt_sleep_count)) { -- atomic_set(&chip->interrupt_sleep_count, 0); -+ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { -+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); - wake_up(&chip->interrupt_sleep); - } - } -@@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card, - spin_lock_init(&chip->reg_lock); - spin_lock_init(&chip->voice_lock); - init_waitqueue_head(&chip->interrupt_sleep); -- atomic_set(&chip->interrupt_sleep_count, 0); -+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); - chip->card = card; - chip->pci = pci; - chip->irq = -1; -diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c -index 2879c88..224159e 100644 ---- a/sound/soc/soc-pcm.c -+++ b/sound/soc/soc-pcm.c -@@ -568,7 +568,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream) - } - - /* ASoC PCM operations */ --static struct snd_pcm_ops soc_pcm_ops = { -+static snd_pcm_ops_no_const soc_pcm_ops = { - .open = soc_pcm_open, - .close = soc_pcm_close, - .hw_params = soc_pcm_hw_params, -diff --git a/sound/usb/card.h b/sound/usb/card.h -index ae4251d..0961361 100644 ---- a/sound/usb/card.h -+++ b/sound/usb/card.h -@@ -44,6 +44,7 @@ struct snd_urb_ops { - int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); - int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); - }; -+typedef struct snd_urb_ops __no_const snd_urb_ops_no_const; - - struct snd_usb_substream { - struct snd_usb_stream *stream; -@@ -93,7 +94,7 @@ struct snd_usb_substream { - struct snd_pcm_hw_constraint_list rate_list; /* limited rates */ - spinlock_t lock; - -- struct snd_urb_ops ops; /* callbacks (must be filled at init) */ -+ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */ - }; - - struct snd_usb_stream { -diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile -new file mode 100644 -index 0000000..b044b80 ---- /dev/null -+++ b/tools/gcc/Makefile -@@ -0,0 +1,21 @@ -+#CC := gcc -+#PLUGIN_SOURCE_FILES := pax_plugin.c -+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES)) -+GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin) -+#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -+ -+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -+ -+hostlibs-y := constify_plugin.so -+hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so -+hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so -+hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so -+hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so -+ -+always := $(hostlibs-y) -+ -+constify_plugin-objs := constify_plugin.o -+stackleak_plugin-objs := stackleak_plugin.o -+kallocstat_plugin-objs := kallocstat_plugin.o -+kernexec_plugin-objs := kernexec_plugin.o 
-+checker_plugin-objs := checker_plugin.o -diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c -new file mode 100644 -index 0000000..d41b5af ---- /dev/null -+++ b/tools/gcc/checker_plugin.c -@@ -0,0 +1,171 @@ -+/* -+ * Copyright 2011 by the PaX Team pageexec@freemail.hu -+ * Licensed under the GPL v2 -+ * -+ * Note: the choice of the license means that the compilation process is -+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, -+ * but for the kernel it doesn't matter since it doesn't link against -+ * any of the gcc libraries -+ * -+ * gcc plugin to implement various sparse (source code checker) features -+ * -+ * TODO: -+ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) -+ * -+ * BUGS: -+ * - none known -+ */ -+#include "gcc-plugin.h" -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-pass.h" -+#include "flags.h" -+#include "intl.h" -+#include "toplev.h" -+#include "plugin.h" -+//#include "expr.h" where are you... -+#include "diagnostic.h" -+#include "plugin-version.h" -+#include "tm.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "rtl.h" -+#include "emit-rtl.h" -+#include "tree-flow.h" -+#include "target.h" -+ -+extern void c_register_addr_space (const char *str, addr_space_t as); -+extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); -+extern enum machine_mode default_addr_space_address_mode (addr_space_t); -+extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); -+extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); -+extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); -+ -+extern void print_gimple_stmt(FILE *, gimple, int, int); -+extern rtx emit_move_insn(rtx x, rtx y); -+ -+int plugin_is_GPL_compatible; -+ -+static struct plugin_info checker_plugin_info = { -+ .version = "201111150100", -+}; -+ -+#define ADDR_SPACE_KERNEL 0 -+#define ADDR_SPACE_FORCE_KERNEL 1 -+#define ADDR_SPACE_USER 2 -+#define ADDR_SPACE_FORCE_USER 3 -+#define ADDR_SPACE_IOMEM 0 -+#define ADDR_SPACE_FORCE_IOMEM 0 -+#define ADDR_SPACE_PERCPU 0 -+#define ADDR_SPACE_FORCE_PERCPU 0 -+#define ADDR_SPACE_RCU 0 -+#define ADDR_SPACE_FORCE_RCU 0 -+ -+static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) -+{ -+ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); -+} -+ -+static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) -+{ -+ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); -+} -+ -+static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) -+{ -+ return default_addr_space_valid_pointer_mode(mode, as); -+} -+ -+static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) -+{ -+ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); -+} -+ -+static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as) -+{ -+ return default_addr_space_legitimize_address(x, oldx, mode, as); -+} -+ -+static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) -+{ -+ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) -+ return true; -+ -+ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER) -+ return 
true; -+ -+ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM) -+ return true; -+ -+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER) -+ return true; -+ -+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM) -+ return true; -+ -+ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL) -+ return true; -+ -+ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL) -+ return true; -+ -+ return subset == superset; -+} -+ -+static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type) -+{ -+// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type)); -+// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type)); -+ -+ return op; -+} -+ -+static void register_checker_address_spaces(void *event_data, void *data) -+{ -+ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL); -+ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL); -+ c_register_addr_space("__user", ADDR_SPACE_USER); -+ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER); -+// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM); -+// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM); -+// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU); -+// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU); -+// c_register_addr_space("__rcu", ADDR_SPACE_RCU); -+// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU); -+ -+ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode; -+ targetm.addr_space.address_mode = checker_addr_space_address_mode; -+ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode; -+ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p; -+// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address; -+ targetm.addr_space.subset_p = checker_addr_space_subset_p; -+ targetm.addr_space.convert = checker_addr_space_convert; -+} -+ -+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) -+{ -+ const char * const plugin_name = plugin_info->base_name; -+ const int argc = plugin_info->argc; -+ const struct plugin_argument * const argv = plugin_info->argv; -+ int i; -+ -+ if (!plugin_default_version_check(version, &gcc_version)) { -+ error(G_("incompatible gcc/plugin versions")); -+ return 1; -+ } -+ -+ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info); -+ -+ for (i = 0; i < argc; ++i) -+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ -+ if (TARGET_64BIT == 0) -+ return 0; -+ -+ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL); -+ -+ return 0; -+} -diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c -new file mode 100644 -index 0000000..704a564 ---- /dev/null -+++ b/tools/gcc/constify_plugin.c -@@ -0,0 +1,303 @@ -+/* -+ * Copyright 2011 by Emese Revfy re.emese@gmail.com -+ * Copyright 2011 by PaX Team pageexec@freemail.hu -+ * Licensed under the GPL v2, or (at your option) v3 -+ * -+ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification. 
-+ * -+ * Homepage: -+ * http://www.grsecurity.net/~ephox/const_plugin/ -+ * -+ * Usage: -+ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c -+ * $ gcc -fplugin=constify_plugin.so test.c -O2 -+ */ -+ -+#include "gcc-plugin.h" -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-pass.h" -+#include "flags.h" -+#include "intl.h" -+#include "toplev.h" -+#include "plugin.h" -+#include "diagnostic.h" -+#include "plugin-version.h" -+#include "tm.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "rtl.h" -+#include "emit-rtl.h" -+#include "tree-flow.h" -+ -+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) -+ -+int plugin_is_GPL_compatible; -+ -+static struct plugin_info const_plugin_info = { -+ .version = "201111150100", -+ .help = "no-constify\tturn off constification\n", -+}; -+ -+static void constify_type(tree type); -+static bool walk_struct(tree node); -+ -+static tree deconstify_type(tree old_type) -+{ -+ tree new_type, field; -+ -+ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); -+ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); -+ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) -+ DECL_FIELD_CONTEXT(field) = new_type; -+ TYPE_READONLY(new_type) = 0; -+ C_TYPE_FIELDS_READONLY(new_type) = 0; -+ return new_type; -+} -+ -+static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) -+{ -+ tree type; -+ -+ *no_add_attrs = true; -+ if (TREE_CODE(*node) == FUNCTION_DECL) { -+ error("%qE attribute does not apply to functions", name); -+ return NULL_TREE; -+ } -+ -+ if (TREE_CODE(*node) == VAR_DECL) { -+ error("%qE attribute does not apply to variables", name); -+ return NULL_TREE; -+ } -+ -+ if (TYPE_P(*node)) { -+ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE) -+ *no_add_attrs = false; -+ else -+ error("%qE attribute applies to struct and union types only", name); -+ return NULL_TREE; -+ } -+ -+ type = TREE_TYPE(*node); -+ -+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { -+ error("%qE attribute applies to struct and union types only", name); -+ return NULL_TREE; -+ } -+ -+ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { -+ error("%qE attribute is already applied to the type", name); -+ return NULL_TREE; -+ } -+ -+ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) { -+ error("%qE attribute used on type that is not constified", name); -+ return NULL_TREE; -+ } -+ -+ if (TREE_CODE(*node) == TYPE_DECL) { -+ TREE_TYPE(*node) = deconstify_type(type); -+ TREE_READONLY(*node) = 0; -+ return NULL_TREE; -+ } -+ -+ return NULL_TREE; -+} -+ -+static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) -+{ -+ *no_add_attrs = true; -+ if (!TYPE_P(*node)) { -+ error("%qE attribute applies to types only", name); -+ return NULL_TREE; -+ } -+ -+ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { -+ error("%qE attribute applies to struct and union types only", name); -+ return NULL_TREE; -+ } -+ -+ *no_add_attrs = false; -+ constify_type(*node); -+ return NULL_TREE; -+} -+ -+static struct attribute_spec no_const_attr = { -+ .name = "no_const", -+ .min_length = 0, -+ .max_length = 0, -+ .decl_required = false, -+ .type_required = false, -+ .function_type_required = false, -+ .handler = 
handle_no_const_attribute, -+#if BUILDING_GCC_VERSION >= 4007 -+ .affects_type_identity = true -+#endif -+}; -+ -+static struct attribute_spec do_const_attr = { -+ .name = "do_const", -+ .min_length = 0, -+ .max_length = 0, -+ .decl_required = false, -+ .type_required = false, -+ .function_type_required = false, -+ .handler = handle_do_const_attribute, -+#if BUILDING_GCC_VERSION >= 4007 -+ .affects_type_identity = true -+#endif -+}; -+ -+static void register_attributes(void *event_data, void *data) -+{ -+ register_attribute(&no_const_attr); -+ register_attribute(&do_const_attr); -+} -+ -+static void constify_type(tree type) -+{ -+ TYPE_READONLY(type) = 1; -+ C_TYPE_FIELDS_READONLY(type) = 1; -+} -+ -+static bool is_fptr(tree field) -+{ -+ tree ptr = TREE_TYPE(field); -+ -+ if (TREE_CODE(ptr) != POINTER_TYPE) -+ return false; -+ -+ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; -+} -+ -+static bool walk_struct(tree node) -+{ -+ tree field; -+ -+ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) -+ return false; -+ -+ if (TYPE_FIELDS(node) == NULL_TREE) -+ return false; -+ -+ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { -+ tree type = TREE_TYPE(field); -+ enum tree_code code = TREE_CODE(type); -+ if (code == RECORD_TYPE || code == UNION_TYPE) { -+ if (!(walk_struct(type))) -+ return false; -+ } else if (!is_fptr(field) && !TREE_READONLY(field)) -+ return false; -+ } -+ return true; -+} -+ -+static void finish_type(void *event_data, void *data) -+{ -+ tree type = (tree)event_data; -+ -+ if (type == NULL_TREE) -+ return; -+ -+ if (TYPE_READONLY(type)) -+ return; -+ -+ if (walk_struct(type)) -+ constify_type(type); -+} -+ -+static unsigned int check_local_variables(void); -+ -+struct gimple_opt_pass pass_local_variable = { -+ { -+ .type = GIMPLE_PASS, -+ .name = "check_local_variables", -+ .gate = NULL, -+ .execute = check_local_variables, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = 0 -+ } -+}; -+ -+static unsigned int check_local_variables(void) -+{ -+ tree var; -+ referenced_var_iterator rvi; -+ -+#if BUILDING_GCC_VERSION == 4005 -+ FOR_EACH_REFERENCED_VAR(var, rvi) { -+#else -+ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) { -+#endif -+ tree type = TREE_TYPE(var); -+ -+ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var)) -+ continue; -+ -+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) -+ continue; -+ -+ if (!TYPE_READONLY(type)) -+ continue; -+ -+// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var))) -+// continue; -+ -+// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) -+// continue; -+ -+ if (walk_struct(type)) { -+ error("constified variable %qE cannot be local", var); -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) -+{ -+ const char * const plugin_name = plugin_info->base_name; -+ const int argc = plugin_info->argc; -+ const struct plugin_argument * const argv = plugin_info->argv; -+ int i; -+ bool constify = true; -+ -+ struct register_pass_info local_variable_pass_info = { -+ .pass = &pass_local_variable.pass, -+ .reference_pass_name = "*referenced_vars", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_AFTER -+ }; -+ -+ if (!plugin_default_version_check(version, &gcc_version)) { -+ error(G_("incompatible gcc/plugin versions")); -+ return 1; 
-+ } -+ -+ for (i = 0; i < argc; ++i) { -+ if (!(strcmp(argv[i].key, "no-constify"))) { -+ constify = false; -+ continue; -+ } -+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ } -+ -+ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); -+ if (constify) { -+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info); -+ } -+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); -+ -+ return 0; -+} -diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c -new file mode 100644 -index 0000000..a5eabce ---- /dev/null -+++ b/tools/gcc/kallocstat_plugin.c -@@ -0,0 +1,167 @@ -+/* -+ * Copyright 2011 by the PaX Team pageexec@freemail.hu -+ * Licensed under the GPL v2 -+ * -+ * Note: the choice of the license means that the compilation process is -+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, -+ * but for the kernel it doesn't matter since it doesn't link against -+ * any of the gcc libraries -+ * -+ * gcc plugin to find the distribution of k*alloc sizes -+ * -+ * TODO: -+ * -+ * BUGS: -+ * - none known -+ */ -+#include "gcc-plugin.h" -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-pass.h" -+#include "flags.h" -+#include "intl.h" -+#include "toplev.h" -+#include "plugin.h" -+//#include "expr.h" where are you... -+#include "diagnostic.h" -+#include "plugin-version.h" -+#include "tm.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "rtl.h" -+#include "emit-rtl.h" -+ -+extern void print_gimple_stmt(FILE *, gimple, int, int); -+ -+int plugin_is_GPL_compatible; -+ -+static const char * const kalloc_functions[] = { -+ "__kmalloc", -+ "kmalloc", -+ "kmalloc_large", -+ "kmalloc_node", -+ "kmalloc_order", -+ "kmalloc_order_trace", -+ "kmalloc_slab", -+ "kzalloc", -+ "kzalloc_node", -+}; -+ -+static struct plugin_info kallocstat_plugin_info = { -+ .version = "201111150100", -+}; -+ -+static unsigned int execute_kallocstat(void); -+ -+static struct gimple_opt_pass kallocstat_pass = { -+ .pass = { -+ .type = GIMPLE_PASS, -+ .name = "kallocstat", -+ .gate = NULL, -+ .execute = execute_kallocstat, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = 0 -+ } -+}; -+ -+static bool is_kalloc(const char *fnname) -+{ -+ size_t i; -+ -+ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) -+ if (!strcmp(fnname, kalloc_functions[i])) -+ return true; -+ return false; -+} -+ -+static unsigned int execute_kallocstat(void) -+{ -+ basic_block bb; -+ -+ // 1. 
loop through BBs and GIMPLE statements -+ FOR_EACH_BB(bb) { -+ gimple_stmt_iterator gsi; -+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { -+ // gimple match: -+ tree fndecl, size; -+ gimple call_stmt; -+ const char *fnname; -+ -+ // is it a call -+ call_stmt = gsi_stmt(gsi); -+ if (!is_gimple_call(call_stmt)) -+ continue; -+ fndecl = gimple_call_fndecl(call_stmt); -+ if (fndecl == NULL_TREE) -+ continue; -+ if (TREE_CODE(fndecl) != FUNCTION_DECL) -+ continue; -+ -+ // is it a call to k*alloc -+ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl)); -+ if (!is_kalloc(fnname)) -+ continue; -+ -+ // is the size arg the result of a simple const assignment -+ size = gimple_call_arg(call_stmt, 0); -+ while (true) { -+ gimple def_stmt; -+ expanded_location xloc; -+ size_t size_val; -+ -+ if (TREE_CODE(size) != SSA_NAME) -+ break; -+ def_stmt = SSA_NAME_DEF_STMT(size); -+ if (!def_stmt || !is_gimple_assign(def_stmt)) -+ break; -+ if (gimple_num_ops(def_stmt) != 2) -+ break; -+ size = gimple_assign_rhs1(def_stmt); -+ if (!TREE_CONSTANT(size)) -+ continue; -+ xloc = expand_location(gimple_location(def_stmt)); -+ if (!xloc.file) -+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); -+ size_val = TREE_INT_CST_LOW(size); -+ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); -+ break; -+ } -+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); -+//debug_tree(gimple_call_fn(call_stmt)); -+//print_node(stderr, "pax", fndecl, 4); -+ } -+ } -+ -+ return 0; -+} -+ -+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) -+{ -+ const char * const plugin_name = plugin_info->base_name; -+ struct register_pass_info kallocstat_pass_info = { -+ .pass = &kallocstat_pass.pass, -+ .reference_pass_name = "ssa", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_AFTER -+ }; -+ -+ if (!plugin_default_version_check(version, &gcc_version)) { -+ error(G_("incompatible gcc/plugin versions")); -+ return 1; -+ } -+ -+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); -+ -+ return 0; -+} -diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c -new file mode 100644 -index 0000000..51f747e ---- /dev/null -+++ b/tools/gcc/kernexec_plugin.c -@@ -0,0 +1,348 @@ -+/* -+ * Copyright 2011 by the PaX Team pageexec@freemail.hu -+ * Licensed under the GPL v2 -+ * -+ * Note: the choice of the license means that the compilation process is -+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, -+ * but for the kernel it doesn't matter since it doesn't link against -+ * any of the gcc libraries -+ * -+ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 -+ * -+ * TODO: -+ * -+ * BUGS: -+ * - none known -+ */ -+#include "gcc-plugin.h" -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-pass.h" -+#include "flags.h" -+#include "intl.h" -+#include "toplev.h" -+#include "plugin.h" -+//#include "expr.h" where are you... 
-+#include "diagnostic.h" -+#include "plugin-version.h" -+#include "tm.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "rtl.h" -+#include "emit-rtl.h" -+#include "tree-flow.h" -+ -+extern void print_gimple_stmt(FILE *, gimple, int, int); -+extern rtx emit_move_insn(rtx x, rtx y); -+ -+int plugin_is_GPL_compatible; -+ -+static struct plugin_info kernexec_plugin_info = { -+ .version = "201111291120", -+ .help = "method=[bts|or]\tinstrumentation method\n" -+}; -+ -+static unsigned int execute_kernexec_fptr(void); -+static unsigned int execute_kernexec_retaddr(void); -+static bool kernexec_cmodel_check(void); -+ -+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator); -+static void (*kernexec_instrument_retaddr)(rtx); -+ -+static struct gimple_opt_pass kernexec_fptr_pass = { -+ .pass = { -+ .type = GIMPLE_PASS, -+ .name = "kernexec_fptr", -+ .gate = kernexec_cmodel_check, -+ .execute = execute_kernexec_fptr, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi -+ } -+}; -+ -+static struct rtl_opt_pass kernexec_retaddr_pass = { -+ .pass = { -+ .type = RTL_PASS, -+ .name = "kernexec_retaddr", -+ .gate = kernexec_cmodel_check, -+ .execute = execute_kernexec_retaddr, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect -+ } -+}; -+ -+static bool kernexec_cmodel_check(void) -+{ -+ tree section; -+ -+ if (ix86_cmodel != CM_KERNEL) -+ return false; -+ -+ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); -+ if (!section || !TREE_VALUE(section)) -+ return true; -+ -+ section = TREE_VALUE(TREE_VALUE(section)); -+ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) -+ return true; -+ -+ return false; -+} -+ -+/* -+ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce -+ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference -+ */ -+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi) -+{ -+ gimple assign_intptr, assign_new_fptr, call_stmt; -+ tree intptr, old_fptr, new_fptr, kernexec_mask; -+ -+ call_stmt = gsi_stmt(gsi); -+ old_fptr = gimple_call_fn(call_stmt); -+ -+ // create temporary unsigned long variable used for bitops and cast fptr to it -+ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); -+ add_referenced_var(intptr); -+ mark_sym_for_renaming(intptr); -+ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); -+ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT); -+ update_stmt(assign_intptr); -+ -+ // apply logical or to temporary unsigned long and bitmask -+ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); -+// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); -+ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); -+ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT); -+ update_stmt(assign_intptr); -+ -+ // cast temporary unsigned long back to a temporary fptr variable 
-+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec"); -+ add_referenced_var(new_fptr); -+ mark_sym_for_renaming(new_fptr); -+ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); -+ gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT); -+ update_stmt(assign_new_fptr); -+ -+ // replace call stmt fn with the new fptr -+ gimple_call_set_fn(call_stmt, new_fptr); -+ update_stmt(call_stmt); -+} -+ -+static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi) -+{ -+ gimple asm_or_stmt, call_stmt; -+ tree old_fptr, new_fptr, input, output; -+ VEC(tree, gc) *inputs = NULL; -+ VEC(tree, gc) *outputs = NULL; -+ -+ call_stmt = gsi_stmt(gsi); -+ old_fptr = gimple_call_fn(call_stmt); -+ -+ // create temporary fptr variable -+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); -+ add_referenced_var(new_fptr); -+ mark_sym_for_renaming(new_fptr); -+ -+ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); -+ input = build_tree_list(NULL_TREE, build_string(2, "0")); -+ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); -+ output = build_tree_list(NULL_TREE, build_string(3, "=r")); -+ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); -+ VEC_safe_push(tree, gc, inputs, input); -+ VEC_safe_push(tree, gc, outputs, output); -+ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); -+ gimple_asm_set_volatile(asm_or_stmt, true); -+ gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT); -+ update_stmt(asm_or_stmt); -+ -+ // replace call stmt fn with the new fptr -+ gimple_call_set_fn(call_stmt, new_fptr); -+ update_stmt(call_stmt); -+} -+ -+/* -+ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer -+ */ -+static unsigned int execute_kernexec_fptr(void) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ -+ // 1. loop through BBs and GIMPLE statements -+ FOR_EACH_BB(bb) { -+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { -+ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); -+ tree fn; -+ gimple call_stmt; -+ -+ // is it a call ... -+ call_stmt = gsi_stmt(gsi); -+ if (!is_gimple_call(call_stmt)) -+ continue; -+ fn = gimple_call_fn(call_stmt); -+ if (TREE_CODE(fn) == ADDR_EXPR) -+ continue; -+ if (TREE_CODE(fn) != SSA_NAME) -+ gcc_unreachable(); -+ -+ // ... 
through a function pointer -+ fn = SSA_NAME_VAR(fn); -+ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) -+ continue; -+ fn = TREE_TYPE(fn); -+ if (TREE_CODE(fn) != POINTER_TYPE) -+ continue; -+ fn = TREE_TYPE(fn); -+ if (TREE_CODE(fn) != FUNCTION_TYPE) -+ continue; -+ -+ kernexec_instrument_fptr(gsi); -+ -+//debug_tree(gimple_call_fn(call_stmt)); -+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); -+ } -+ } -+ -+ return 0; -+} -+ -+// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn -+static void kernexec_instrument_retaddr_bts(rtx insn) -+{ -+ rtx btsq; -+ rtvec argvec, constraintvec, labelvec; -+ int line; -+ -+ // create asm volatile("btsq $63,(%%rsp)":::) -+ argvec = rtvec_alloc(0); -+ constraintvec = rtvec_alloc(0); -+ labelvec = rtvec_alloc(0); -+ line = expand_location(RTL_LOCATION(insn)).line; -+ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); -+ MEM_VOLATILE_P(btsq) = 1; -+// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS -+ emit_insn_before(btsq, insn); -+} -+ -+// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn -+static void kernexec_instrument_retaddr_or(rtx insn) -+{ -+ rtx orq; -+ rtvec argvec, constraintvec, labelvec; -+ int line; -+ -+ // create asm volatile("orq %%r10,(%%rsp)":::) -+ argvec = rtvec_alloc(0); -+ constraintvec = rtvec_alloc(0); -+ labelvec = rtvec_alloc(0); -+ line = expand_location(RTL_LOCATION(insn)).line; -+ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); -+ MEM_VOLATILE_P(orq) = 1; -+// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS -+ emit_insn_before(orq, insn); -+} -+ -+/* -+ * find all asm level function returns and forcibly set the highest bit of the return address -+ */ -+static unsigned int execute_kernexec_retaddr(void) -+{ -+ rtx insn; -+ -+ // 1. 
find function returns -+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { -+ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil)) -+ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil)) -+ rtx body; -+ -+ // is it a retn -+ if (!JUMP_P(insn)) -+ continue; -+ body = PATTERN(insn); -+ if (GET_CODE(body) == PARALLEL) -+ body = XVECEXP(body, 0, 0); -+ if (GET_CODE(body) != RETURN) -+ continue; -+ kernexec_instrument_retaddr(insn); -+ } -+ -+// print_simple_rtl(stderr, get_insns()); -+// print_rtl(stderr, get_insns()); -+ -+ return 0; -+} -+ -+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) -+{ -+ const char * const plugin_name = plugin_info->base_name; -+ const int argc = plugin_info->argc; -+ const struct plugin_argument * const argv = plugin_info->argv; -+ int i; -+ struct register_pass_info kernexec_fptr_pass_info = { -+ .pass = &kernexec_fptr_pass.pass, -+ .reference_pass_name = "ssa", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_AFTER -+ }; -+ struct register_pass_info kernexec_retaddr_pass_info = { -+ .pass = &kernexec_retaddr_pass.pass, -+ .reference_pass_name = "pro_and_epilogue", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_AFTER -+ }; -+ -+ if (!plugin_default_version_check(version, &gcc_version)) { -+ error(G_("incompatible gcc/plugin versions")); -+ return 1; -+ } -+ -+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info); -+ -+ if (TARGET_64BIT == 0) -+ return 0; -+ -+ for (i = 0; i < argc; ++i) { -+ if (!strcmp(argv[i].key, "method")) { -+ if (!argv[i].value) { -+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ continue; -+ } -+ if (!strcmp(argv[i].value, "bts")) { -+ kernexec_instrument_fptr = kernexec_instrument_fptr_bts; -+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts; -+ } else if (!strcmp(argv[i].value, "or")) { -+ kernexec_instrument_fptr = kernexec_instrument_fptr_or; -+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or; -+ fix_register("r10", 1, 1); -+ } else -+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); -+ continue; -+ } -+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ } -+ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr) -+ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name); -+ -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info); -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info); -+ -+ return 0; -+} -diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c -new file mode 100644 -index 0000000..d44f37c ---- /dev/null -+++ b/tools/gcc/stackleak_plugin.c -@@ -0,0 +1,291 @@ -+/* -+ * Copyright 2011 by the PaX Team pageexec@freemail.hu -+ * Licensed under the GPL v2 -+ * -+ * Note: the choice of the license means that the compilation process is -+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, -+ * but for the kernel it doesn't matter since it doesn't link against -+ * any of the gcc libraries -+ * -+ * gcc plugin to help implement various PaX features -+ * -+ * - track lowest stack pointer -+ * -+ * TODO: -+ * - initialize all local variables -+ * -+ * BUGS: -+ * - none known -+ */ -+#include "gcc-plugin.h" -+#include "config.h" 
-+#include "system.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-pass.h" -+#include "flags.h" -+#include "intl.h" -+#include "toplev.h" -+#include "plugin.h" -+//#include "expr.h" where are you... -+#include "diagnostic.h" -+#include "plugin-version.h" -+#include "tm.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "rtl.h" -+#include "emit-rtl.h" -+ -+extern void print_gimple_stmt(FILE *, gimple, int, int); -+ -+int plugin_is_GPL_compatible; -+ -+static int track_frame_size = -1; -+static const char track_function[] = "pax_track_stack"; -+static const char check_function[] = "pax_check_alloca"; -+static bool init_locals; -+ -+static struct plugin_info stackleak_plugin_info = { -+ .version = "201111150100", -+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" -+// "initialize-locals\t\tforcibly initialize all stack frames\n" -+}; -+ -+static bool gate_stackleak_track_stack(void); -+static unsigned int execute_stackleak_tree_instrument(void); -+static unsigned int execute_stackleak_final(void); -+ -+static struct gimple_opt_pass stackleak_tree_instrument_pass = { -+ .pass = { -+ .type = GIMPLE_PASS, -+ .name = "stackleak_tree_instrument", -+ .gate = gate_stackleak_track_stack, -+ .execute = execute_stackleak_tree_instrument, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = PROP_gimple_leh | PROP_cfg, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, -+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa -+ } -+}; -+ -+static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { -+ .pass = { -+ .type = RTL_PASS, -+ .name = "stackleak_final", -+ .gate = gate_stackleak_track_stack, -+ .execute = execute_stackleak_final, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = TODO_dump_func -+ } -+}; -+ -+static bool gate_stackleak_track_stack(void) -+{ -+ return track_frame_size >= 0; -+} -+ -+static void stackleak_check_alloca(gimple_stmt_iterator gsi) -+{ -+ gimple check_alloca; -+ tree fndecl, fntype, alloca_size; -+ -+ // insert call to void pax_check_alloca(unsigned long size) -+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); -+ fndecl = build_fn_decl(check_function, fntype); -+ DECL_ASSEMBLER_NAME(fndecl); // for LTO -+ alloca_size = gimple_call_arg(gsi_stmt(gsi), 0); -+ check_alloca = gimple_build_call(fndecl, 1, alloca_size); -+ gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING); -+} -+ -+static void stackleak_add_instrumentation(gimple_stmt_iterator gsi) -+{ -+ gimple track_stack; -+ tree fndecl, fntype; -+ -+ // insert call to void pax_track_stack(void) -+ fntype = build_function_type_list(void_type_node, NULL_TREE); -+ fndecl = build_fn_decl(track_function, fntype); -+ DECL_ASSEMBLER_NAME(fndecl); // for LTO -+ track_stack = gimple_build_call(fndecl, 0); -+ gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING); -+} -+ -+#if BUILDING_GCC_VERSION == 4005 -+static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) -+{ -+ tree fndecl; -+ -+ if (!is_gimple_call(stmt)) -+ return false; -+ fndecl = gimple_call_fndecl(stmt); -+ if (!fndecl) -+ return false; -+ if 
(DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) -+ return false; -+// print_node(stderr, "pax", fndecl, 4); -+ return DECL_FUNCTION_CODE(fndecl) == code; -+} -+#endif -+ -+static bool is_alloca(gimple stmt) -+{ -+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) -+ return true; -+ -+#if BUILDING_GCC_VERSION >= 4007 -+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) -+ return true; -+#endif -+ -+ return false; -+} -+ -+static unsigned int execute_stackleak_tree_instrument(void) -+{ -+ basic_block bb, entry_bb; -+ bool prologue_instrumented = false; -+ -+ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb; -+ -+ // 1. loop through BBs and GIMPLE statements -+ FOR_EACH_BB(bb) { -+ gimple_stmt_iterator gsi; -+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { -+ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450> -+ if (!is_alloca(gsi_stmt(gsi))) -+ continue; -+ -+ // 2. insert stack overflow check before each __builtin_alloca call -+ stackleak_check_alloca(gsi); -+ -+ // 3. insert track call after each __builtin_alloca call -+ stackleak_add_instrumentation(gsi); -+ if (bb == entry_bb) -+ prologue_instrumented = true; -+ } -+ } -+ -+ // 4. insert track call at the beginning -+ if (!prologue_instrumented) { -+ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; -+ if (dom_info_available_p(CDI_DOMINATORS)) -+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); -+ stackleak_add_instrumentation(gsi_start_bb(bb)); -+ } -+ -+ return 0; -+} -+ -+static unsigned int execute_stackleak_final(void) -+{ -+ rtx insn; -+ -+ if (cfun->calls_alloca) -+ return 0; -+ -+ // keep calls only if function frame is big enough -+ if (get_frame_size() >= track_frame_size) -+ return 0; -+ -+ // 1. find pax_track_stack calls -+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { -+ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil)) -+ rtx body; -+ -+ if (!CALL_P(insn)) -+ continue; -+ body = PATTERN(insn); -+ if (GET_CODE(body) != CALL) -+ continue; -+ body = XEXP(body, 0); -+ if (GET_CODE(body) != MEM) -+ continue; -+ body = XEXP(body, 0); -+ if (GET_CODE(body) != SYMBOL_REF) -+ continue; -+ if (strcmp(XSTR(body, 0), track_function)) -+ continue; -+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); -+ // 2. 
delete call -+ insn = delete_insn_and_edges(insn); -+#if BUILDING_GCC_VERSION >= 4007 -+ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION) -+ insn = delete_insn_and_edges(insn); -+#endif -+ } -+ -+// print_simple_rtl(stderr, get_insns()); -+// print_rtl(stderr, get_insns()); -+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); -+ -+ return 0; -+} -+ -+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) -+{ -+ const char * const plugin_name = plugin_info->base_name; -+ const int argc = plugin_info->argc; -+ const struct plugin_argument * const argv = plugin_info->argv; -+ int i; -+ struct register_pass_info stackleak_tree_instrument_pass_info = { -+ .pass = &stackleak_tree_instrument_pass.pass, -+// .reference_pass_name = "tree_profile", -+ .reference_pass_name = "optimized", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_AFTER -+ }; -+ struct register_pass_info stackleak_final_pass_info = { -+ .pass = &stackleak_final_rtl_opt_pass.pass, -+ .reference_pass_name = "final", -+ .ref_pass_instance_number = 0, -+ .pos_op = PASS_POS_INSERT_BEFORE -+ }; -+ -+ if (!plugin_default_version_check(version, &gcc_version)) { -+ error(G_("incompatible gcc/plugin versions")); -+ return 1; -+ } -+ -+ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info); -+ -+ for (i = 0; i < argc; ++i) { -+ if (!strcmp(argv[i].key, "track-lowest-sp")) { -+ if (!argv[i].value) { -+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ continue; -+ } -+ track_frame_size = atoi(argv[i].value); -+ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0) -+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); -+ continue; -+ } -+ if (!strcmp(argv[i].key, "initialize-locals")) { -+ if (argv[i].value) { -+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); -+ continue; -+ } -+ init_locals = true; -+ continue; -+ } -+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -+ } -+ -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info); -+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info); -+ -+ return 0; -+} -diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h -index 6789d78..4afd019 100644 ---- a/tools/perf/util/include/asm/alternative-asm.h -+++ b/tools/perf/util/include/asm/alternative-asm.h -@@ -5,4 +5,7 @@ - - #define altinstruction_entry # - -+ .macro pax_force_retaddr rip=0, reload=0 -+ .endm -+ - #endif -diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c -index af0f22f..9a7d479 100644 ---- a/usr/gen_init_cpio.c -+++ b/usr/gen_init_cpio.c -@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location, - int retval; - int rc = -1; - int namesize; -- int i; -+ unsigned int i; - - mode |= S_IFREG; - -@@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location) - *env_var = *expanded = '\0'; - strncat(env_var, start + 2, end - start - 2); - strncat(expanded, new_location, start - new_location); -- strncat(expanded, getenv(env_var), PATH_MAX); -- strncat(expanded, end + 1, PATH_MAX); -+ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded)); -+ strncat(expanded, end + 1, PATH_MAX - strlen(expanded)); - 
strncpy(new_location, expanded, PATH_MAX); -+ new_location[PATH_MAX] = 0; - } else - break; - } -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c -index aefdda3..8e8fbb9 100644 ---- a/virt/kvm/kvm_main.c -+++ b/virt/kvm/kvm_main.c -@@ -73,7 +73,7 @@ LIST_HEAD(vm_list); - - static cpumask_var_t cpus_hardware_enabled; - static int kvm_usage_count = 0; --static atomic_t hardware_enable_failed; -+static atomic_unchecked_t hardware_enable_failed; - - struct kmem_cache *kvm_vcpu_cache; - EXPORT_SYMBOL_GPL(kvm_vcpu_cache); -@@ -2266,7 +2266,7 @@ static void hardware_enable_nolock(void *junk) - - if (r) { - cpumask_clear_cpu(cpu, cpus_hardware_enabled); -- atomic_inc(&hardware_enable_failed); -+ atomic_inc_unchecked(&hardware_enable_failed); - printk(KERN_INFO "kvm: enabling virtualization on " - "CPU%d failed\n", cpu); - } -@@ -2320,10 +2320,10 @@ static int hardware_enable_all(void) - - kvm_usage_count++; - if (kvm_usage_count == 1) { -- atomic_set(&hardware_enable_failed, 0); -+ atomic_set_unchecked(&hardware_enable_failed, 0); - on_each_cpu(hardware_enable_nolock, NULL, 1); - -- if (atomic_read(&hardware_enable_failed)) { -+ if (atomic_read_unchecked(&hardware_enable_failed)) { - hardware_disable_all_nolock(); - r = -EBUSY; - } -@@ -2588,7 +2588,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, - kvm_arch_vcpu_put(vcpu); - } - --int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, -+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, - struct module *module) - { - int r; -@@ -2651,7 +2651,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, - if (!vcpu_align) - vcpu_align = __alignof__(struct kvm_vcpu); - kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, -- 0, NULL); -+ SLAB_USERCOPY, NULL); - if (!kvm_vcpu_cache) { - r = -ENOMEM; - goto out_free_3; -@@ -2661,9 +2661,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, - if (r) - goto out_free; - -- kvm_chardev_ops.owner = module; -- kvm_vm_fops.owner = module; -- kvm_vcpu_fops.owner = module; -+ pax_open_kernel(); -+ *(void **)&kvm_chardev_ops.owner = module; -+ *(void **)&kvm_vm_fops.owner = module; -+ *(void **)&kvm_vcpu_fops.owner = module; -+ pax_close_kernel(); - - r = misc_register(&kvm_dev); - if (r) { diff --git a/kernel/patches/grsecurity-2.9-3.2.12-201203221944.patch b/kernel/patches/grsecurity-2.9-3.2.12-201203221944.patch new file mode 100644 index 0000000..94d7e91 --- /dev/null +++ b/kernel/patches/grsecurity-2.9-3.2.12-201203221944.patch @@ -0,0 +1,86164 @@ +diff --git a/Documentation/dontdiff b/Documentation/dontdiff +index dfa6fc6..df93044 100644 +--- a/Documentation/dontdiff ++++ b/Documentation/dontdiff +@@ -2,9 +2,11 @@ + *.aux + *.bin + *.bz2 ++*.c.[012]*.* + *.cis + *.cpio + *.csp ++*.dbg + *.dsp + *.dvi + *.elf +@@ -14,6 +16,7 @@ + *.gcov + *.gen.S + *.gif ++*.gmo + *.grep + *.grp + *.gz +@@ -48,9 +51,11 @@ + *.tab.h + *.tex + *.ver ++*.vim + *.xml + *.xz + *_MODULES ++*_reg_safe.h + *_vga16.c + *~ + #*# +@@ -70,6 +75,7 @@ Kerntypes + Module.markers + Module.symvers + PENDING ++PERF* + SCCS + System.map* + TAGS +@@ -93,19 +99,24 @@ bounds.h + bsetup + btfixupprep + build ++builtin-policy.h + bvmlinux + bzImage* + capability_names.h + capflags.c + classlist.h* ++clut_vga16.c ++common-cmds.h + comp*.log + compile.h* + conf + config + config-* + config_data.h* ++config.c + config.mak + config.mak.autogen ++config.tmp + conmakehash + consolemap_deftbl.c* + cpustr.h +@@ -116,9 +127,11 @@ devlist.h* + dnotify_test 
+ docproc + dslm ++dtc-lexer.lex.c + elf2ecoff + elfconfig.h* + evergreen_reg_safe.h ++exception_policy.conf + fixdep + flask.h + fore200e_mkfirm +@@ -126,12 +139,15 @@ fore200e_pca_fw.c* + gconf + gconf.glade.h + gen-devlist ++gen-kdb_cmds.c + gen_crc32table + gen_init_cpio + generated + genheaders + genksyms + *_gray256.c ++hash ++hid-example + hpet_example + hugepage-mmap + hugepage-shm +@@ -146,7 +162,7 @@ int32.c + int4.c + int8.c + kallsyms +-kconfig ++kern_constants.h + keywords.c + ksym.c* + ksym.h* +@@ -154,7 +170,7 @@ kxgettext + lkc_defs.h + lex.c + lex.*.c +-linux ++lib1funcs.S + logo_*.c + logo_*_clut224.c + logo_*_mono.c +@@ -166,14 +182,15 @@ machtypes.h + map + map_hugetlb + maui_boot.h +-media + mconf ++mdp + miboot* + mk_elfconfig + mkboot + mkbugboot + mkcpustr + mkdep ++mkpiggy + mkprep + mkregtable + mktables +@@ -209,6 +226,7 @@ r300_reg_safe.h + r420_reg_safe.h + r600_reg_safe.h + recordmcount ++regdb.c + relocs + rlim_names.h + rn50_reg_safe.h +@@ -219,6 +237,7 @@ setup + setup.bin + setup.elf + sImage ++slabinfo + sm_tbl* + split-include + syscalltab.h +@@ -229,6 +248,7 @@ tftpboot.img + timeconst.h + times.h* + trix_boot.h ++user_constants.h + utsrelease.h* + vdso-syms.lds + vdso.lds +@@ -246,7 +266,9 @@ vmlinux + vmlinux-* + vmlinux.aout + vmlinux.bin.all ++vmlinux.bin.bz2 + vmlinux.lds ++vmlinux.relocs + vmlinuz + voffset.h + vsyscall.lds +@@ -254,9 +276,11 @@ vsyscall_32.lds + wanxlfw.inc + uImage + unifdef ++utsrelease.h + wakeup.bin + wakeup.elf + wakeup.lds + zImage* + zconf.hash.c ++zconf.lex.c + zoffset.h +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 81c287f..d456d02 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + the specified number of seconds. This is to be used if + your oopses keep scrolling off the screen. + ++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain ++ virtualization environments that don't cope well with the ++ expand down segment used by UDEREF on X86-32 or the frequent ++ page table updates on X86-64. ++ ++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already. ++ + pcbit= [HW,ISDN] + + pcd. [PARIDE] +diff --git a/Makefile b/Makefile +index 15e80f1..4fb87db 100644 +--- a/Makefile ++++ b/Makefile +@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ + + HOSTCC = gcc + HOSTCXX = g++ +-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer +-HOSTCXXFLAGS = -O2 ++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks ++HOSTCLFAGS += $(call cc-option, -Wno-empty-body) ++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks + + # Decide whether to build built-in, modular, or both. + # Normally, just do built-in. 
+@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc + # Rules shared between *config targets and build targets + + # Basic helpers built in scripts/ +-PHONY += scripts_basic +-scripts_basic: ++PHONY += scripts_basic gcc-plugins ++scripts_basic: gcc-plugins + $(Q)$(MAKE) $(build)=scripts/basic + $(Q)rm -f .tmp_quiet_recordmcount + +@@ -564,6 +565,53 @@ else + KBUILD_CFLAGS += -O2 + endif + ++ifndef DISABLE_PAX_PLUGINS ++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y) ++ifndef DISABLE_PAX_CONSTIFY_PLUGIN ++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN ++endif ++ifdef CONFIG_PAX_MEMORY_STACKLEAK ++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN ++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100 ++endif ++ifdef CONFIG_KALLOCSTAT_PLUGIN ++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so ++endif ++ifdef CONFIG_PAX_KERNEXEC_PLUGIN ++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so ++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN ++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN ++endif ++ifdef CONFIG_CHECKER_PLUGIN ++ifeq ($(call cc-ifversion, -ge, 0406, y), y) ++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN ++endif ++endif ++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so ++ifdef CONFIG_PAX_SIZE_OVERFLOW ++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN ++endif ++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) ++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS) ++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS) ++export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN ++ifeq ($(KBUILD_EXTMOD),) ++gcc-plugins: ++ $(Q)$(MAKE) $(build)=tools/gcc ++else ++gcc-plugins: ; ++endif ++else ++gcc-plugins: ++ifeq ($(call cc-ifversion, -ge, 0405, y), y) ++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. 
If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)) ++else ++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least" ++endif ++ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure" ++endif ++endif ++ + include $(srctree)/arch/$(SRCARCH)/Makefile + + ifneq ($(CONFIG_FRAME_WARN),0) +@@ -708,7 +756,7 @@ export mod_strip_cmd + + + ifeq ($(KBUILD_EXTMOD),) +-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ ++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ + + vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ + $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ +@@ -932,6 +980,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE + + # The actual objects are generated when descending, + # make sure no implicit rule kicks in ++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; + + # Handle descending into subdirectories listed in $(vmlinux-dirs) +@@ -941,7 +991,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; + # Error messages still appears in the original language + + PHONY += $(vmlinux-dirs) +-$(vmlinux-dirs): prepare scripts ++$(vmlinux-dirs): gcc-plugins prepare scripts + $(Q)$(MAKE) $(build)=$@ + + # Store (new) KERNELRELASE string in include/config/kernel.release +@@ -985,6 +1035,7 @@ prepare0: archprepare FORCE + $(Q)$(MAKE) $(build)=. + + # All the preparing.. ++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) + prepare: prepare0 + + # Generate some files +@@ -1086,6 +1137,8 @@ all: modules + # using awk while concatenating to the final file. 
+ + PHONY += modules ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin + $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order + @$(kecho) ' Building modules, stage 2.'; +@@ -1101,7 +1154,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) + + # Target to prepare building external modules + PHONY += modules_prepare +-modules_prepare: prepare scripts ++modules_prepare: gcc-plugins prepare scripts + + # Target to install modules + PHONY += modules_install +@@ -1198,6 +1251,7 @@ distclean: mrproper + ( -name '*.orig' -o -name '*.rej' -o -name '*~' \ + -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ + -o -name '.*.rej' \ ++ -o -name '.*.rej' -o -name '*.so' \ + -o -name '*%' -o -name '.*.cmd' -o -name 'core' ) \ + -type f -print | xargs rm -f + +@@ -1358,6 +1412,8 @@ PHONY += $(module-dirs) modules + $(module-dirs): crmodverdir $(objtree)/Module.symvers + $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) + ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) + modules: $(module-dirs) + @$(kecho) ' Building modules, stage 2.'; + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost +@@ -1484,17 +1540,21 @@ else + target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) + endif + +-%.s: %.c prepare scripts FORCE ++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.s: %.c gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.i: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.o: %.c prepare scripts FORCE ++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.o: %.c gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.lst: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.s: %.S prepare scripts FORCE ++%.s: %.S gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +-%.o: %.S prepare scripts FORCE ++%.o: %.S gcc-plugins prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) + %.symtypes: %.c prepare scripts FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +@@ -1504,11 +1564,15 @@ endif + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +-%/: prepare scripts FORCE ++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%/: gcc-plugins prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) +-%.ko: prepare scripts FORCE ++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) ++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) ++%.ko: gcc-plugins prepare scripts FORCE + $(cmd_crmodverdir) + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ + $(build)=$(build-dir) $(@:.ko=.o) +diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h +index 640f909..48b6597 100644 +--- a/arch/alpha/include/asm/atomic.h ++++ b/arch/alpha/include/asm/atomic.h +@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + #define atomic_dec(v) atomic_sub(1,(v)) + #define atomic64_dec(v) atomic64_sub(1,(v)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) 
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() + #define smp_mb__before_atomic_inc() smp_mb() +diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h +index ad368a9..fbe0f25 100644 +--- a/arch/alpha/include/asm/cache.h ++++ b/arch/alpha/include/asm/cache.h +@@ -4,19 +4,19 @@ + #ifndef __ARCH_ALPHA_CACHE_H + #define __ARCH_ALPHA_CACHE_H + ++#include <linux/const.h> + + /* Bytes per L1 (data) cache line. */ + #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) +-# define L1_CACHE_BYTES 64 + # define L1_CACHE_SHIFT 6 + #else + /* Both EV4 and EV5 are write-through, read-allocate, + direct-mapped, physical. + */ +-# define L1_CACHE_BYTES 32 + # define L1_CACHE_SHIFT 5 + #endif + ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define SMP_CACHE_BYTES L1_CACHE_BYTES + + #endif +diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h +index da5449e..7418343 100644 +--- a/arch/alpha/include/asm/elf.h ++++ b/arch/alpha/include/asm/elf.h +@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) ++#endif ++ + /* $0 is set by ld.so to a pointer to a function which might be + registered using atexit. This provides a mean for the dynamic + linker to call DT_FINI functions for shared libraries that have +diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h +index de98a73..bd4f1f8 100644 +--- a/arch/alpha/include/asm/pgtable.h ++++ b/arch/alpha/include/asm/pgtable.h +@@ -101,6 +101,17 @@ struct vm_area_struct; + #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) + #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) + + #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) +diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c +index 2fd00b7..cfd5069 100644 +--- a/arch/alpha/kernel/module.c ++++ b/arch/alpha/kernel/module.c +@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + + /* The small sections were sorted to the end of the segment. 
+ The following should definitely cover them. */ +- gp = (u64)me->module_core + me->core_size - 0x8000; ++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; + got = sechdrs[me->arch.gotsecindex].sh_addr; + + for (i = 0; i < n; i++) { +diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c +index 01e8715..be0e80f 100644 +--- a/arch/alpha/kernel/osf_sys.c ++++ b/arch/alpha/kernel/osf_sys.c +@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, + /* At this point: (!vma || addr < vma->vm_end). */ + if (limit - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + vma = vma->vm_next; +@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + merely specific addresses, but regions of memory -- perhaps + this feature should be incorporated into all ports? */ + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); + if (addr != (unsigned long) -ENOMEM) +@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + } + + /* Next, try allocating at TASK_UNMAPPED_BASE. */ +- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), +- len, limit); ++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit); ++ + if (addr != (unsigned long) -ENOMEM) + return addr; + +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c +index fadd5f8..904e73a 100644 +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm) + __reload_thread(pcb); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int ldah, ldq, jmp; ++ ++ err = get_user(ldah, (unsigned int *)regs->pc); ++ err |= get_user(ldq, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmp, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U && ++ (ldq & 0xFFFF0000U) == 0xA77B0000U && ++ jmp == 0x6BFB0000U) ++ { ++ unsigned long r27, addr; ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; ++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; ++ ++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); ++ err = get_user(r27, (unsigned long *)addr); ++ if (err) ++ break; ++ ++ regs->r27 = r27; ++ regs->pc = r27; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ldah, lda, br; ++ ++ err = get_user(ldah, (unsigned int *)regs->pc); ++ err |= get_user(lda, (unsigned int *)(regs->pc+4)); ++ err |= get_user(br, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U && ++ (lda & 0xFFFF0000U) == 0xA77B0000U && ++ (br & 0xFFE00000U) == 0xC3E00000U) ++ { ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL; ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; ++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; ++ ++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 
0x8000UL) + 0x8000UL); ++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation */ ++ unsigned int br; ++ ++ err = get_user(br, (unsigned int *)regs->pc); ++ ++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) { ++ unsigned int br2, ldq, nop, jmp; ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; ++ ++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); ++ err = get_user(br2, (unsigned int *)addr); ++ err |= get_user(ldq, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ err |= get_user(jmp, (unsigned int *)(addr+12)); ++ err |= get_user(resolver, (unsigned long *)(addr+16)); ++ ++ if (err) ++ break; ++ ++ if (br2 == 0xC3600000U && ++ ldq == 0xA77B000CU && ++ nop == 0x47FF041FU && ++ jmp == 0x6B7B0000U) ++ { ++ regs->r28 = regs->pc+4; ++ regs->r27 = addr+16; ++ regs->pc = resolver; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif + + /* + * This routine handles page faults. It determines the address, +@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr, + good_area: + si_code = SEGV_ACCERR; + if (cause < 0) { +- if (!(vma->vm_flags & VM_EXEC)) ++ if (!(vma->vm_flags & VM_EXEC)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); ++ do_group_exit(SIGKILL); ++#else + goto bad_area; ++#endif ++ ++ } + } else if (!cause) { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) +diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h +index 86976d0..683de93 100644 +--- a/arch/arm/include/asm/atomic.h ++++ b/arch/arm/include/asm/atomic.h +@@ -15,6 +15,10 @@ + #include <linux/types.h> + #include <asm/system.h> + ++#ifdef CONFIG_GENERIC_ATOMIC64 ++#include <asm-generic/atomic64.h> ++#endif ++ + #define ATOMIC_INIT(i) { (i) } + + #ifdef __KERNEL__ +@@ -25,7 +29,15 @@ + * atomic_set() is the clrex or dummy strex done on every exception return. 
+ */ + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic_set(v,i) (((v)->counter) = (i)) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + + #if __LINUX_ARM_ARCH__ >= 6 + +@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v) + int result; + + __asm__ __volatile__("@ atomic_add\n" ++"1: ldrex %1, [%3]\n" ++" adds %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++} ++ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ __asm__ __volatile__("@ atomic_add_unchecked\n" + "1: ldrex %0, [%3]\n" + " add %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v) + smp_mb(); + + __asm__ __volatile__("@ atomic_add_return\n" ++"1: ldrex %1, [%3]\n" ++" adds %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++ ++ smp_mb(); ++ ++ return result; ++} ++ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__("@ atomic_add_return_unchecked\n" + "1: ldrex %0, [%3]\n" + " add %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v) + int result; + + __asm__ __volatile__("@ atomic_sub\n" ++"1: ldrex %1, [%3]\n" ++" subs %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strex %1, %0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "Ir" (i) ++ : "cc"); ++} ++ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ unsigned long tmp; ++ int result; ++ ++ __asm__ __volatile__("@ atomic_sub_unchecked\n" + "1: ldrex %0, [%3]\n" + " sub %0, %0, %4\n" + " strex %1, %0, [%3]\n" +@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v) + smp_mb(); + + __asm__ __volatile__("@ atomic_sub_return\n" +-"1: ldrex %0, [%3]\n" +-" sub %0, %0, %4\n" ++"1: ldrex %1, [%3]\n" ++" sub %0, %1, %4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strex %1, %0, [%3]\n" + " teq %1, #0\n" + " bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) + return oldval; + } + ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new) ++{ ++ unsigned long oldval, res; ++ ++ smp_mb(); ++ ++ do { ++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n" ++ "ldrex 
%1, [%3]\n" ++ "mov %0, #0\n" ++ "teq %1, %4\n" ++ "strexeq %0, %5, [%3]\n" ++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) ++ : "r" (&ptr->counter), "Ir" (old), "r" (new) ++ : "cc"); ++ } while (res); ++ ++ smp_mb(); ++ ++ return oldval; ++} ++ + static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) + { + unsigned long tmp, tmp2; +@@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) + #endif /* __LINUX_ARM_ARCH__ */ + + #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +@@ -219,11 +365,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + } + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + + #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v) == 0; ++} + #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) + +@@ -239,6 +401,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(i) { (i) } + + static inline u64 atomic64_read(atomic64_t *v) +@@ -254,6 +424,19 @@ static inline u64 atomic64_read(atomic64_t *v) + return result; + } + ++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v) ++{ ++ u64 result; ++ ++ __asm__ __volatile__("@ atomic64_read_unchecked\n" ++" ldrexd %0, %H0, [%1]" ++ : "=&r" (result) ++ : "r" (&v->counter), "Qo" (v->counter) ++ ); ++ ++ return result; ++} ++ + static inline void atomic64_set(atomic64_t *v, u64 i) + { + u64 tmp; +@@ -268,6 +451,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i) + : "cc"); + } + ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i) ++{ ++ u64 tmp; ++ ++ __asm__ __volatile__("@ atomic64_set_unchecked\n" ++"1: ldrexd %0, %H0, [%2]\n" ++" strexd %0, %3, %H3, [%2]\n" ++" teq %0, #0\n" ++" bne 1b" ++ : "=&r" (tmp), "=Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++} ++ + static inline void atomic64_add(u64 i, atomic64_t *v) + { + u64 result; +@@ -276,6 +473,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v) + __asm__ __volatile__("@ atomic64_add\n" + "1: ldrexd %0, %H0, [%3]\n" + " adds %0, %0, %4\n" ++" adcs %H0, %H0, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++} ++ ++static inline void atomic64_add_unchecked(u64 i, 
atomic64_unchecked_t *v) ++{ ++ u64 result; ++ unsigned long tmp; ++ ++ __asm__ __volatile__("@ atomic64_add_unchecked\n" ++"1: ldrexd %0, %H0, [%3]\n" ++" adds %0, %0, %4\n" + " adc %H0, %H0, %H4\n" + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" +@@ -287,12 +514,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v) + + static inline u64 atomic64_add_return(u64 i, atomic64_t *v) + { +- u64 result; +- unsigned long tmp; ++ u64 result, tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_return\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" adds %0, %1, %4\n" ++" adcs %H0, %H1, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++ ++ smp_mb(); ++ ++ return result; ++} ++ ++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v) ++{ ++ u64 result; ++ unsigned long tmp; ++ ++ smp_mb(); ++ ++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n" + "1: ldrexd %0, %H0, [%3]\n" + " adds %0, %0, %4\n" + " adc %H0, %H0, %H4\n" +@@ -316,6 +580,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) + __asm__ __volatile__("@ atomic64_sub\n" + "1: ldrexd %0, %H0, [%3]\n" + " subs %0, %0, %4\n" ++" sbcs %H0, %H0, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ ++" strexd %1, %0, %H0, [%3]\n" ++" teq %1, #0\n" ++" bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ ++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) ++ : "r" (&v->counter), "r" (i) ++ : "cc"); ++} ++ ++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v) ++{ ++ u64 result; ++ unsigned long tmp; ++ ++ __asm__ __volatile__("@ atomic64_sub_unchecked\n" ++"1: ldrexd %0, %H0, [%3]\n" ++" subs %0, %0, %4\n" + " sbc %H0, %H0, %H4\n" + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" +@@ -327,18 +621,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) + + static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) + { +- u64 result; +- unsigned long tmp; ++ u64 result, tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_sub_return\n" +-"1: ldrexd %0, %H0, [%3]\n" +-" subs %0, %0, %4\n" +-" sbc %H0, %H0, %H4\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" subs %0, %1, %4\n" ++" sbc %H0, %H1, %H4\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" + " bne 1b" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++"\n4:\n" ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); +@@ -372,6 +680,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) + return oldval; + } + ++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new) ++{ ++ u64 oldval; ++ unsigned long res; ++ ++ smp_mb(); ++ ++ do { ++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n" ++ "ldrexd %1, %H1, [%3]\n" ++ "mov %0, #0\n" ++ "teq %1, %4\n" ++ "teqeq %H1, %H4\n" ++ "strexdeq %0, %5, %H5, [%3]" ++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) ++ : "r" (&ptr->counter), "r" (old), "r" (new) ++ : "cc"); ++ } while (res); ++ ++ smp_mb(); ++ ++ return oldval; ++} ++ + static inline u64 
atomic64_xchg(atomic64_t *ptr, u64 new) + { + u64 result; +@@ -395,21 +727,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) + + static inline u64 atomic64_dec_if_positive(atomic64_t *v) + { +- u64 result; +- unsigned long tmp; ++ u64 result, tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_dec_if_positive\n" +-"1: ldrexd %0, %H0, [%3]\n" +-" subs %0, %0, #1\n" +-" sbc %H0, %H0, #0\n" ++"1: ldrexd %1, %H1, [%3]\n" ++" subs %0, %1, #1\n" ++" sbc %H0, %H1, #0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++" mov %0, %1\n" ++" mov %H0, %H1\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " teq %H0, #0\n" +-" bmi 2f\n" ++" bmi 4f\n" + " strexd %1, %0, %H0, [%3]\n" + " teq %1, #0\n" + " bne 1b\n" +-"2:" ++"4:\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter) + : "cc"); +@@ -432,13 +777,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) + " teq %0, %5\n" + " teqeq %H0, %H5\n" + " moveq %1, #0\n" +-" beq 2f\n" ++" beq 4f\n" + " adds %0, %0, %6\n" + " adc %H0, %H0, %H6\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" bvc 3f\n" ++"2: bkpt 0xf103\n" ++"3:\n" ++#endif ++ + " strexd %2, %0, %H0, [%4]\n" + " teq %2, #0\n" + " bne 1b\n" +-"2:" ++"4:\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ _ASM_EXTABLE(2b, 4b) ++#endif ++ + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); +@@ -451,10 +808,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) + + #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) + #define atomic64_inc(v) atomic64_add(1LL, (v)) ++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v)) + #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) ++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v)) + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) + #define atomic64_dec(v) atomic64_sub(1LL, (v)) ++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v)) + #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) + #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h +index 75fe66b..2255c86 100644 +--- a/arch/arm/include/asm/cache.h ++++ b/arch/arm/include/asm/cache.h +@@ -4,8 +4,10 @@ + #ifndef __ASMARM_CACHE_H + #define __ASMARM_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h +index d5d8d5c..ad92c96 100644 +--- a/arch/arm/include/asm/cacheflush.h ++++ b/arch/arm/include/asm/cacheflush.h +@@ -108,7 +108,7 @@ struct cpu_cache_fns { + void (*dma_unmap_area)(const void *, size_t, int); + + void (*dma_flush_range)(const void *, const void *); +-}; ++} __no_const; + + /* + * Select the calling method +diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h +index 0e9ce8d..6ef1e03 100644 +--- a/arch/arm/include/asm/elf.h ++++ b/arch/arm/include/asm/elf.h +@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + the loader. 
We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00008000UL ++ ++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) ++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) ++#endif + + /* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we +@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + extern void elf_set_personality(const struct elf32_hdr *); + #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) + +-struct mm_struct; +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + extern int vectors_user_mapping(void); + #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES +diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h +index e51b1e8..32a3113 100644 +--- a/arch/arm/include/asm/kmap_types.h ++++ b/arch/arm/include/asm/kmap_types.h +@@ -21,6 +21,7 @@ enum km_type { + KM_L1_CACHE, + KM_L2_CACHE, + KM_KDB, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h +index 53426c6..c7baff3 100644 +--- a/arch/arm/include/asm/outercache.h ++++ b/arch/arm/include/asm/outercache.h +@@ -35,7 +35,7 @@ struct outer_cache_fns { + #endif + void (*set_debug)(unsigned long); + void (*resume)(void); +-}; ++} __no_const; + + #ifdef CONFIG_OUTER_CACHE + +diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h +index ca94653..6ac0d56 100644 +--- a/arch/arm/include/asm/page.h ++++ b/arch/arm/include/asm/page.h +@@ -123,7 +123,7 @@ struct cpu_user_fns { + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); + void (*cpu_copy_user_highpage)(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +-}; ++} __no_const; + + #ifdef MULTI_USER + extern struct cpu_user_fns cpu_user; +diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h +index 984014b..a6d914f 100644 +--- a/arch/arm/include/asm/system.h ++++ b/arch/arm/include/asm/system.h +@@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, + + #define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ++#define xchg_unchecked(ptr,x) \ ++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + + extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +@@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void); + extern void cpu_init(void); + + void arm_machine_restart(char mode, const char *cmd); +-extern void (*arm_pm_restart)(char str, const char *cmd); ++extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn; + + #define UDBG_UNDEFINED (1 << 0) + #define UDBG_SYSCALL (1 << 1) +@@ -526,6 +528,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, + + #endif /* __LINUX_ARM_ARCH__ >= 6 */ + ++#define _ASM_EXTABLE(from, to) \ ++" .pushsection __ex_table,"a"\n"\ ++" .align 3\n" \ ++" .long " #from ", " #to"\n" \ ++" .popsection" ++ ++ + #endif /* __ASSEMBLY__ */ + + #define arch_align_stack(x) (x) +diff --git 
a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h +index b293616..96310e5 100644 +--- a/arch/arm/include/asm/uaccess.h ++++ b/arch/arm/include/asm/uaccess.h +@@ -22,6 +22,8 @@ + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + ++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++ + /* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is +@@ -387,8 +389,23 @@ do { \ + + + #ifdef CONFIG_MMU +-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); +-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); ++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); ++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); ++ ++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ return ___copy_from_user(to, from, n); ++} ++ ++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ return ___copy_to_user(to, from, n); ++} ++ + extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); + extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); + extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); +@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n); + + static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else /* security hole - plug it */ +@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u + + static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c +index 5b0bce6..becd81c 100644 +--- a/arch/arm/kernel/armksyms.c ++++ b/arch/arm/kernel/armksyms.c +@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user); + #ifdef CONFIG_MMU + EXPORT_SYMBOL(copy_page); + +-EXPORT_SYMBOL(__copy_from_user); +-EXPORT_SYMBOL(__copy_to_user); ++EXPORT_SYMBOL(___copy_from_user); ++EXPORT_SYMBOL(___copy_to_user); + EXPORT_SYMBOL(__clear_user); + + EXPORT_SYMBOL(__get_user_1); +diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c +index 3d0c6fb..9d326fa 100644 +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -28,7 +28,6 @@ + #include <linux/tick.h> + #include <linux/utsname.h> + #include <linux/uaccess.h> +-#include <linux/random.h> + #include <linux/hw_breakpoint.h> + #include <linux/cpuidle.h> + +@@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused) + __setup("nohlt", nohlt_setup); + __setup("hlt", hlt_setup); + +-void arm_machine_restart(char mode, const char *cmd) ++__noreturn void arm_machine_restart(char mode, const char *cmd) + { + /* Disable interrupts first */ + local_irq_disable(); +@@ 
-134,7 +133,7 @@ void arm_machine_restart(char mode, const char *cmd) + void (*pm_power_off)(void); + EXPORT_SYMBOL(pm_power_off); + +-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart; ++void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart; + EXPORT_SYMBOL_GPL(arm_pm_restart); + + static void do_nothing(void *unused) +@@ -248,6 +247,7 @@ void machine_power_off(void) + machine_shutdown(); + if (pm_power_off) + pm_power_off(); ++ BUG(); + } + + void machine_restart(char *cmd) +@@ -484,12 +484,6 @@ unsigned long get_wchan(struct task_struct *p) + return 0; + } + +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +-} +- + #ifdef CONFIG_MMU + /* + * The vectors page is always readable from user space for the +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 8fc2c8f..064c150 100644 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -108,13 +108,13 @@ struct processor processor __read_mostly; + struct cpu_tlb_fns cpu_tlb __read_mostly; + #endif + #ifdef MULTI_USER +-struct cpu_user_fns cpu_user __read_mostly; ++struct cpu_user_fns cpu_user __read_only; + #endif + #ifdef MULTI_CACHE +-struct cpu_cache_fns cpu_cache __read_mostly; ++struct cpu_cache_fns cpu_cache __read_only; + #endif + #ifdef CONFIG_OUTER_CACHE +-struct outer_cache_fns outer_cache __read_mostly; ++struct outer_cache_fns outer_cache __read_only; + EXPORT_SYMBOL(outer_cache); + #endif + +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 99a5727..a3d5bb1 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt + + static DEFINE_RAW_SPINLOCK(die_lock); + ++extern void gr_handle_kernel_exploit(void); ++ + /* + * This function is protected against re-entrancy. 
+ */ +@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); ++ ++ gr_handle_kernel_exploit(); ++ + if (ret != NOTIFY_STOP) + do_exit(SIGSEGV); + } +diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S +index 66a477a..bee61d3 100644 +--- a/arch/arm/lib/copy_from_user.S ++++ b/arch/arm/lib/copy_from_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_from_user(void *to, const void *from, size_t n) ++ * size_t ___copy_from_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -84,11 +84,11 @@ + + .text + +-ENTRY(__copy_from_user) ++ENTRY(___copy_from_user) + + #include "copy_template.S" + +-ENDPROC(__copy_from_user) ++ENDPROC(___copy_from_user) + + .pushsection .fixup,"ax" + .align 0 +diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S +index 6ee2f67..d1cce76 100644 +--- a/arch/arm/lib/copy_page.S ++++ b/arch/arm/lib/copy_page.S +@@ -10,6 +10,7 @@ + * ASM optimised string functions + */ + #include <linux/linkage.h> ++#include <linux/const.h> + #include <asm/assembler.h> + #include <asm/asm-offsets.h> + #include <asm/cache.h> +diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S +index d066df6..df28194 100644 +--- a/arch/arm/lib/copy_to_user.S ++++ b/arch/arm/lib/copy_to_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_to_user(void *to, const void *from, size_t n) ++ * size_t ___copy_to_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -88,11 +88,11 @@ + .text + + ENTRY(__copy_to_user_std) +-WEAK(__copy_to_user) ++WEAK(___copy_to_user) + + #include "copy_template.S" + +-ENDPROC(__copy_to_user) ++ENDPROC(___copy_to_user) + ENDPROC(__copy_to_user_std) + + .pushsection .fixup,"ax" +diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S +index d0ece2a..5ae2f39 100644 +--- a/arch/arm/lib/uaccess.S ++++ b/arch/arm/lib/uaccess.S +@@ -20,7 +20,7 @@ + + #define PAGE_SHIFT 12 + +-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) ++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory +@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault + sub r2, r2, ip + b .Lc2u_dest_aligned + +-ENTRY(__copy_to_user) ++ENTRY(___copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .Lc2u_not_enough +@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 + USER( T(strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished +-ENDPROC(__copy_to_user) ++ENDPROC(___copy_to_user) + + .pushsection .fixup,"ax" + .align 0 + 9001: ldmfd sp!, {r0, r4 - r7, pc} + .popsection + +-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); ++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory +@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault + sub r2, r2, ip + b .Lcfu_dest_aligned + +-ENTRY(__copy_from_user) ++ENTRY(___copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .Lcfu_not_enough +@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault + USER( T(ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished +-ENDPROC(__copy_from_user) ++ENDPROC(___copy_from_user) + + 
.pushsection .fixup,"ax" + .align 0 +diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c +index 025f742..8432b08 100644 +--- a/arch/arm/lib/uaccess_with_memcpy.c ++++ b/arch/arm/lib/uaccess_with_memcpy.c +@@ -104,7 +104,7 @@ out: + } + + unsigned long +-__copy_to_user(void __user *to, const void *from, unsigned long n) ++___copy_to_user(void __user *to, const void *from, unsigned long n) + { + /* + * This test is stubbed out of the main function above to keep +diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c +index e9d5f4a..f099699 100644 +--- a/arch/arm/mach-omap2/board-n8x0.c ++++ b/arch/arm/mach-omap2/board-n8x0.c +@@ -593,7 +593,7 @@ static int n8x0_menelaus_late_init(struct device *dev) + } + #endif + +-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { ++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { + .late_init = n8x0_menelaus_late_init, + }; + +diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c +index 2b2d51c..0127490 100644 +--- a/arch/arm/mach-ux500/mbox-db5500.c ++++ b/arch/arm/mach-ux500/mbox-db5500.c +@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev, + return sprintf(buf, "0x%X\n", mbox_value); + } + +-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo); ++static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo); + + static int mbox_show(struct seq_file *s, void *data) + { +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c +index aa33949..d366075 100644 +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (fsr & FSR_LNX_PF) { ++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + tsk->thread.address = addr; + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; +@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + } + #endif /* CONFIG_MMU */ + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (__force unsigned char __user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-4: "); ++ for (i = -1; i < 20; i++) { ++ unsigned long c; ++ if (get_user(c, (__force unsigned long __user *)sp+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08lx ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * First Level Translation Fault Handler + * +@@ -628,6 +662,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) + const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + struct siginfo info; + ++#ifdef CONFIG_PAX_REFCOUNT ++ if (fsr_fs(ifsr) == 2) { ++ unsigned int bkpt; ++ ++ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) { ++ current->thread.error_code = ifsr; ++ current->thread.trap_no = 0; ++ pax_report_refcount_overflow(regs); ++ fixup_exception(regs); ++ return; ++ } ++ } ++#endif ++ + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) + return; + +diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c +index 44b628e..623ee2a 100644 +--- a/arch/arm/mm/mmap.c ++++ b/arch/arm/mm/mmap.c +@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (len > TASK_SIZE) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + /* 8 bits of randomness in 20 address space bits */ + if ((current->flags & PF_RANDOMIZE) && +@@ -89,14 +92,14 @@ full_search: + * Start a new search - just in case we missed + * some holes. 
+ */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h +index 4c1a363..df311d0 100644 +--- a/arch/arm/plat-samsung/include/plat/dma-ops.h ++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h +@@ -41,7 +41,7 @@ struct samsung_dma_ops { + int (*started)(unsigned ch); + int (*flush)(unsigned ch); + int (*stop)(unsigned ch); +-}; ++} __no_const; + + extern void *samsung_dmadev_get_ops(void); + extern void *s3c_dma_get_ops(void); +diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h +index 5f28cae..3d23723 100644 +--- a/arch/arm/plat-samsung/include/plat/ehci.h ++++ b/arch/arm/plat-samsung/include/plat/ehci.h +@@ -14,7 +14,7 @@ + struct s5p_ehci_platdata { + int (*phy_init)(struct platform_device *pdev, int type); + int (*phy_exit)(struct platform_device *pdev, int type); +-}; ++} __no_const; + + extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd); + +diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h +index c3a58a1..78fbf54 100644 +--- a/arch/avr32/include/asm/cache.h ++++ b/arch/avr32/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef __ASM_AVR32_CACHE_H + #define __ASM_AVR32_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h +index 3b3159b..425ea94 100644 +--- a/arch/avr32/include/asm/elf.h ++++ b/arch/avr32/include/asm/elf.h +@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00001000UL ++ ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
This could be done in user space, +diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h +index b7f5c68..556135c 100644 +--- a/arch/avr32/include/asm/kmap_types.h ++++ b/arch/avr32/include/asm/kmap_types.h +@@ -22,7 +22,8 @@ D(10) KM_IRQ0, + D(11) KM_IRQ1, + D(12) KM_SOFTIRQ0, + D(13) KM_SOFTIRQ1, +-D(14) KM_TYPE_NR ++D(14) KM_CLEARPAGE, ++D(15) KM_TYPE_NR + }; + + #undef D +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c +index f7040a1..db9f300 100644 +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) + + int exception_trace = 1; + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. +@@ -156,6 +173,16 @@ bad_area: + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + if (exception_trace && printk_ratelimit()) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", +diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h +index 568885a..f8008df 100644 +--- a/arch/blackfin/include/asm/cache.h ++++ b/arch/blackfin/include/asm/cache.h +@@ -7,6 +7,7 @@ + #ifndef __ARCH_BLACKFIN_CACHE_H + #define __ARCH_BLACKFIN_CACHE_H + ++#include <linux/const.h> + #include <linux/linkage.h> /* for asmlinkage */ + + /* +@@ -14,7 +15,7 @@ + * Blackfin loads 32 bytes for cache + */ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define SMP_CACHE_BYTES L1_CACHE_BYTES + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES +diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h +index aea2718..3639a60 100644 +--- a/arch/cris/include/arch-v10/arch/cache.h ++++ b/arch/cris/include/arch-v10/arch/cache.h +@@ -1,8 +1,9 @@ + #ifndef _ASM_ARCH_CACHE_H + #define _ASM_ARCH_CACHE_H + ++#include <linux/const.h> + /* Etrax 100LX have 32-byte cache-lines. */ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_ARCH_CACHE_H */ +diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h +index 1de779f..336fad3 100644 +--- a/arch/cris/include/arch-v32/arch/cache.h ++++ b/arch/cris/include/arch-v32/arch/cache.h +@@ -1,11 +1,12 @@ + #ifndef _ASM_CRIS_ARCH_CACHE_H + #define _ASM_CRIS_ARCH_CACHE_H + ++#include <linux/const.h> + #include <arch/hwregs/dma.h> + + /* A cache-line is 32 bytes. 
*/ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data.read_mostly"))) + +diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h +index 0d8a7d6..d0c9ff5 100644 +--- a/arch/frv/include/asm/atomic.h ++++ b/arch/frv/include/asm/atomic.h +@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); + #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) + #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + { + int c, old; +diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h +index 2797163..c2a401d 100644 +--- a/arch/frv/include/asm/cache.h ++++ b/arch/frv/include/asm/cache.h +@@ -12,10 +12,11 @@ + #ifndef __ASM_CACHE_H + #define __ASM_CACHE_H + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) + #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) +diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h +index f8e16b2..c73ff79 100644 +--- a/arch/frv/include/asm/kmap_types.h ++++ b/arch/frv/include/asm/kmap_types.h +@@ -23,6 +23,7 @@ enum km_type { + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c +index 385fd30..6c3d97e 100644 +--- a/arch/frv/mm/elf-fdpic.c ++++ b/arch/frv/mm/elf-fdpic.c +@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + goto success; + } + +@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } +@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } +diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h +index c635028..6d9445a 100644 +--- a/arch/h8300/include/asm/cache.h ++++ b/arch/h8300/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef __ARCH_H8300_CACHE_H + #define __ARCH_H8300_CACHE_H + ++#include <linux/const.h> ++ + 
/* bytes per L1 cache line */ +-#define L1_CACHE_BYTES 4 ++#define L1_CACHE_BYTES _AC(4,UL) + + /* m68k-elf-gcc 2.95.2 doesn't like these */ + +diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h +index 0f01de2..d37d309 100644 +--- a/arch/hexagon/include/asm/cache.h ++++ b/arch/hexagon/include/asm/cache.h +@@ -21,9 +21,11 @@ + #ifndef __ASM_CACHE_H + #define __ASM_CACHE_H + ++#include <linux/const.h> ++ + /* Bytes per L1 cache line */ +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __aligned(L1_CACHE_BYTES) + #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) +diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h +index 3fad89e..3047da5 100644 +--- a/arch/ia64/include/asm/atomic.h ++++ b/arch/ia64/include/asm/atomic.h +@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v) + #define atomic64_inc(v) atomic64_add(1, (v)) + #define atomic64_dec(v) atomic64_sub(1, (v)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic operations are already serializing */ + #define smp_mb__before_atomic_dec() barrier() + #define smp_mb__after_atomic_dec() barrier() +diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h +index 988254a..e1ee885 100644 +--- a/arch/ia64/include/asm/cache.h ++++ b/arch/ia64/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef _ASM_IA64_CACHE_H + #define _ASM_IA64_CACHE_H + ++#include <linux/const.h> + + /* + * Copyright (C) 1998-2000 Hewlett-Packard Co +@@ -9,7 +10,7 @@ + + /* Bytes per L1 (data) cache line. */ + #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SMP + # define SMP_CACHE_SHIFT L1_CACHE_SHIFT +diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h +index b5298eb..67c6e62 100644 +--- a/arch/ia64/include/asm/elf.h ++++ b/arch/ia64/include/asm/elf.h +@@ -42,6 +42,13 @@ + */ + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) ++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) ++#endif ++ + #define PT_IA_64_UNWIND 0x70000001 + + /* IA-64 relocations: */ +diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h +index 1a97af3..7529d31 100644 +--- a/arch/ia64/include/asm/pgtable.h ++++ b/arch/ia64/include/asm/pgtable.h +@@ -12,7 +12,7 @@ + * David Mosberger-Tang davidm@hpl.hp.com + */ + +- ++#include <linux/const.h> + #include <asm/mman.h> + #include <asm/page.h> + #include <asm/processor.h> +@@ -143,6 +143,17 @@ + #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) ++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++# define PAGE_COPY_NOEXEC PAGE_COPY ++#endif ++ + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) + #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) + #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) +diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h +index b77768d..e0795eb 100644 +--- a/arch/ia64/include/asm/spinlock.h ++++ b/arch/ia64/include/asm/spinlock.h +@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; + + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); +- ACCESS_ONCE(*p) = (tmp + 2) & ~1; ++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; + } + + static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) +diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h +index 449c8c0..432a3d2 100644 +--- a/arch/ia64/include/asm/uaccess.h ++++ b/arch/ia64/include/asm/uaccess.h +@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + const void *__cu_from = (from); \ + long __cu_len = (n); \ + \ +- if (__access_ok(__cu_to, __cu_len, get_fs())) \ ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ + __cu_len; \ + }) +@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + long __cu_len = (n); \ + \ + __chk_user_ptr(__cu_from); \ +- if (__access_ok(__cu_from, __cu_len, get_fs())) \ ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \ + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ + __cu_len; \ + }) +diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c +index 24603be..948052d 100644 +--- a/arch/ia64/kernel/module.c ++++ b/arch/ia64/kernel/module.c +@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) + void + module_free (struct module *mod, void *module_region) + { +- if (mod && mod->arch.init_unw_table && +- module_region == mod->module_init) { ++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { + unw_remove_unwind_table(mod->arch.init_unw_table); + mod->arch.init_unw_table = NULL; + } +@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char 
*secstrings, + } + + static inline int ++in_init_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; ++} ++ ++static inline int ++in_init_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; ++} ++ ++static inline int + in_init (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_init < mod->init_size; ++ return in_init_rx(mod, addr) || in_init_rw(mod, addr); ++} ++ ++static inline int ++in_core_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; ++} ++ ++static inline int ++in_core_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; + } + + static inline int + in_core (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_core < mod->core_size; ++ return in_core_rx(mod, addr) || in_core_rw(mod, addr); + } + + static inline int +@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, + break; + + case RV_BDREL: +- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); ++ if (in_init_rx(mod, val)) ++ val -= (uint64_t) mod->module_init_rx; ++ else if (in_init_rw(mod, val)) ++ val -= (uint64_t) mod->module_init_rw; ++ else if (in_core_rx(mod, val)) ++ val -= (uint64_t) mod->module_core_rx; ++ else if (in_core_rw(mod, val)) ++ val -= (uint64_t) mod->module_core_rw; + break; + + case RV_LTV: +@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind + * addresses have been selected... + */ + uint64_t gp; +- if (mod->core_size > MAX_LTOFF) ++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) + /* + * This takes advantage of fact that SHF_ARCH_SMALL gets allocated + * at the end of the module. + */ +- gp = mod->core_size - MAX_LTOFF / 2; ++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; + else +- gp = mod->core_size / 2; +- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); ++ gp = (mod->core_size_rx + mod->core_size_rw) / 2; ++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); + mod->arch.gp = gp; + DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); + } +diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c +index 609d500..7dde2a8 100644 +--- a/arch/ia64/kernel/sys_ia64.c ++++ b/arch/ia64/kernel/sys_ia64.c +@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + if (REGION_NUMBER(addr) == RGN_HPAGE) + addr = 0; + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ addr = mm->free_area_cache; ++ else ++#endif ++ + if (!addr) + addr = mm->free_area_cache; + +@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { +- if (start_addr != TASK_UNMAPPED_BASE) { ++ if (start_addr != mm->mmap_base) { + /* Start a new search --- just in case we missed some holes. 
*/ +- addr = TASK_UNMAPPED_BASE; ++ addr = mm->mmap_base; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* Remember the address where we stopped this search: */ + mm->free_area_cache = addr + len; + return addr; +diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S +index 53c0ba0..2accdde 100644 +--- a/arch/ia64/kernel/vmlinux.lds.S ++++ b/arch/ia64/kernel/vmlinux.lds.S +@@ -199,7 +199,7 @@ SECTIONS { + /* Per-cpu data: */ + . = ALIGN(PERCPU_PAGE_SIZE); + PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) +- __phys_per_cpu_start = __per_cpu_load; ++ __phys_per_cpu_start = per_cpu_load; + /* + * ensure percpu data fits + * into percpu page size +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c +index 20b3593..1ce77f0 100644 +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address) + return pte_present(pte); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + void __kprobes + ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) + { +@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re + mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) + | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); + +- if ((vma->vm_flags & mask) != mask) ++ if ((vma->vm_flags & mask) != mask) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; + ++ } ++ + /* + * If for any reason at all we couldn't handle the fault, make + * sure we exit gracefully rather than endlessly redo the +diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c +index 5ca674b..e0e1b70 100644 +--- a/arch/ia64/mm/hugetlbpage.c ++++ b/arch/ia64/mm/hugetlbpage.c +@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) + return -ENOMEM; +- if (!vmm || (addr + len) <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = ALIGN(vmm->vm_end, HPAGE_SIZE); + } +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c +index 00cb0e2..2ad8024 100644 +--- a/arch/ia64/mm/init.c ++++ b/arch/ia64/mm/init.c +@@ -120,6 +120,19 @@ ia64_init_addr_space (void) + vma->vm_start = current->thread.rbs_bot & PAGE_MASK; + vma->vm_end = vma->vm_start + PAGE_SIZE; + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { ++ vma->vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ vma->vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + down_write(¤t->mm->mmap_sem); + if (insert_vm_struct(current->mm, vma)) { +diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h +index 40b3ee9..8c2c112 100644 +--- a/arch/m32r/include/asm/cache.h ++++ b/arch/m32r/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef _ASM_M32R_CACHE_H + #define _ASM_M32R_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache line size */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_M32R_CACHE_H */ +diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c +index 82abd15..d95ae5d 100644 +--- a/arch/m32r/lib/usercopy.c ++++ b/arch/m32r/lib/usercopy.c +@@ -14,6 +14,9 @@ + unsigned long + __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetch(from); + if (access_ok(VERIFY_WRITE, to, n)) + __copy_user(to,from,n); +@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + unsigned long + __generic_copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetchw(to); + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to,from,n); +diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h +index 0395c51..5f26031 100644 +--- a/arch/m68k/include/asm/cache.h ++++ b/arch/m68k/include/asm/cache.h +@@ -4,9 +4,11 @@ + #ifndef __ARCH_M68K_CACHE_H + #define __ARCH_M68K_CACHE_H + ++#include <linux/const.h> ++ + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h +index 4efe96a..60e8699 100644 +--- a/arch/microblaze/include/asm/cache.h ++++ b/arch/microblaze/include/asm/cache.h +@@ -13,11 +13,12 @@ + #ifndef _ASM_MICROBLAZE_CACHE_H + #define _ASM_MICROBLAZE_CACHE_H + ++#include <linux/const.h> + #include <asm/registers.h> + + #define L1_CACHE_SHIFT 5 + /* word-granular cache in microblaze */ +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h +index 1d93f81..67794d0 100644 +--- a/arch/mips/include/asm/atomic.h ++++ b/arch/mips/include/asm/atomic.h +@@ -21,6 +21,10 @@ + #include <asm/war.h> + #include <asm/system.h> + ++#ifdef CONFIG_GENERIC_ATOMIC64 ++#include <asm-generic/atomic64.h> ++#endif 
++ + #define ATOMIC_INIT(i) { (i) } + + /* +@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + */ + #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* CONFIG_64BIT */ + + /* +diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h +index b4db69f..8f3b093 100644 +--- a/arch/mips/include/asm/cache.h ++++ b/arch/mips/include/asm/cache.h +@@ -9,10 +9,11 @@ + #ifndef _ASM_CACHE_H + #define _ASM_CACHE_H + ++#include <linux/const.h> + #include <kmalloc.h> + + #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_SHIFT L1_CACHE_SHIFT + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h +index 455c0ac..ad65fbe 100644 +--- a/arch/mips/include/asm/elf.h ++++ b/arch/mips/include/asm/elf.h +@@ -372,13 +372,16 @@ extern const char *__elf_platform; + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + #endif + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + struct linux_binprm; + extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +-struct mm_struct; +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* _ASM_ELF_H */ +diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h +index e59cd1a..8e329d6 100644 +--- a/arch/mips/include/asm/page.h ++++ b/arch/mips/include/asm/page.h +@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, + #ifdef CONFIG_CPU_MIPS32 + typedef struct { unsigned long pte_low, pte_high; } pte_t; + #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) +- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) ++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) + #else + typedef struct { unsigned long long pte; } pte_t; + #define pte_val(x) ((x).pte) +diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h +index 6018c80..7c37203 100644 +--- a/arch/mips/include/asm/system.h ++++ b/arch/mips/include/asm/system.h +@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void); + */ + #define __ARCH_WANT_UNLOCKED_CTXSW + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* _ASM_SYSTEM_H */ +diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c +index 9fdd8bc..4bd7f1a 100644 +--- a/arch/mips/kernel/binfmt_elfn32.c ++++ b/arch/mips/kernel/binfmt_elfn32.c +@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + #include <linux/module.h> + #include <linux/elfcore.h> +diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c +index ff44823..97f8906 100644 +--- a/arch/mips/kernel/binfmt_elfo32.c ++++ b/arch/mips/kernel/binfmt_elfo32.c +@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + + /* +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index c47f96e..661d418 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task) + out: + return pc; + } +- +-/* +- * Don't forget that the stack pointer must be aligned on a 8 bytes +- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. 
+- */ +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- +- return sp & ALMASK; +-} +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c +index 937cf33..adb39bb 100644 +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -28,6 +28,23 @@ + #include <asm/highmem.h> /* For VMALLOC_END */ + #include <linux/kdebug.h> + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate +diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c +index 302d779..7d35bf8 100644 +--- a/arch/mips/mm/mmap.c ++++ b/arch/mips/mm/mmap.c +@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + do_color_align = 1; + + /* requesting a specific address */ ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len)) + return addr; + } + +@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vma->vm_end; + if (do_color_align) +@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr - len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vmm, addr - len, len)) + /* cache the address as a hint for next time */ + return mm->free_area_cache = addr - len; + } +@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (check_heap_stack_gap(vmm, addr, len)) { + /* cache the address as a hint for next time */ + return mm->free_area_cache = addr; + } +@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + mm->unmap_area = arch_unmap_area_topdown; + } + } +- +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = get_random_int(); +- +- rnd = rnd << PAGE_SHIFT; +- /* 8MB for 32bit, 256MB for 64bit */ +- if (TASK_IS_32BIT_ADDR) +- rnd = rnd & 0x7ffffful; +- else +- rnd = rnd & 0xffffffful; +- +- return rnd; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h +index 967d144..db12197 100644 +--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h ++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h +@@ -11,12 +11,14 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache */ + + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 256 /* number of entries in each way */ +-#define L1_CACHE_BYTES 16 /* bytes per entry */ + #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +index bcb5df2..84fabd2 100644 +--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h ++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +@@ -16,13 +16,15 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* + * L1 cache + */ + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 128 /* number of entries in each way */ +-#define L1_CACHE_BYTES 32 /* bytes per entry */ + #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h +index 4ce7a01..449202a 100644 +--- a/arch/openrisc/include/asm/cache.h ++++ b/arch/openrisc/include/asm/cache.h +@@ -19,11 +19,13 @@ + #ifndef __ASM_OPENRISC_CACHE_H + #define __ASM_OPENRISC_CACHE_H + ++#include 
<linux/const.h> ++ + /* FIXME: How can we replace these with values from the CPU... + * they shouldn't be hard-coded! + */ + +-#define L1_CACHE_BYTES 16 + #define L1_CACHE_SHIFT 4 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* __ASM_OPENRISC_CACHE_H */ +diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h +index 4054b31..a10c105 100644 +--- a/arch/parisc/include/asm/atomic.h ++++ b/arch/parisc/include/asm/atomic.h +@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* !CONFIG_64BIT */ + + +diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h +index 47f11c7..3420df2 100644 +--- a/arch/parisc/include/asm/cache.h ++++ b/arch/parisc/include/asm/cache.h +@@ -5,6 +5,7 @@ + #ifndef __ARCH_PARISC_CACHE_H + #define __ARCH_PARISC_CACHE_H + ++#include <linux/const.h> + + /* + * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have +@@ -15,13 +16,13 @@ + * just ruin performance. + */ + #ifdef CONFIG_PA20 +-#define L1_CACHE_BYTES 64 + #define L1_CACHE_SHIFT 6 + #else +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 + #endif + ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) ++ + #ifndef __ASSEMBLY__ + + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h +index 19f6cb1..6c78cf2 100644 +--- a/arch/parisc/include/asm/elf.h ++++ b/arch/parisc/include/asm/elf.h +@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. 
*/ +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h +index 22dadeb..f6c2be4 100644 +--- a/arch/parisc/include/asm/pgtable.h ++++ b/arch/parisc/include/asm/pgtable.h +@@ -210,6 +210,17 @@ struct vm_area_struct; + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) + #define PAGE_COPY PAGE_EXECREAD + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) + #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) + #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) +diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c +index 5e34ccf..672bc9c 100644 +--- a/arch/parisc/kernel/module.c ++++ b/arch/parisc/kernel/module.c +@@ -98,16 +98,38 @@ + + /* three functions to determine where in the module core + * or init pieces the location is */ ++static inline int in_init_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rx && ++ loc < (me->module_init_rx + me->init_size_rx)); ++} ++ ++static inline int in_init_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rw && ++ loc < (me->module_init_rw + me->init_size_rw)); ++} ++ + static inline int in_init(struct module *me, void *loc) + { +- return (loc >= me->module_init && +- loc <= (me->module_init + me->init_size)); ++ return in_init_rx(me, loc) || in_init_rw(me, loc); ++} ++ ++static inline int in_core_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rx && ++ loc < (me->module_core_rx + me->core_size_rx)); ++} ++ ++static inline int in_core_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rw && ++ loc < (me->module_core_rw + me->core_size_rw)); + } + + static inline int in_core(struct module *me, void *loc) + { +- return (loc >= me->module_core && +- loc <= (me->module_core + me->core_size)); ++ return in_core_rx(me, loc) || in_core_rw(me, loc); + } + + static inline int in_local(struct module *me, void *loc) +@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, + } + + /* align things a bit */ +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.got_offset = me->core_size; +- me->core_size += gots * sizeof(struct got_entry); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += gots * sizeof(struct got_entry); + +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.fdesc_offset = me->core_size; +- me->core_size += fdescs * sizeof(Elf_Fdesc); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.fdesc_offset = me->core_size_rw; ++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); + + me->arch.got_max = gots; + me->arch.fdesc_max = fdescs; +@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + + BUG_ON(value == 0); + +- got = me->module_core + me->arch.got_offset; ++ got = me->module_core_rw + me->arch.got_offset; + for (i = 0; got[i].addr; i++) + if 
(got[i].addr == value) + goto out; +@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + #ifdef CONFIG_64BIT + static Elf_Addr get_fdesc(struct module *me, unsigned long value) + { +- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; ++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; + + if (!value) { + printk(KERN_ERR "%s: zero OPD requested!\n", me->name); +@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) + + /* Create new one */ + fdesc->addr = value; +- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + return (Elf_Addr)fdesc; + } + #endif /* CONFIG_64BIT */ +@@ -845,7 +867,7 @@ register_unwind_table(struct module *me, + + table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; + end = table + sechdrs[me->arch.unwind_section].sh_size; +- gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + + DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", + me->arch.unwind_section, table, end, gp); +diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c +index c9b9322..02d8940 100644 +--- a/arch/parisc/kernel/sys_parisc.c ++++ b/arch/parisc/kernel/sys_parisc.c +@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len) + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + } +@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping, + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; + if (addr < vma->vm_end) /* handle wraparound */ +@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (flags & MAP_FIXED) + return addr; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + if (filp) { + addr = get_shared_area(filp->f_mapping, addr, len, pgoff); +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c +index f19e660..414fe24 100644 +--- a/arch/parisc/kernel/traps.c ++++ b/arch/parisc/kernel/traps.c +@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm,regs->iaoq[0]); +- if (vma && (regs->iaoq[0] >= vma->vm_start) +- && (vma->vm_flags & VM_EXEC)) { +- ++ if (vma && (regs->iaoq[0] >= vma->vm_start)) { + fault_address = regs->iaoq[0]; + fault_space = regs->iasq[0]; + +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index 18162ce..94de376 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -15,6 +15,7 @@ + #include <linux/sched.h> + #include <linux/interrupt.h> + #include <linux/module.h> ++#include <linux/unistd.h> + + #include <asm/uaccess.h> + #include <asm/traps.h> +@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data); + static unsigned long + parisc_acctyp(unsigned long code, unsigned int inst) + { +- if (code == 6 || code == 16) ++ if (code == 6 || code == 7 || code == 16) + return VM_EXEC; + + switch (inst & 0xf0000000) { +@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when rt_sigreturn trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: unpatched PLT emulation */ ++ unsigned int bl, depwi; ++ ++ err = get_user(bl, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); ++ ++ if (err) ++ break; ++ ++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { ++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; ++ ++ err = get_user(ldw, (unsigned int *)addr); ++ err |= get_user(bv, (unsigned int *)(addr+4)); ++ err |= get_user(ldw2, (unsigned int *)(addr+8)); ++ ++ if (err) ++ break; ++ ++ if (ldw == 0x0E801096U && ++ bv == 0xEAC0C000U && ++ ldw2 == 0x0E881095U) ++ { ++ unsigned int resolver, map; ++ ++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); ++ if (err) ++ break; ++ ++ regs->gr[20] = instruction_pointer(regs)+8; ++ regs->gr[21] = map; ++ regs->gr[22] = resolver; ++ regs->iaoq[0] = resolver | 3UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ ++#ifndef CONFIG_PAX_EMUSIGRT ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++#endif ++ ++ do { /* PaX: rt_sigreturn emulation */ ++ unsigned int ldi1, ldi2, bel, nop; ++ ++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(ldi2, (unsigned 
int *)(instruction_pointer(regs)+4)); ++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); ++ ++ if (err) ++ break; ++ ++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && ++ ldi2 == 0x3414015AU && ++ bel == 0xE4008200U && ++ nop == 0x08000240U) ++ { ++ regs->gr[25] = (ldi1 & 2) >> 1; ++ regs->gr[20] = __NR_rt_sigreturn; ++ regs->gr[31] = regs->iaoq[1] + 16; ++ regs->sr[0] = regs->iasq[1]; ++ regs->iaoq[0] = 0x100UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ regs->iasq[0] = regs->sr[2]; ++ regs->iasq[1] = regs->sr[2]; ++ return 2; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + int fixup_exception(struct pt_regs *regs) + { + const struct exception_table_entry *fix; +@@ -192,8 +303,33 @@ good_area: + + acc_type = parisc_acctyp(code,regs->iir); + +- if ((vma->vm_flags & acc_type) != acc_type) ++ if ((vma->vm_flags & acc_type) != acc_type) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && ++ (address & ~3UL) == instruction_pointer(regs)) ++ { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 3: ++ return; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ case 2: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h +index 02e41b5..ec6e26c 100644 +--- a/arch/powerpc/include/asm/atomic.h ++++ b/arch/powerpc/include/asm/atomic.h +@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* __powerpc64__ */ + + #endif /* __KERNEL__ */ +diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h +index 4b50941..5605819 100644 +--- a/arch/powerpc/include/asm/cache.h ++++ b/arch/powerpc/include/asm/cache.h +@@ -3,6 +3,7 @@ + + #ifdef __KERNEL__ + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +@@ -22,7 +23,7 @@ + #define L1_CACHE_SHIFT 7 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h +index 3bf9cca..e7457d0 100644 +--- a/arch/powerpc/include/asm/elf.h 
++++ b/arch/powerpc/include/asm/elf.h +@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) ++#define ELF_ET_DYN_BASE (0x20000000) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (0x10000000UL) ++ ++#ifdef __powerpc64__ ++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) ++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28) ++#else ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif ++#endif + + /* + * Our registers are always unsigned longs, whether we're a 32 bit +@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, + (0x7ff >> (PAGE_SHIFT - 12)) : \ + (0x3ffff >> (PAGE_SHIFT - 12))) + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* __KERNEL__ */ + + /* +diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h +index bca8fdc..61e9580 100644 +--- a/arch/powerpc/include/asm/kmap_types.h ++++ b/arch/powerpc/include/asm/kmap_types.h +@@ -27,6 +27,7 @@ enum km_type { + KM_PPC_SYNC_PAGE, + KM_PPC_SYNC_ICACHE, + KM_KDB, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h +index d4a7f64..451de1c 100644 +--- a/arch/powerpc/include/asm/mman.h ++++ b/arch/powerpc/include/asm/mman.h +@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) + } + #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) + +-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) ++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) + { + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); + } +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h +index dd9c4fd..a2ced87 100644 +--- a/arch/powerpc/include/asm/page.h ++++ b/arch/powerpc/include/asm/page.h +@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr; + * and needs to be executable. This means the whole heap ends + * up being executable. + */ +-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_DATA_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr; + #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) + #endif + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + /* + * Use the top bit of the higher-level page table entries to indicate whether + * the entries we point to contain hugepages. This works because we know that +diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h +index fb40ede..d3ce956 100644 +--- a/arch/powerpc/include/asm/page_64.h ++++ b/arch/powerpc/include/asm/page_64.h +@@ -144,15 +144,18 @@ do { \ + * stack by default, so in the absence of a PT_GNU_STACK program header + * we turn execute permission off. 
+ */ +-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_STACK_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + ++#ifndef CONFIG_PAX_PAGEEXEC + #define VM_STACK_DEFAULT_FLAGS \ + (is_32bit_task() ? \ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) ++#endif + + #include <asm-generic/getorder.h> + +diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h +index 88b0bd9..e32bc67 100644 +--- a/arch/powerpc/include/asm/pgtable.h ++++ b/arch/powerpc/include/asm/pgtable.h +@@ -2,6 +2,7 @@ + #define _ASM_POWERPC_PGTABLE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #ifndef __ASSEMBLY__ + #include <asm/processor.h> /* For TASK_SIZE */ + #include <asm/mmu.h> +diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h +index 4aad413..85d86bf 100644 +--- a/arch/powerpc/include/asm/pte-hash32.h ++++ b/arch/powerpc/include/asm/pte-hash32.h +@@ -21,6 +21,7 @@ + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ + #define _PAGE_USER 0x004 /* usermode access allowed */ + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ ++#define _PAGE_EXEC _PAGE_GUARDED + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 559da19..7e5835c 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -212,6 +212,7 @@ + #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ + #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ + #define DSISR_NOHPTE 0x40000000 /* no translation found */ ++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ + #define DSISR_PROTFAULT 0x08000000 /* protection fault */ + #define DSISR_ISSTORE 0x02000000 /* access was a store */ + #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h +index e30a13d..2b7d994 100644 +--- a/arch/powerpc/include/asm/system.h ++++ b/arch/powerpc/include/asm/system.h +@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + #endif + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + /* Used in very early kernel initialization. */ + extern unsigned long reloc_offset(void); +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index bd0fb84..a42a14b 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -13,6 +13,8 @@ + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + ++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++ + /* + * The fs value determines whether argument validity checking should be + * performed or not. 
If get_fs() == USER_DS, checking is performed, with +@@ -327,52 +329,6 @@ do { \ + extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +-#ifndef __powerpc64__ +- +-static inline unsigned long copy_from_user(void *to, +- const void __user *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } +- return n; +-} +- +-static inline unsigned long copy_to_user(void __user *to, +- const void *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, (__force void __user *)from, +- n - over) + over; +- } +- return n; +-} +- +-#else /* __powerpc64__ */ +- +-#define __copy_in_user(to, from, size) \ +- __copy_tofrom_user((to), (from), (size)) +- +-extern unsigned long copy_from_user(void *to, const void __user *from, +- unsigned long n); +-extern unsigned long copy_to_user(void __user *to, const void *from, +- unsigned long n); +-extern unsigned long copy_in_user(void __user *to, const void __user *from, +- unsigned long n); +- +-#endif /* __powerpc64__ */ +- + static inline unsigned long __copy_from_user_inatomic(void *to, + const void __user *from, unsigned long n) + { +@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ + return __copy_tofrom_user((__force void __user *)to, from, n); + } + +@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_tofrom_user(to, (__force const void __user *)from, n); + } + +@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to, + return __copy_to_user_inatomic(to, from, size); + } + ++#ifndef __powerpc64__ ++ ++static inline unsigned long __must_check copy_from_user(void *to, ++ const void __user *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_READ, from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ return __copy_tofrom_user((__force void __user *)to, from, n); ++ } ++ if ((unsigned long)from < TASK_SIZE) { ++ over = (unsigned long)from + n - TASK_SIZE; ++ if (!__builtin_constant_p(n - over)) ++ check_object_size(to, n - over, false); ++ return __copy_tofrom_user((__force void __user *)to, from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, ++ const void *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_WRITE, to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, n); ++ } ++ if ((unsigned long)to < TASK_SIZE) { ++ over = (unsigned long)to + n - TASK_SIZE; ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n - over, true); ++ return __copy_tofrom_user(to, (__force void __user 
*)from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++#else /* __powerpc64__ */ ++ ++#define __copy_in_user(to, from, size) \ ++ __copy_tofrom_user((to), (from), (size)) ++ ++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ n = __copy_from_user(to, from, n); ++ else ++ memset(to, 0, n); ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (likely(access_ok(VERIFY_WRITE, to, n))) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ n = __copy_to_user(to, from, n); ++ } ++ return n; ++} ++ ++extern unsigned long copy_in_user(void __user *to, const void __user *from, ++ unsigned long n); ++ ++#endif /* __powerpc64__ */ ++ + extern unsigned long __clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S +index 429983c..7af363b 100644 +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -587,6 +587,7 @@ storage_fault_common: + std r14,_DAR(r1) + std r15,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + mr r4,r14 + mr r5,r15 + ld r14,PACA_EXGEN+EX_R14(r13) +@@ -596,8 +597,7 @@ storage_fault_common: + cmpdi r3,0 + bne- 1f + b .ret_from_except_lite +-1: bl .save_nvgprs +- mr r5,r3 ++1: mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + ld r4,_DAR(r1) + bl .bad_page_fault +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index cf9c69b..ebc9640 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -1004,10 +1004,10 @@ handle_page_fault: + 11: ld r4,_DAR(r1) + ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + bl .do_page_fault + cmpdi r3,0 + beq+ 13f +- bl .save_nvgprs + mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c +index 745c1e7..59d97a6 100644 +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c +@@ -547,9 +547,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, + host->ops = ops; + host->of_node = of_node_get(of_node); + +- if (host->ops->match == NULL) +- host->ops->match = default_irq_host_match; +- + raw_spin_lock_irqsave(&irq_big_lock, flags); + + /* If it's a legacy controller, check for duplicates and +@@ -622,7 +619,12 @@ struct irq_host *irq_find_host(struct device_node *node) + */ + raw_spin_lock_irqsave(&irq_big_lock, flags); + list_for_each_entry(h, &irq_hosts, link) +- if (h->ops->match(h, node)) { ++ if (h->ops->match) { ++ if (h->ops->match(h, node)) { ++ found = h; ++ break; ++ } ++ } else if (default_irq_host_match(h, node)) { + found = h; + break; + } +diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c +index 0b6d796..d760ddb 100644 +--- a/arch/powerpc/kernel/module_32.c ++++ b/arch/powerpc/kernel/module_32.c +@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { +- printk("Module doesn't contain .plt or .init.plt 
sections.\n"); ++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); + return -ENOEXEC; + } + +@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location, + + DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); + /* Init, or core PLT? */ +- if (location >= mod->module_core +- && location < mod->module_core + mod->core_size) ++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || ++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; +- else ++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || ++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; ++ else { ++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); ++ return ~0UL; ++ } + + /* Find this entry, or if that fails, the next avail. entry */ + while (entry->jump[0]) { +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 6457574..08b28d3 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs) + * Lookup NIP late so we have the best change of getting the + * above info out without failing + */ +- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); +- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); ++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); ++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); + #endif + show_stack(current, (unsigned long *) regs->gpr[1]); + if (!user_mode(regs)) +@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + newsp = stack[0]; + ip = stack[STACK_FRAME_LR_SAVE]; + if (!firstframe || ip != lr) { +- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); ++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((ip == rth || ip == mrth) && curr_frame >= 0) { +- printk(" (%pS)", ++ printk(" (%pA)", + (void *)current->ret_stack[curr_frame].ret); + curr_frame--; + } +@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + struct pt_regs *regs = (struct pt_regs *) + (sp + STACK_FRAME_OVERHEAD); + lr = regs->link; +- printk("--- Exception: %lx at %pS\n LR = %pS\n", ++ printk("--- Exception: %lx at %pA\n LR = %pA\n", + regs->trap, (void *)regs->nip, (void *)lr); + firstframe = 1; + } +@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void) + } + + #endif /* THREAD_SHIFT < PAGE_SHIFT */ +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = 0; +- +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); +- else +- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); +- +- return rnd << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +-#ifdef CONFIG_PPC_STD_MMU_64 +- /* +- * If we are using 1TB segments and we are allowed to randomise +- * the heap, we can put it above 1TB so it is backed by a 1TB +- * segment. 
Otherwise the heap will be in the bottom 1TB +- * which always uses 256MB segments and this may result in a +- * performance penalty. +- */ +- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) +- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); +-#endif +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < base) +- return base; +- +- return ret; +-} +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c +index 836a5a1..27289a3 100644 +--- a/arch/powerpc/kernel/signal_32.c ++++ b/arch/powerpc/kernel/signal_32.c +@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, + /* Save user registers on the stack */ + frame = &rt_sf->uc.uc_mcontext; + addr = frame; +- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + if (save_user_regs(regs, frame, 0, 1)) + goto badframe; + regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c +index a50b5ec..547078a 100644 +--- a/arch/powerpc/kernel/signal_64.c ++++ b/arch/powerpc/kernel/signal_64.c +@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, + current->thread.fpscr.val = 0; + + /* Set up to return from userspace. */ +- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; + } else { + err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index 5459d14..10f8070 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void) + static inline void pmac_backlight_unblank(void) { } + #endif + ++extern void gr_handle_kernel_exploit(void); ++ + int die(const char *str, struct pt_regs *regs, long err) + { + static struct { +@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err) + if (panic_on_oops) + panic("Fatal exception"); + ++ gr_handle_kernel_exploit(); ++ + oops_exit(); + do_exit(err); + +diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c +index 7d14bb6..1305601 100644 +--- a/arch/powerpc/kernel/vdso.c ++++ b/arch/powerpc/kernel/vdso.c +@@ -35,6 +35,7 @@ + #include <asm/firmware.h> + #include <asm/vdso.h> + #include <asm/vdso_datapage.h> ++#include <asm/mman.h> + + #include "setup.h" + +@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = VDSO32_MBASE; + #endif + +- current->mm->context.vdso_base = 0; ++ current->mm->context.vdso_base = ~0UL; + + /* vDSO has a problem and was disabled, just don't "enable" it for the + * process +@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = get_unmapped_area(NULL, vdso_base, + (vdso_pages << PAGE_SHIFT) + + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), +- 0, 0); ++ 0, MAP_PRIVATE | MAP_EXECUTABLE); + if (IS_ERR_VALUE(vdso_base)) { + rc = vdso_base; + goto fail_mmapsem; +diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c +index 5eea6f3..5d10396 100644 +--- a/arch/powerpc/lib/usercopy_64.c 
++++ b/arch/powerpc/lib/usercopy_64.c +@@ -9,22 +9,6 @@ + #include <linux/module.h> + #include <asm/uaccess.h> + +-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_READ, from, n))) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; +-} +- +-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_WRITE, to, n))) +- n = __copy_to_user(to, from, n); +- return n; +-} +- + unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n) + { +@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, + return n; + } + +-EXPORT_SYMBOL(copy_from_user); +-EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(copy_in_user); + +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 5efe8c9..db9ceef 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -32,6 +32,10 @@ + #include <linux/perf_event.h> + #include <linux/magic.h> + #include <linux/ratelimit.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> ++#include <linux/unistd.h> + + #include <asm/firmware.h> + #include <asm/page.h> +@@ -43,6 +47,7 @@ + #include <asm/tlbflush.h> + #include <asm/siginfo.h> + #include <mm/mmu_decl.h> ++#include <asm/ptrace.h> + + #ifdef CONFIG_KPROBES + static inline int notify_page_fault(struct pt_regs *regs) +@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->nip = fault address) ++ * ++ * returns 1 when task should be killed ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int __user *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * Check whether the instruction at regs->nip is a store using + * an update addressing form which will update r1. +@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, + * indicate errors in DSISR but can validly be set in SRR1. + */ + if (trap == 0x400) +- error_code &= 0x48200000; ++ error_code &= 0x58200000; + else + is_write = error_code & DSISR_ISSTORE; + #else +@@ -259,7 +291,7 @@ good_area: + * "undefined". Of those that can be set, this is the only + * one which seems bad. + */ +- if (error_code & 0x10000000) ++ if (error_code & DSISR_GUARDED) + /* Guarded storage error. */ + goto bad_area; + #endif /* CONFIG_8xx */ +@@ -274,7 +306,7 @@ good_area: + * processors use the same I/D cache coherency mechanism + * as embedded. 
+ */ +- if (error_code & DSISR_PROTFAULT) ++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) + goto bad_area; + #endif /* CONFIG_PPC_STD_MMU */ + +@@ -343,6 +375,23 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses cause a SIGSEGV */ + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++#ifdef CONFIG_PPC_STD_MMU ++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { ++#else ++ if (is_exec && regs->nip == address) { ++#endif ++ switch (pax_handle_fetch_fault(regs)) { ++ } ++ ++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + _exception(SIGSEGV, regs, code, address); + return 0; + } +diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c +index 5a783d8..c23e14b 100644 +--- a/arch/powerpc/mm/mmap_64.c ++++ b/arch/powerpc/mm/mmap_64.c +@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index 73709f7..6b90313 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len); + } + + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) +@@ -256,7 +256,7 @@ full_search: + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); + continue; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + } + } + +- addr = mm->mmap_base; +- while (addr > len) { ++ if (mm->mmap_base < len) ++ addr = -ENOMEM; ++ else ++ addr = mm->mmap_base - len; ++ ++ while (!IS_ERR_VALUE(addr)) { + /* Go down by chunk size */ +- addr = _ALIGN_DOWN(addr - len, 1ul << pshift); ++ addr = _ALIGN_DOWN(addr, 1ul << pshift); + + /* Check for hit with different page size */ + mask = slice_range_to_mask(addr, len); +@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || (addr + len) <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ + if (use_cache) + mm->free_area_cache = addr; +@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start; ++ addr = skip_heap_stack_gap(vma, len); + } + + /* +@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + if (fixed && addr > (mm->task_size - len)) + return -EINVAL; + 
++#ifdef CONFIG_PAX_RANDMMAP ++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) ++ addr = 0; ++#endif ++ + /* If hint, make sure it matches our alignment restrictions */ + if (!fixed && addr) { + addr = _ALIGN_UP(addr, 1ul << pshift); +diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h +index 8517d2a..d2738d4 100644 +--- a/arch/s390/include/asm/atomic.h ++++ b/arch/s390/include/asm/atomic.h +@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) + #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() + #define smp_mb__before_atomic_inc() smp_mb() +diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h +index 2a30d5a..5e5586f 100644 +--- a/arch/s390/include/asm/cache.h ++++ b/arch/s390/include/asm/cache.h +@@ -11,8 +11,10 @@ + #ifndef __ARCH_S390_CACHE_H + #define __ARCH_S390_CACHE_H + +-#define L1_CACHE_BYTES 256 ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 8 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define NET_SKB_PAD 32 + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h +index 547f1a6..0b22b53 100644 +--- a/arch/s390/include/asm/elf.h ++++ b/arch/s390/include/asm/elf.h +@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) ++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
*/ +@@ -211,7 +217,4 @@ struct linux_binprm; + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + int arch_setup_additional_pages(struct linux_binprm *, int); + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif +diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h +index ef573c1..75a1ce6 100644 +--- a/arch/s390/include/asm/system.h ++++ b/arch/s390/include/asm/system.h +@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command); + extern void (*_machine_halt)(void); + extern void (*_machine_power_off)(void); + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + static inline int tprot(unsigned long addr) + { +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 2b23885..e136e31 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -235,6 +235,10 @@ static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n) + static inline unsigned long __must_check + __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n) && (n <= 256)) + return uaccess.copy_from_user_small(n, from, to); + else +@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) + unsigned int sz = __compiletime_object_size(to); + + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return n; +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c +index dfcb343..eda788a 100644 +--- a/arch/s390/kernel/module.c ++++ b/arch/s390/kernel/module.c +@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + + /* Increase core size by size of got & plt and set start + offsets for got and plt. */ +- me->core_size = ALIGN(me->core_size, 4); +- me->arch.got_offset = me->core_size; +- me->core_size += me->arch.got_size; +- me->arch.plt_offset = me->core_size; +- me->core_size += me->arch.plt_size; ++ me->core_size_rw = ALIGN(me->core_size_rw, 4); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += me->arch.got_size; ++ me->arch.plt_offset = me->core_size_rx; ++ me->core_size_rx += me->arch.plt_size; + return 0; + } + +@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + if (info->got_initialized == 0) { + Elf_Addr *gotent; + +- gotent = me->module_core + me->arch.got_offset + ++ gotent = me->module_core_rw + me->arch.got_offset + + info->got_offset; + *gotent = val; + info->got_initialized = 1; +@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) + *(unsigned int *) loc = +- (val + (Elf_Addr) me->module_core - loc) >> 1; ++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1; + else if (r_type == R_390_GOT64 || + r_type == R_390_GOTPLT64) + *(unsigned long *) loc = val; +@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. 
*/ + if (info->plt_initialized == 0) { + unsigned int *ip; +- ip = me->module_core + me->arch.plt_offset + ++ ip = me->module_core_rx + me->arch.plt_offset + + info->plt_offset; + #ifndef CONFIG_64BIT + ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ +@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) +- val = (Elf_Addr) me->module_core + ++ val = (Elf_Addr) me->module_core_rx + + me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; +@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_GOTOFF32: /* 32 bit offset to GOT. */ + case R_390_GOTOFF64: /* 64 bit offset to GOT. */ + val = val + rela->r_addend - +- ((Elf_Addr) me->module_core + me->arch.got_offset); ++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset); + if (r_type == R_390_GOTOFF16) + *(unsigned short *) loc = val; + else if (r_type == R_390_GOTOFF32) +@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + break; + case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ + case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ +- val = (Elf_Addr) me->module_core + me->arch.got_offset + ++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + + rela->r_addend - loc; + if (r_type == R_390_GOTPC) + *(unsigned int *) loc = val; +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c +index 53088e2..9f44a36 100644 +--- a/arch/s390/kernel/process.c ++++ b/arch/s390/kernel/process.c +@@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p) + } + return 0; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; +- else +- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (!(current->flags & PF_RANDOMIZE)) +- return base; +- if (ret < base) +- return base; +- return ret; +-} +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index a0155c0..34cc491 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ 
mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h +index ae3d59f..f65f075 100644 +--- a/arch/score/include/asm/cache.h ++++ b/arch/score/include/asm/cache.h +@@ -1,7 +1,9 @@ + #ifndef _ASM_SCORE_CACHE_H + #define _ASM_SCORE_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_SCORE_CACHE_H */ +diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h +index 589d5c7..669e274 100644 +--- a/arch/score/include/asm/system.h ++++ b/arch/score/include/asm/system.h +@@ -17,7 +17,7 @@ do { \ + #define finish_arch_switch(prev) do {} while (0) + + typedef void (*vi_handler_t)(void); +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) (x) + + #define mb() barrier() + #define rmb() barrier() +diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c +index 25d0803..d6c8e36 100644 +--- a/arch/score/kernel/process.c ++++ b/arch/score/kernel/process.c +@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task) + + return task_pt_regs(task)->cp0_epc; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- return sp; +-} +diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h +index ef9e555..331bd29 100644 +--- a/arch/sh/include/asm/cache.h ++++ b/arch/sh/include/asm/cache.h +@@ -9,10 +9,11 @@ + #define __ASM_SH_CACHE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #include <linux/init.h> + #include <cpu/cache.h> + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c +index afeb710..d1d1289 100644 +--- a/arch/sh/mm/mmap.c ++++ b/arch/sh/mm/mmap.c +@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -106,7 +105,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for 
next time */ + return (mm->free_area_cache = addr-len); + } +@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (unlikely(mm->mmap_base < len)) + goto bottomup; + +- addr = mm->mmap_base-len; +- if (do_colour_align) +- addr = COLOUR_ALIGN_DOWN(addr, pgoff); ++ addr = mm->mmap_base - len; + + do { ++ if (do_colour_align) ++ addr = COLOUR_ALIGN_DOWN(addr, pgoff); + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start-len; +- if (do_colour_align) +- addr = COLOUR_ALIGN_DOWN(addr, pgoff); +- } while (likely(len < vma->vm_start)); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig +index f92602e..27060b2 100644 +--- a/arch/sparc/Kconfig ++++ b/arch/sparc/Kconfig +@@ -31,6 +31,7 @@ config SPARC + + config SPARC32 + def_bool !64BIT ++ select GENERIC_ATOMIC64 + + config SPARC64 + def_bool 64BIT +diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile +index eddcfb3..b117d90 100644 +--- a/arch/sparc/Makefile ++++ b/arch/sparc/Makefile +@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/ + # Export what is needed by arch/sparc/boot/Makefile + export VMLINUX_INIT VMLINUX_MAIN + VMLINUX_INIT := $(head-y) $(init-y) +-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ ++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ + VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y) + VMLINUX_MAIN += $(drivers-y) $(net-y) + +diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h +index 5c3c8b6..ba822fa 100644 +--- a/arch/sparc/include/asm/atomic_32.h ++++ b/arch/sparc/include/asm/atomic_32.h +@@ -13,6 +13,8 @@ + + #include <linux/types.h> + ++#include <asm-generic/atomic64.h> ++ + #ifdef __KERNEL__ + + #include <asm/system.h> +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h +index 9f421df..b81fc12 100644 +--- a/arch/sparc/include/asm/atomic_64.h ++++ b/arch/sparc/include/asm/atomic_64.h +@@ -14,18 +14,40 @@ + #define ATOMIC64_INIT(i) { (i) } + + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic64_read(v) (*(volatile long *)&(v)->counter) ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return v->counter; ++} + + #define atomic_set(v, i) (((v)->counter) = i) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + #define atomic64_set(v, i) (((v)->counter) = i) ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} + + extern void atomic_add(int, atomic_t *); ++extern void atomic_add_unchecked(int, atomic_unchecked_t *); + extern void atomic64_add(long, atomic64_t *); ++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); + extern void 
atomic_sub(int, atomic_t *); ++extern void atomic_sub_unchecked(int, atomic_unchecked_t *); + extern void atomic64_sub(long, atomic64_t *); ++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); + + extern int atomic_add_ret(int, atomic_t *); ++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); + extern long atomic64_add_ret(long, atomic64_t *); ++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); + extern int atomic_sub_ret(int, atomic_t *); + extern long atomic64_sub_ret(long, atomic64_t *); + +@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_return(v) atomic64_sub_ret(1, v) + + #define atomic_inc_return(v) atomic_add_ret(1, v) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(1, v); ++} + #define atomic64_inc_return(v) atomic64_add_ret(1, v) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_ret_unchecked(1, v); ++} + + #define atomic_sub_return(i, v) atomic_sub_ret(i, v) + #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) + + #define atomic_add_return(i, v) atomic_add_ret(i, v) ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(i, v); ++} + #define atomic64_add_return(i, v) atomic64_add_ret(i, v) ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ return atomic64_add_ret_unchecked(i, v); ++} + + /* + * atomic_inc_and_test - increment and test +@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *); + * other cases. + */ + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_inc_return_unchecked(v) == 0; ++} + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + + #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) +@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic64_inc(v) atomic64_add(1, v) ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_add_unchecked(1, v); ++} + + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + #define atomic64_dec(v) atomic64_sub(1, v) ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_sub_unchecked(1, v); ++} + + #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) + #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) + + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} + #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef 
CONFIG_PAX_REFCOUNT ++ "tvs %%icc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + #define atomic64_cmpxchg(v, o, n) \ + ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%xcc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h +index 69358b5..9d0d492 100644 +--- a/arch/sparc/include/asm/cache.h ++++ b/arch/sparc/include/asm/cache.h +@@ -7,10 +7,12 @@ + #ifndef _SPARC_CACHE_H + #define _SPARC_CACHE_H + ++#include <linux/const.h> ++ + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) + + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES 32 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SPARC32 + #define SMP_CACHE_BYTES_SHIFT 5 +diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h +index 4269ca6..e3da77f 100644 +--- a/arch/sparc/include/asm/elf_32.h ++++ b/arch/sparc/include/asm/elf_32.h +@@ -114,6 +114,13 @@ typedef struct { + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This can NOT be done in userspace + on Sparc. */ +diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h +index 7df8b7f..4946269 100644 +--- a/arch/sparc/include/asm/elf_64.h ++++ b/arch/sparc/include/asm/elf_64.h +@@ -180,6 +180,13 @@ typedef struct { + #define ELF_ET_DYN_BASE 0x0000010000000000UL + #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
15 : 29) ++#endif ++ + extern unsigned long sparc64_elf_hwcap; + #define ELF_HWCAP sparc64_elf_hwcap + +diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h +index 156707b..aefa786 100644 +--- a/arch/sparc/include/asm/page_32.h ++++ b/arch/sparc/include/asm/page_32.h +@@ -8,6 +8,8 @@ + #ifndef _SPARC_PAGE_H + #define _SPARC_PAGE_H + ++#include <linux/const.h> ++ + #define PAGE_SHIFT 12 + + #ifndef __ASSEMBLY__ +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h +index a790cc6..091ed94 100644 +--- a/arch/sparc/include/asm/pgtable_32.h ++++ b/arch/sparc/include/asm/pgtable_32.h +@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd) + BTFIXUPDEF_INT(page_none) + BTFIXUPDEF_INT(page_copy) + BTFIXUPDEF_INT(page_readonly) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++BTFIXUPDEF_INT(page_shared_noexec) ++BTFIXUPDEF_INT(page_copy_noexec) ++BTFIXUPDEF_INT(page_readonly_noexec) ++#endif ++ + BTFIXUPDEF_INT(page_kernel) + + #define PMD_SHIFT SUN4C_PMD_SHIFT +@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED; + #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) + #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) + ++#ifdef CONFIG_PAX_PAGEEXEC ++extern pgprot_t PAGE_SHARED_NOEXEC; ++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) ++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + extern unsigned long page_kernel; + + #ifdef MODULE +diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h +index f6ae2b2..b03ffc7 100644 +--- a/arch/sparc/include/asm/pgtsrmmu.h ++++ b/arch/sparc/include/asm/pgtsrmmu.h +@@ -115,6 +115,13 @@ + SRMMU_EXEC | SRMMU_REF) + #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_EXEC | SRMMU_REF) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) ++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#endif ++ + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ + SRMMU_DIRTY | SRMMU_REF) + +diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h +index 9689176..63c18ea 100644 +--- a/arch/sparc/include/asm/spinlock_64.h ++++ b/arch/sparc/include/asm/spinlock_64.h +@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla + + /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ + +-static void inline arch_read_lock(arch_rwlock_t *lock) ++static inline void arch_read_lock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__ ( + "1: ldsw [%2], %0\n" + " brlz,pn %0, 2f\n" +-"4: add %0, 1, %1\n" ++"4: addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock) + " .previous" + : "=&r" (tmp1), "=&r" (tmp2) + : "r" (lock) +- : "memory"); ++ : "memory", "cc"); + } + +-static int inline arch_read_trylock(arch_rwlock_t *lock) ++static inline int arch_read_trylock(arch_rwlock_t *lock) + { + int tmp1, tmp2; + +@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + "1: ldsw [%2], %0\n" + " brlz,a,pn %0, 2f\n" + " mov 0, %0\n" +-" add %0, 1, %1\n" ++" addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + return tmp1; + } + +-static void inline arch_read_unlock(arch_rwlock_t *lock) ++static inline void arch_read_unlock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: lduw [%2], %0\n" +-" sub %0, 1, %1\n" ++" subcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%xcc, 1b\n" +@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_lock(arch_rwlock_t *lock) ++static inline void arch_write_lock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2; + +@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_unlock(arch_rwlock_t *lock) ++static inline void arch_write_unlock(arch_rwlock_t *lock) + { + __asm__ __volatile__( + " stw %%g0, [%0]" +@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static int inline arch_write_trylock(arch_rwlock_t *lock) ++static inline int arch_write_trylock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2, result; + +diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h +index fa57532..e1a4c53 100644 +--- a/arch/sparc/include/asm/thread_info_32.h ++++ b/arch/sparc/include/asm/thread_info_32.h +@@ -50,6 +50,8 @@ struct thread_info { + unsigned long w_saved; + + struct restart_block restart_block; ++ ++ unsigned long lowest_stack; + }; + + /* +diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h +index 60d86be..952dea1 100644 +--- a/arch/sparc/include/asm/thread_info_64.h ++++ b/arch/sparc/include/asm/thread_info_64.h +@@ -63,6 +63,8 @@ struct thread_info { + struct pt_regs *kern_una_regs; + unsigned int kern_una_insn; + ++ unsigned long lowest_stack; ++ + unsigned long fpregs[0] __attribute__ ((aligned(64))); + }; + +diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h +index e88fbe5..96b0ce5 100644 +--- a/arch/sparc/include/asm/uaccess.h ++++ b/arch/sparc/include/asm/uaccess.h +@@ -1,5 +1,13 @@ + #ifndef ___ASM_SPARC_UACCESS_H + #define ___ASM_SPARC_UACCESS_H ++ ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ ++#include <linux/types.h> ++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++#endif ++#endif ++ + #if 
defined(__sparc__) && defined(__arch64__) + #include <asm/uaccess_64.h> + #else +diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h +index 8303ac4..07f333d 100644 +--- a/arch/sparc/include/asm/uaccess_32.h ++++ b/arch/sparc/include/asm/uaccess_32.h +@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig + + static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) to, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_user(to, (__force void __user *) from, n); + } + + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) from, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_user((__force void __user *) to, from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + return __copy_user((__force void __user *) to, from, n); + } + +diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h +index 3e1449f..5293a0e 100644 +--- a/arch/sparc/include/asm/uaccess_64.h ++++ b/arch/sparc/include/asm/uaccess_64.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <linux/string.h> + #include <linux/thread_info.h> ++#include <linux/kernel.h> + #include <asm/asi.h> + #include <asm/system.h> + #include <asm/spitfire.h> +@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long size) + { +- unsigned long ret = ___copy_from_user(to, from, size); ++ unsigned long ret; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(to, size, false); ++ ++ ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) + ret = copy_from_user_fixup(to, from, size); + +@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from, + static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long size) + { +- unsigned long ret = ___copy_to_user(to, from, size); ++ unsigned long ret; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(from, size, true); ++ ++ ret = ___copy_to_user(to, from, size); + if (unlikely(ret)) + ret = copy_to_user_fixup(to, from, size); + return ret; +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile +index cb85458..e063f17 100644 +--- a/arch/sparc/kernel/Makefile ++++ b/arch/sparc/kernel/Makefile +@@ -3,7 +3,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + extra-y := head_$(BITS).o + extra-y += init_task.o +diff --git 
a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c +index f793742..4d880af 100644 +--- a/arch/sparc/kernel/process_32.c ++++ b/arch/sparc/kernel/process_32.c +@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp) + rw->ins[4], rw->ins[5], + rw->ins[6], + rw->ins[7]); +- printk("%pS\n", (void *) rw->ins[7]); ++ printk("%pA\n", (void *) rw->ins[7]); + rw = (struct reg_window32 *) rw->ins[6]; + } + spin_unlock_irqrestore(&sparc_backtrace_lock, flags); +@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r) + + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", + r->psr, r->pc, r->npc, r->y, print_tainted()); +- printk("PC: <%pS>\n", (void *) r->pc); ++ printk("PC: <%pA>\n", (void *) r->pc); + printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], + r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); + printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], + r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) r->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]); + + printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], +@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + rw = (struct reg_window32 *) fp; + pc = rw->ins[7]; + printk("[%08lx : ", pc); +- printk("%pS ] ", (void *) pc); ++ printk("%pA ] ", (void *) pc); + fp = rw->ins[6]; + } while (++count < 16); + printk("\n"); +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c +index 3739a06..48b2ff0 100644 +--- a/arch/sparc/kernel/process_64.c ++++ b/arch/sparc/kernel/process_64.c +@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs) + printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", + rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); + if (regs->tstate & TSTATE_PRIV) +- printk("I7: <%pS>\n", (void *) rwk->ins[7]); ++ printk("I7: <%pA>\n", (void *) rwk->ins[7]); + } + + void show_regs(struct pt_regs *regs) + { + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, + regs->tpc, regs->tnpc, regs->y, print_tainted()); +- printk("TPC: <%pS>\n", (void *) regs->tpc); ++ printk("TPC: <%pA>\n", (void *) regs->tpc); + printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", + regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], + regs->u_regs[3]); +@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs) + printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", + regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], + regs->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); + show_regwindow(regs); + show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); + } +@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void) + ((tp && tp->task) ? 
tp->task->pid : -1)); + + if (gp->tstate & TSTATE_PRIV) { +- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", ++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", + (void *) gp->tpc, + (void *) gp->o7, + (void *) gp->i7, +diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c +index 42b282f..28ce9f2 100644 +--- a/arch/sparc/kernel/sys_sparc_32.c ++++ b/arch/sparc/kernel/sys_sparc_32.c +@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (ARCH_SUN4C && len > 0x20000000) + return -ENOMEM; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + if (flags & MAP_SHARED) + addr = COLOUR_ALIGN(addr); +@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + } + if (TASK_SIZE - PAGE_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index 441521a..b767073 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + } + +@@ -174,14 +177,14 @@ full_search: + vma = find_vma(mm, VA_EXCLUDE_END); + } + if (unlikely(task_size < addr)) { +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
+ */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (unlikely(mm->mmap_base < len)) + goto bottomup; + +- addr = mm->mmap_base-len; +- if (do_color_align) +- addr = COLOUR_ALIGN_DOWN(addr, pgoff); ++ addr = mm->mmap_base - len; + + do { ++ if (do_color_align) ++ addr = COLOUR_ALIGN_DOWN(addr, pgoff); + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start-len; +- if (do_color_align) +- addr = COLOUR_ALIGN_DOWN(addr, pgoff); +- } while (likely(len < vma->vm_start)); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { +@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap = (task_size / 6 * 5); + + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c +index 591f20c..0f1b925 100644 +--- a/arch/sparc/kernel/traps_32.c ++++ b/arch/sparc/kernel/traps_32.c +@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc) + #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") + #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs) + count++ < 30 && + (((unsigned long) rw) >= PAGE_OFFSET) && + !(((unsigned long) rw) & 0x7)) { +- 
printk("Caller[%08lx]: %pS\n", rw->ins[7], ++ printk("Caller[%08lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + rw = (struct reg_window32 *)rw->ins[6]; + } + } + printk("Instruction DUMP:"); + instruction_dump ((unsigned long *) regs->pc); +- if(regs->psr & PSR_PS) ++ if(regs->psr & PSR_PS) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c +index 0cbdaa4..438e4c9 100644 +--- a/arch/sparc/kernel/traps_64.c ++++ b/arch/sparc/kernel/traps_64.c +@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) + i + 1, + p->trapstack[i].tstate, p->trapstack[i].tpc, + p->trapstack[i].tnpc, p->trapstack[i].tt); +- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); ++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); + } + } + +@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl) + + lvl -= 0x100; + if (regs->tstate & TSTATE_PRIV) { ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + sprintf(buffer, "Kernel bad sw trap %lx", lvl); + die_if_kernel(buffer, regs); + } +@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl) + void bad_trap_tl1(struct pt_regs *regs, long lvl) + { + char buffer[32]; +- ++ + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + sprintf (buffer, "Bad trap %lx at tl>0", lvl); +@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in + regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); + printk("%s" "ERROR(%d): ", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); +- printk("TPC<%pS>\n", (void *) regs->tpc); ++ printk("TPC<%pA>\n", (void *) regs->tpc); + printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, +@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 'I' : 'D', + regs->tpc); +- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); + panic("Irrecoverable Cheetah+ parity error."); + } + +@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 
'I' : 'D', + regs->tpc); +- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); + } + + struct sun4v_error_entry { +@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + fp = (unsigned long)sf->fp + STACK_BIAS; + } + +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) pc); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + int index = tsk->curr_ret_stack; + if (tsk->ret_stack && index >= graph) { + pc = tsk->ret_stack[index - graph].ret; +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) pc); + graph++; + } + } +@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) + return (struct reg_window *) (fp + STACK_BIAS); + } + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) + while (rw && + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { +- printk("Caller[%016lx]: %pS\n", rw->ins[7], ++ printk("Caller[%016lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + + rw = kernel_stack_up(rw); +@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs) + } + user_instruction_dump ((unsigned int __user *) regs->tpc); + } +- if (regs->tstate & TSTATE_PRIV) ++ if (regs->tstate & TSTATE_PRIV) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + EXPORT_SYMBOL(die_if_kernel); +diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c +index 76e4ac1..78f8bb1 100644 +--- a/arch/sparc/kernel/unaligned_64.c ++++ b/arch/sparc/kernel/unaligned_64.c +@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs) + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); + + if (__ratelimit(&ratelimit)) { +- printk("Kernel unaligned access at TPC[%lx] %pS\n", ++ printk("Kernel unaligned access at TPC[%lx] %pA\n", + regs->tpc, (void *) regs->tpc); + } + } +diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile +index a3fc437..fea9957 100644 +--- a/arch/sparc/lib/Makefile ++++ b/arch/sparc/lib/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi -DST_DIV0=0x02 +-ccflags-y := -Werror 
++#ccflags-y := -Werror + + lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o + lib-$(CONFIG_SPARC32) += memcpy.o memset.o +diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S +index 59186e0..f747d7a 100644 +--- a/arch/sparc/lib/atomic_64.S ++++ b/arch/sparc/lib/atomic_64.S +@@ -18,7 +18,12 @@ + atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add, .-atomic_add + ++ .globl atomic_add_unchecked ++ .type atomic_add_unchecked,#function ++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ add %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_add_unchecked, .-atomic_add_unchecked ++ + .globl atomic_sub + .type atomic_sub,#function + atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_sub, .-atomic_sub + ++ .globl atomic_sub_unchecked ++ .type atomic_sub_unchecked,#function ++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ sub %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_sub_unchecked, .-atomic_sub_unchecked ++ + .globl atomic_add_ret + .type atomic_add_ret,#function + atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_ret, .-atomic_add_ret + ++ .globl atomic_add_ret_unchecked ++ .type atomic_add_ret_unchecked,#function ++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ add %g7, %o0, %g7 ++ sra %g7, 0, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked ++ + .globl atomic_sub_ret + .type atomic_sub_ret,#function + atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -88,12 
+159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add, .-atomic64_add + ++ .globl atomic64_add_unchecked ++ .type atomic64_add_unchecked,#function ++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_add_unchecked, .-atomic64_add_unchecked ++ + .globl atomic64_sub + .type atomic64_sub,#function + atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_sub, .-atomic64_sub + ++ .globl atomic64_sub_unchecked ++ .type atomic64_sub_unchecked,#function ++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ subcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked ++ + .globl atomic64_add_ret + .type atomic64_add_ret,#function + atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_ret, .-atomic64_add_ret + ++ .globl atomic64_add_ret_unchecked ++ .type atomic64_add_ret_unchecked,#function ++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ add %g7, %o0, %g7 ++ mov %g7, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked ++ + .globl atomic64_sub_ret + .type atomic64_sub_ret,#function + atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c +index 1b30bb3..b4a16c7 100644 +--- a/arch/sparc/lib/ksyms.c ++++ b/arch/sparc/lib/ksyms.c +@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write); + + /* Atomic counter implementation. */ + EXPORT_SYMBOL(atomic_add); ++EXPORT_SYMBOL(atomic_add_unchecked); + EXPORT_SYMBOL(atomic_add_ret); ++EXPORT_SYMBOL(atomic_add_ret_unchecked); + EXPORT_SYMBOL(atomic_sub); ++EXPORT_SYMBOL(atomic_sub_unchecked); + EXPORT_SYMBOL(atomic_sub_ret); + EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); + EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); + EXPORT_SYMBOL(atomic64_sub); ++EXPORT_SYMBOL(atomic64_sub_unchecked); + EXPORT_SYMBOL(atomic64_sub_ret); + + /* Atomic bit operations. 
*/ +diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile +index 301421c..e2535d1 100644 +--- a/arch/sparc/mm/Makefile ++++ b/arch/sparc/mm/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o + obj-y += fault_$(BITS).o +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c +index 8023fd7..c8e89e9 100644 +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -21,6 +21,9 @@ + #include <linux/perf_event.h> + #include <linux/interrupt.h> + #include <linux/kdebug.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/system.h> + #include <asm/page.h> +@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) + return safe_compute_effective_address(regs, insn); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->pc); ++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned int addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->pc); ++ ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { ++ unsigned int addr; ++ ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int 
*)regs->pc); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U && ++ nop == 0x01000000U) ++ { ++ unsigned int addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(ba, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr, save, call; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(&current->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(&current->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(&current->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(&current->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(&current->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(&current->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(&current->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->pc = call_dl_resolve; ++ regs->npc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->pc-4)); ++ err |= get_user(call, (unsigned int *)regs->pc); ++ err |= get_user(nop, (unsigned int *)(regs->pc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 
0x20000000U) << 2); ++ ++ regs->u_regs[UREG_RETPC] = regs->pc; ++ regs->pc = dl_resolve; ++ regs->npc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + int text_fault) + { +@@ -280,6 +545,24 @@ good_area: + if(!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Allow reads even for write-only mappings */ + if(!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 504c062..6fcb9c6 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -21,6 +21,9 @@ + #include <linux/kprobes.h> + #include <linux/kdebug.h> + #include <linux/percpu.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) + printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", + regs->tpc); + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); +- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); ++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); + dump_stack(); + unhandled_fault(regs->tpc, current, regs); +@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, + show_regs(regs); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->tpc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 
when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->tpc); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->tpc); ++ ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { ++ unsigned long addr; ++ ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #4 */ ++ unsigned int sethi, mov1, call, mov2; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(call, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ mov1 == 0x8210000FU && ++ (call & 0xC0000000U) == 0x40000000U && ++ mov2 == 0x9E100001U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; ++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #5 */ ++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 
0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x82106000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x83287020U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #6 */ ++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ sllx == 0x83287020U && ++ (or & 0xFFFFE000U) == 0x8A116000U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ unsigned int save, call; ++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(&current->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(&current->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(&current->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(&current->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) 
{ ++ up_write(&current->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(&current->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(&current->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->tpc = call_dl_resolve; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ ++ /* PaX: 64-bit PLT stub */ ++ err = get_user(sethi1, (unsigned int *)addr); ++ err |= get_user(sethi2, (unsigned int *)(addr+4)); ++ err |= get_user(or1, (unsigned int *)(addr+8)); ++ err |= get_user(or2, (unsigned int *)(addr+12)); ++ err |= get_user(sllx, (unsigned int *)(addr+16)); ++ err |= get_user(add, (unsigned int *)(addr+20)); ++ err |= get_user(jmpl, (unsigned int *)(addr+24)); ++ err |= get_user(nop, (unsigned int *)(addr+28)); ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x09000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x88112000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x89293020U && ++ add == 0x8A010005U && ++ jmpl == 0x89C14000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G4] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; ++ regs->u_regs[UREG_G4] = addr + 24; ++ addr = regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->tpc-4)); ++ err |= get_user(call, (unsigned int *)regs->tpc); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ dl_resolve &= 0xFFFFFFFFUL; ++ ++ regs->u_regs[UREG_RETPC] = regs->tpc; ++ regs->tpc = dl_resolve; ++ regs->tnpc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (ba & 0xFFF00000U) == 0x30600000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 
0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + { + struct mm_struct *mm = current->mm; +@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + if (!vma) + goto bad_area; + ++#ifdef CONFIG_PAX_PAGEEXEC ++ /* PaX: detect ITLB misses on non-exec pages */ ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && ++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) ++ { ++ if (address != regs->tpc) ++ goto good_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Pure DTLB misses do not tell us whether the fault causing + * load/store/atomic was a write or not, it only says that there + * was no match. So in such a case we (carefully) read the +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index 07e1453..0a7d9e9 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -67,7 +67,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (unlikely(mm->mmap_base < len)) + goto bottomup; + +- addr = (mm->mmap_base-len) & HPAGE_MASK; ++ addr = mm->mmap_base - len; + + do { ++ addr &= HPAGE_MASK; + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = (vma->vm_start-len) & HPAGE_MASK; +- } while (likely(len < vma->vm_start)); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || 
addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c +index 7b00de6..78239f4 100644 +--- a/arch/sparc/mm/init_32.c ++++ b/arch/sparc/mm/init_32.c +@@ -316,6 +316,9 @@ extern void device_scan(void); + pgprot_t PAGE_SHARED __read_mostly; + EXPORT_SYMBOL(PAGE_SHARED); + ++pgprot_t PAGE_SHARED_NOEXEC __read_mostly; ++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC); ++ + void __init paging_init(void) + { + switch(sparc_cpu_model) { +@@ -344,17 +347,17 @@ void __init paging_init(void) + + /* Initialize the protection map with non-constant, MMU dependent values. */ + protection_map[0] = PAGE_NONE; +- protection_map[1] = PAGE_READONLY; +- protection_map[2] = PAGE_COPY; +- protection_map[3] = PAGE_COPY; ++ protection_map[1] = PAGE_READONLY_NOEXEC; ++ protection_map[2] = PAGE_COPY_NOEXEC; ++ protection_map[3] = PAGE_COPY_NOEXEC; + protection_map[4] = PAGE_READONLY; + protection_map[5] = PAGE_READONLY; + protection_map[6] = PAGE_COPY; + protection_map[7] = PAGE_COPY; + protection_map[8] = PAGE_NONE; +- protection_map[9] = PAGE_READONLY; +- protection_map[10] = PAGE_SHARED; +- protection_map[11] = PAGE_SHARED; ++ protection_map[9] = PAGE_READONLY_NOEXEC; ++ protection_map[10] = PAGE_SHARED_NOEXEC; ++ protection_map[11] = PAGE_SHARED_NOEXEC; + protection_map[12] = PAGE_READONLY; + protection_map[13] = PAGE_READONLY; + protection_map[14] = PAGE_SHARED; +diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c +index cbef74e..c38fead 100644 +--- a/arch/sparc/mm/srmmu.c ++++ b/arch/sparc/mm/srmmu.c +@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void) + PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); + BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); + BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC); ++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); ++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); ++#endif ++ + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); + page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); + +diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h +index 27fe667..36d474c 100644 +--- a/arch/tile/include/asm/atomic_64.h ++++ b/arch/tile/include/asm/atomic_64.h +@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic dec and inc don't implement barrier, so provide them if needed. 
*/ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() +diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h +index 392e533..536b092 100644 +--- a/arch/tile/include/asm/cache.h ++++ b/arch/tile/include/asm/cache.h +@@ -15,11 +15,12 @@ + #ifndef _ASM_TILE_CACHE_H + #define _ASM_TILE_CACHE_H + ++#include <linux/const.h> + #include <arch/chip.h> + + /* bytes per L1 data cache line */ + #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* bytes per L2 cache line */ + #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() +diff --git a/arch/um/Makefile b/arch/um/Makefile +index 7730af6..cce5b19 100644 +--- a/arch/um/Makefile ++++ b/arch/um/Makefile +@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\ + $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \ + $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include + ++ifdef CONSTIFY_PLUGIN ++USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify ++endif ++ + #This will adjust *FLAGS accordingly to the platform. + include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS) + +diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h +index 19e1bdd..3665b77 100644 +--- a/arch/um/include/asm/cache.h ++++ b/arch/um/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef __UM_CACHE_H + #define __UM_CACHE_H + ++#include <linux/const.h> + + #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) + # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +@@ -12,6 +13,6 @@ + # define L1_CACHE_SHIFT 5 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif +diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h +index 6c03acd..a5e0215 100644 +--- a/arch/um/include/asm/kmap_types.h ++++ b/arch/um/include/asm/kmap_types.h +@@ -23,6 +23,7 @@ enum km_type { + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h +index 7cfc3ce..cbd1a58 100644 +--- a/arch/um/include/asm/page.h ++++ b/arch/um/include/asm/page.h +@@ -14,6 +14,9 @@ + #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) + #define PAGE_MASK (~(PAGE_SIZE-1)) + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef __ASSEMBLY__ + + struct page; +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c +index c533835..84db18e 100644 +--- a/arch/um/kernel/process.c ++++ b/arch/um/kernel/process.c +@@ -406,22 +406,6 @@ int singlestepping(void * t) + return 2; + } + +-/* +- * Only x86 and x86_64 have an arch_align_stack(). +- * All other arches have "#define arch_align_stack(x) (x)" +- * in their asm/system.h +- * As this is included in UML from asm-um/system-generic.h, +- * we can use it to behave as the subarch does. 
+- */ +-#ifndef arch_align_stack +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} +-#endif +- + unsigned long get_wchan(struct task_struct *p) + { + unsigned long stack_page, sp, ip; +diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h +index ad8f795..2c7eec6 100644 +--- a/arch/unicore32/include/asm/cache.h ++++ b/arch/unicore32/include/asm/cache.h +@@ -12,8 +12,10 @@ + #ifndef __UNICORE_CACHE_H__ + #define __UNICORE_CACHE_H__ + +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#include <linux/const.h> ++ ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index efb4294..61bc18c 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -235,7 +235,7 @@ config X86_HT + + config X86_32_LAZY_GS + def_bool y +- depends on X86_32 && !CC_STACKPROTECTOR ++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF + + config ARCH_HWEIGHT_CFLAGS + string +@@ -1022,7 +1022,7 @@ choice + + config NOHIGHMEM + bool "off" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Linux can use up to 64 Gigabytes of physical memory on x86 systems. + However, the address space of 32-bit x86 processors is only 4 +@@ -1059,7 +1059,7 @@ config NOHIGHMEM + + config HIGHMEM4G + bool "4GB" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Select this if you have a 32-bit processor and between 1 and 4 + gigabytes of physical RAM. +@@ -1113,7 +1113,7 @@ config PAGE_OFFSET + hex + default 0xB0000000 if VMSPLIT_3G_OPT + default 0x80000000 if VMSPLIT_2G +- default 0x78000000 if VMSPLIT_2G_OPT ++ default 0x70000000 if VMSPLIT_2G_OPT + default 0x40000000 if VMSPLIT_1G + default 0xC0000000 + depends on X86_32 +@@ -1496,6 +1496,7 @@ config SECCOMP + + config CC_STACKPROTECTOR + bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" ++ depends on X86_64 || !PAX_MEMORY_UDEREF + ---help--- + This option turns on the -fstack-protector GCC feature. This + feature puts, at the beginning of functions, a canary value on +@@ -1553,6 +1554,7 @@ config KEXEC_JUMP + config PHYSICAL_START + hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP) + default "0x1000000" ++ range 0x400000 0x40000000 + ---help--- + This gives the physical address where the kernel is loaded. + +@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS + config PHYSICAL_ALIGN + hex "Alignment value to which kernel should be aligned" if X86_32 + default "0x1000000" ++ range 0x400000 0x1000000 if PAX_KERNEXEC + range 0x2000 0x1000000 + ---help--- + This value puts the alignment restrictions on physical address +@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU + Say N if you want to disable CPU hotplug. + + config COMPAT_VDSO +- def_bool y ++ def_bool n + prompt "Compat VDSO support" + depends on X86_32 || IA32_EMULATION ++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF + ---help--- + Map the 32-bit VDSO to the predictable old-style address too. 
+ +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index e3ca7e0..b30b28a 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -341,7 +341,7 @@ config X86_PPRO_FENCE + + config X86_F00F_BUG + def_bool y +- depends on M586MMX || M586TSC || M586 || M486 || M386 ++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC + + config X86_INVD_BUG + def_bool y +@@ -365,7 +365,7 @@ config X86_POPAD_OK + + config X86_ALIGNMENT_16 + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + + config X86_INTEL_USERCOPY + def_bool y +@@ -411,7 +411,7 @@ config X86_CMPXCHG64 + # generates cmov. + config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + + config X86_MINIMUM_CPU_FAMILY + int +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug +index bf56e17..05f9891 100644 +--- a/arch/x86/Kconfig.debug ++++ b/arch/x86/Kconfig.debug +@@ -81,7 +81,7 @@ config X86_PTDUMP + config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + default y +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && BROKEN + ---help--- + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const +@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST + + config DEBUG_SET_MODULE_RONX + bool "Set loadable kernel module data as NX and text as RO" +- depends on MODULES ++ depends on MODULES && BROKEN + ---help--- + This option helps catch unintended modifications to loadable + kernel module's text and read-only data. It also prevents execution +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index b02e509..2631e48 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -46,6 +46,7 @@ else + UTS_MACHINE := x86_64 + CHECKFLAGS += -D__x86_64__ -m64 + ++ biarch := $(call cc-option,-m64) + KBUILD_AFLAGS += -m64 + KBUILD_CFLAGS += -m64 + +@@ -195,3 +196,12 @@ define archhelp + echo ' FDARGS="..." arguments for the booted kernel' + echo ' FDINITRD=file initrd for the booted kernel' + endef ++ ++define OLD_LD ++ ++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
++*** Please upgrade your binutils to 2.18 or newer ++endef ++ ++archprepare: ++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index 95365a8..52f857b 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ + $(call cc-option, -fno-stack-protector) \ + $(call cc-option, -mpreferred-stack-boundary=2) + KBUILD_CFLAGS += $(call cc-option, -m32) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify ++endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n + +diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h +index 878e4b9..20537ab 100644 +--- a/arch/x86/boot/bitops.h ++++ b/arch/x86/boot/bitops.h +@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) + u8 v; + const u32 *p = (const u32 *)addr; + +- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); ++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + return v; + } + +@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) + + static inline void set_bit(int nr, void *addr) + { +- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); ++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + } + + #endif /* BOOT_BITOPS_H */ +diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h +index c7093bd..d4247ffe0 100644 +--- a/arch/x86/boot/boot.h ++++ b/arch/x86/boot/boot.h +@@ -85,7 +85,7 @@ static inline void io_delay(void) + static inline u16 ds(void) + { + u16 seg; +- asm("movw %%ds,%0" : "=rm" (seg)); ++ asm volatile("movw %%ds,%0" : "=rm" (seg)); + return seg; + } + +@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr) + static inline int memcmp(const void *s1, const void *s2, size_t len) + { + u8 diff; +- asm("repe; cmpsb; setnz %0" ++ asm volatile("repe; cmpsb; setnz %0" + : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); + return diff; + } +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 09664ef..edc5d03 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small + KBUILD_CFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify ++endif + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 67a655a..b924059 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -76,7 +76,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -162,7 +162,7 @@ relocated: + * and where it was actually loaded. + */ + movl %ebp, %ebx +- subl $LOAD_PHYSICAL_ADDR, %ebx ++ subl $____LOAD_PHYSICAL_ADDR, %ebx + jz 2f /* Nothing to be done if loaded at compiled addr. */ + /* + * Process relocations. 
+@@ -170,8 +170,7 @@ relocated: + + 1: subl $4, %edi + movl (%edi), %ecx +- testl %ecx, %ecx +- jz 2f ++ jecxz 2f + addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) + jmp 1b + 2: +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 35af09d..99c9676 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -91,7 +91,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -233,7 +233,7 @@ ENTRY(startup_64) + notq %rax + andq %rax, %rbp + #else +- movq $LOAD_PHYSICAL_ADDR, %rbp ++ movq $____LOAD_PHYSICAL_ADDR, %rbp + #endif + + /* Target address to relocate to for decompression */ +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index 3a19d04..7c1d55a 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -310,7 +310,7 @@ static void parse_elf(void *output) + case PT_LOAD: + #ifdef CONFIG_RELOCATABLE + dest = output; +- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); ++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); + #else + dest = (void *)(phdr->p_paddr); + #endif +@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, + error("Destination address too large"); + #endif + #ifndef CONFIG_RELOCATABLE +- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) ++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) + error("Wrong destination address"); + #endif + +diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c +index 89bbf4e..869908e 100644 +--- a/arch/x86/boot/compressed/relocs.c ++++ b/arch/x86/boot/compressed/relocs.c +@@ -13,8 +13,11 @@ + + static void die(char *fmt, ...); + ++#include "../../../../include/generated/autoconf.h" ++ + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + static Elf32_Ehdr ehdr; ++static Elf32_Phdr *phdr; + static unsigned long reloc_count, reloc_idx; + static unsigned long *relocs; + +@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp) + } + } + ++static void read_phdrs(FILE *fp) ++{ ++ unsigned int i; ++ ++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr)); ++ if (!phdr) { ++ die("Unable to allocate %d program headers\n", ++ ehdr.e_phnum); ++ } ++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { ++ die("Seek to %d failed: %s\n", ++ ehdr.e_phoff, strerror(errno)); ++ } ++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { ++ die("Cannot read ELF program headers: %s\n", ++ strerror(errno)); ++ } ++ for(i = 0; i < ehdr.e_phnum; i++) { ++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type); ++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset); ++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr); ++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr); ++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz); ++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz); ++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags); ++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align); ++ } ++ ++} ++ + static void read_shdrs(FILE *fp) + { +- int i; ++ unsigned int i; + Elf32_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); +@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp) + + static void read_strtabs(FILE *fp) + { +- int i; ++ unsigned int i; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_STRTAB) { +@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp) + + static void 
read_symtabs(FILE *fp) + { +- int i,j; ++ unsigned int i,j; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_SYMTAB) { +@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp) + + static void read_relocs(FILE *fp) + { +- int i,j; ++ unsigned int i,j; ++ uint32_t base; ++ + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_REL) { +@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp) + die("Cannot read symbol table: %s\n", + strerror(errno)); + } ++ base = 0; ++ for (j = 0; j < ehdr.e_phnum; j++) { ++ if (phdr[j].p_type != PT_LOAD ) ++ continue; ++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) ++ continue; ++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; ++ break; ++ } + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { + Elf32_Rel *rel = &sec->reltab[j]; +- rel->r_offset = elf32_to_cpu(rel->r_offset); ++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base; + rel->r_info = elf32_to_cpu(rel->r_info); + } + } +@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp) + + static void print_absolute_symbols(void) + { +- int i; ++ unsigned int i; + printf("Absolute symbols\n"); + printf(" Num: Value Size Type Bind Visibility Name\n"); + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + char *sym_strtab; + Elf32_Sym *sh_symtab; +- int j; ++ unsigned int j; + + if (sec->shdr.sh_type != SHT_SYMTAB) { + continue; +@@ -431,14 +475,14 @@ static void print_absolute_symbols(void) + + static void print_absolute_relocs(void) + { +- int i, printed = 0; ++ unsigned int i, printed = 0; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + struct section *sec_applies, *sec_symtab; + char *sym_strtab; + Elf32_Sym *sh_symtab; +- int j; ++ unsigned int j; + if (sec->shdr.sh_type != SHT_REL) { + continue; + } +@@ -499,13 +543,13 @@ static void print_absolute_relocs(void) + + static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) + { +- int i; ++ unsigned int i; + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf32_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; +- int j; ++ unsigned int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL) { +@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) + !is_rel_reloc(sym_name(sym_strtab, sym))) { + continue; + } ++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ ++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) ++ continue; ++ ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32) ++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ ++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".init.text")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) ++ continue; ++#endif ++ + switch (r_type) { + case R_386_NONE: + case R_386_PC32: +@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb) + + static void emit_relocs(int as_text) + { +- int i; ++ unsigned 
int i; + /* Count how many relocations I have and allocate space for them. */ + reloc_count = 0; + walk_relocs(count_reloc); +@@ -665,6 +725,7 @@ int main(int argc, char **argv) + fname, strerror(errno)); + } + read_ehdr(fp); ++ read_phdrs(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); +diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c +index 4d3ff03..e4972ff 100644 +--- a/arch/x86/boot/cpucheck.c ++++ b/arch/x86/boot/cpucheck.c +@@ -74,7 +74,7 @@ static int has_fpu(void) + u16 fcw = -1, fsw = -1; + u32 cr0; + +- asm("movl %%cr0,%0" : "=r" (cr0)); ++ asm volatile("movl %%cr0,%0" : "=r" (cr0)); + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { + cr0 &= ~(X86_CR0_EM|X86_CR0_TS); + asm volatile("movl %0,%%cr0" : : "r" (cr0)); +@@ -90,7 +90,7 @@ static int has_eflag(u32 mask) + { + u32 f0, f1; + +- asm("pushfl ; " ++ asm volatile("pushfl ; " + "pushfl ; " + "popl %0 ; " + "movl %0,%1 ; " +@@ -115,7 +115,7 @@ static void get_flags(void) + set_bit(X86_FEATURE_FPU, cpu.flags); + + if (has_eflag(X86_EFLAGS_ID)) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_intel_level), + "=b" (cpu_vendor[0]), + "=d" (cpu_vendor[1]), +@@ -124,7 +124,7 @@ static void get_flags(void) + + if (max_intel_level >= 0x00000001 && + max_intel_level <= 0x0000ffff) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (tfms), + "=c" (cpu.flags[4]), + "=d" (cpu.flags[0]) +@@ -136,7 +136,7 @@ static void get_flags(void) + cpu.model += ((tfms >> 16) & 0xf) << 4; + } + +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_amd_level) + : "a" (0x80000000) + : "ebx", "ecx", "edx"); +@@ -144,7 +144,7 @@ static void get_flags(void) + if (max_amd_level >= 0x80000001 && + max_amd_level <= 0x8000ffff) { + u32 eax = 0x80000001; +- asm("cpuid" ++ asm volatile("cpuid" + : "+a" (eax), + "=c" (cpu.flags[6]), + "=d" (cpu.flags[1]) +@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_K7_HWCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax &= ~(1 << 15); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + get_flags(); /* Make sure it really did something */ + err = check_flags(); +@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_VIA_FCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax |= (1<<1)|(1<<7); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + set_bit(X86_FEATURE_CX8, cpu.flags); + err = check_flags(); +@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 eax, edx; + u32 level = 1; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); +- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); +- asm("cpuid" ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); ++ asm volatile("cpuid" + : "+a" (level), "=d" (cpu.flags[0]) + : : "ecx", "ebx"); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + err = check_flags(); + } +diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S +index bdb4d45..0476680 100644 +--- a/arch/x86/boot/header.S ++++ b/arch/x86/boot/header.S +@@ -224,7 +224,7 @@ setup_data: 
.quad 0 # 64-bit physical pointer to + # single linked list of + # struct setup_data + +-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr ++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr + + #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) + #define VO_INIT_SIZE (VO__end - VO__text) +diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c +index db75d07..8e6d0af 100644 +--- a/arch/x86/boot/memory.c ++++ b/arch/x86/boot/memory.c +@@ -19,7 +19,7 @@ + + static int detect_memory_e820(void) + { +- int count = 0; ++ unsigned int count = 0; + struct biosregs ireg, oreg; + struct e820entry *desc = boot_params.e820_map; + static struct e820entry buf; /* static so it is zeroed */ +diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c +index 11e8c6e..fdbb1ed 100644 +--- a/arch/x86/boot/video-vesa.c ++++ b/arch/x86/boot/video-vesa.c +@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) + + boot_params.screen_info.vesapm_seg = oreg.es; + boot_params.screen_info.vesapm_off = oreg.di; ++ boot_params.screen_info.vesapm_size = oreg.cx; + } + + /* +diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c +index 43eda28..5ab5fdb 100644 +--- a/arch/x86/boot/video.c ++++ b/arch/x86/boot/video.c +@@ -96,7 +96,7 @@ static void store_mode_params(void) + static unsigned int get_entry(void) + { + char entry_buf[4]; +- int i, len = 0; ++ unsigned int i, len = 0; + int key; + unsigned int v; + +diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S +index 5b577d5..3c1fed4 100644 +--- a/arch/x86/crypto/aes-x86_64-asm_64.S ++++ b/arch/x86/crypto/aes-x86_64-asm_64.S +@@ -8,6 +8,8 @@ + * including this sentence is retained in full. + */ + ++#include <asm/alternative-asm.h> ++ + .extern crypto_ft_tab + .extern crypto_it_tab + .extern crypto_fl_tab +@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \ + je B192; \ + leaq 32(r9),r9; + ++#define ret pax_force_retaddr 0, 1; ret ++ + #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \ + movq r1,r2; \ + movq r3,r4; \ +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index be6d9e3..21fbbca 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -31,6 +31,7 @@ + + #include <linux/linkage.h> + #include <asm/inst.h> ++#include <asm/alternative-asm.h> + + #ifdef __x86_64__ + .data +@@ -1436,7 +1437,9 @@ _return_T_done_decrypt: + pop %r14 + pop %r13 + pop %r12 ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_gcm_dec) + + + /***************************************************************************** +@@ -1699,7 +1702,9 @@ _return_T_done_encrypt: + pop %r14 + pop %r13 + pop %r12 ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_gcm_enc) + + #endif + +@@ -1714,6 +1719,7 @@ _key_expansion_256a: + pxor %xmm1, %xmm0 + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + + .align 4 +@@ -1738,6 +1744,7 @@ _key_expansion_192a: + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 0x10(TKEYP) + add $0x20, TKEYP ++ pax_force_retaddr_bts + ret + + .align 4 +@@ -1757,6 +1764,7 @@ _key_expansion_192b: + + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + + .align 4 +@@ -1769,6 +1777,7 @@ _key_expansion_256b: + pxor %xmm1, %xmm2 + movaps %xmm2, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr_bts + ret + + /* +@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key) + #ifndef __x86_64__ + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_set_key) + + /* + * void aesni_enc(struct crypto_aes_ctx *ctx, 
u8 *dst, const u8 *src) +@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_enc) + + /* + * _aesni_enc1: internal ABI +@@ -1959,6 +1972,7 @@ _aesni_enc1: + AESENC KEY STATE + movaps 0x70(TKEYP), KEY + AESENCLAST KEY STATE ++ pax_force_retaddr_bts + ret + + /* +@@ -2067,6 +2081,7 @@ _aesni_enc4: + AESENCLAST KEY STATE2 + AESENCLAST KEY STATE3 + AESENCLAST KEY STATE4 ++ pax_force_retaddr_bts + ret + + /* +@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_dec) + + /* + * _aesni_dec1: internal ABI +@@ -2146,6 +2163,7 @@ _aesni_dec1: + AESDEC KEY STATE + movaps 0x70(TKEYP), KEY + AESDECLAST KEY STATE ++ pax_force_retaddr_bts + ret + + /* +@@ -2254,6 +2272,7 @@ _aesni_dec4: + AESDECLAST KEY STATE2 + AESDECLAST KEY STATE3 + AESDECLAST KEY STATE4 ++ pax_force_retaddr_bts + ret + + /* +@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_ecb_enc) + + /* + * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, +@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_ecb_dec) + + /* + * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, +@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc) + popl LEN + popl IVP + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_cbc_enc) + + /* + * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, +@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec) + popl LEN + popl IVP + #endif ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_cbc_dec) + + #ifdef __x86_64__ + .align 16 +@@ -2524,6 +2551,7 @@ _aesni_inc_init: + mov $1, TCTR_LOW + MOVQ_R64_XMM TCTR_LOW INC + MOVQ_R64_XMM CTR TCTR_LOW ++ pax_force_retaddr_bts + ret + + /* +@@ -2552,6 +2580,7 @@ _aesni_inc: + .Linc_low: + movaps CTR, IV + PSHUFB_XMM BSWAP_MASK IV ++ pax_force_retaddr_bts + ret + + /* +@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc) + .Lctr_enc_ret: + movups IV, (IVP) + .Lctr_enc_just_ret: ++ pax_force_retaddr 0, 1 + ret ++ENDPROC(aesni_ctr_enc) + #endif +diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S +index 391d245..67f35c2 100644 +--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S ++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S +@@ -20,6 +20,8 @@ + * + */ + ++#include <asm/alternative-asm.h> ++ + .file "blowfish-x86_64-asm.S" + .text + +@@ -151,9 +153,11 @@ __blowfish_enc_blk: + jnz __enc_xor; + + write_block(); ++ pax_force_retaddr 0, 1 + ret; + __enc_xor: + xor_block(); ++ pax_force_retaddr 0, 1 + ret; + + .align 8 +@@ -188,6 +192,7 @@ blowfish_dec_blk: + + movq %r11, %rbp; + ++ pax_force_retaddr 0, 1 + ret; + + /********************************************************************** +@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way: + + popq %rbx; + popq %rbp; ++ pax_force_retaddr 0, 1 + ret; + + __enc_xor4: +@@ -349,6 +355,7 @@ __enc_xor4: + + popq %rbx; + popq %rbp; ++ pax_force_retaddr 0, 1 + ret; + + .align 8 +@@ -386,5 +393,6 @@ blowfish_dec_blk_4way: + popq %rbx; + popq %rbp; + ++ pax_force_retaddr 0, 1 + ret; + +diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S +index 6214a9b..1f4fc9a 100644 +--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S ++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S +@@ -1,3 +1,5 @@ ++#include <asm/alternative-asm.h> ++ + # enter ECRYPT_encrypt_bytes + .text + .p2align 5 +@@ -790,6 
+792,7 @@ ECRYPT_encrypt_bytes: + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr 0, 1 + ret + # bytesatleast65: + ._bytesatleast65: +@@ -891,6 +894,7 @@ ECRYPT_keysetup: + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + # enter ECRYPT_ivsetup + .text +@@ -917,4 +921,5 @@ ECRYPT_ivsetup: + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret +diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S +index b2c2f57..8470cab 100644 +--- a/arch/x86/crypto/sha1_ssse3_asm.S ++++ b/arch/x86/crypto/sha1_ssse3_asm.S +@@ -28,6 +28,8 @@ + * (at your option) any later version. + */ + ++#include <asm/alternative-asm.h> ++ + #define CTX %rdi // arg1 + #define BUF %rsi // arg2 + #define CNT %rdx // arg3 +@@ -104,6 +106,7 @@ + pop %r12 + pop %rbp + pop %rbx ++ pax_force_retaddr 0, 1 + ret + + .size \name, .-\name +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +index 5b012a2..36d5364 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +@@ -20,6 +20,8 @@ + * + */ + ++#include <asm/alternative-asm.h> ++ + .file "twofish-x86_64-asm-3way.S" + .text + +@@ -260,6 +262,7 @@ __twofish_enc_blk_3way: + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + + __enc_xor3: +@@ -271,6 +274,7 @@ __enc_xor3: + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + + .global twofish_dec_blk_3way +@@ -312,5 +316,6 @@ twofish_dec_blk_3way: + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr 0, 1 + ret; + +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S +index 7bcf3fc..f53832f 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S +@@ -21,6 +21,7 @@ + .text + + #include <asm/asm-offsets.h> ++#include <asm/alternative-asm.h> + + #define a_offset 0 + #define b_offset 4 +@@ -268,6 +269,7 @@ twofish_enc_blk: + + popq R1 + movq $1,%rax ++ pax_force_retaddr 0, 1 + ret + + twofish_dec_blk: +@@ -319,4 +321,5 @@ twofish_dec_blk: + + popq R1 + movq $1,%rax ++ pax_force_retaddr 0, 1 + ret +diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c +index fd84387..887aa7e 100644 +--- a/arch/x86/ia32/ia32_aout.c ++++ b/arch/x86/ia32/ia32_aout.c +@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, + unsigned long dump_start, dump_size; + struct user32 dump; + ++ memset(&dump, 0, sizeof(dump)); ++ + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; +@@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) + current->mm->free_area_cache = TASK_UNMAPPED_BASE; + current->mm->cached_hole_size = 0; + ++ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); ++ if (retval < 0) { ++ /* Someone check-me: is this error path enough? */ ++ send_sig(SIGKILL, current, 0); ++ return retval; ++ } ++ + install_exec_creds(bprm); + current->flags &= ~PF_FORKNOEXEC; + +@@ -410,13 +419,6 @@ beyond_if: + + set_brk(current->mm->start_brk, current->mm->brk); + +- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); +- if (retval < 0) { +- /* Someone check-me: is this error path enough? 
*/ +- send_sig(SIGKILL, current, 0); +- return retval; +- } +- + current->mm->start_stack = + (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); + /* start thread */ +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c +index 6557769..ef6ae89 100644 +--- a/arch/x86/ia32/ia32_signal.c ++++ b/arch/x86/ia32/ia32_signal.c +@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, + } + seg = get_fs(); + set_fs(KERNEL_DS); +- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); ++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp); + set_fs(seg); + if (ret >= 0 && uoss_ptr) { + if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) +@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, + */ + static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size, +- void **fpstate) ++ void __user **fpstate) + { + unsigned long sp; + +@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + + if (used_math()) { + sp = sp - sig_xstate_ia32_size; +- *fpstate = (struct _fpstate_ia32 *) sp; ++ *fpstate = (struct _fpstate_ia32 __user *) sp; + if (save_i387_xstate_ia32(*fpstate) < 0) + return (void __user *) -1L; + } +@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + sp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + return (void __user *) sp; + } + +@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, + * These are actually not used anymore, but left because some + * gdb versions depend on them as a marker. + */ +- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + 0xb8, + __NR_ia32_rt_sigreturn, + 0x80cd, +- 0, ++ 0 + }; + + frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); +@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; ++ else if (current->mm->context.vdso) ++ /* Return stub is in 32bit vsyscall page */ ++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + else +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, +- rt_sigreturn); ++ restorer = &frame->retcode; + put_user_ex(ptr_to_compat(restorer), &frame->pretcode); + + /* + * Not actually used anymore, but left because some gdb + * versions need it. + */ +- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index a6253ec..4ad2120 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -13,7 +13,9 @@ + #include <asm/thread_info.h> + #include <asm/segment.h> + #include <asm/irqflags.h> ++#include <asm/pgtable.h> + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. 
*/ + #include <linux/elf-em.h> +@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit) + ENDPROC(native_irq_enable_sysexit) + #endif + ++ .macro pax_enter_kernel_user ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ .endm ++ ++ .macro pax_exit_kernel_user ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushq %rax ++ pushq %r11 ++ call pax_randomize_kstack ++ popq %r11 ++ popq %rax ++#endif ++ .endm ++ ++.macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++.endm ++ + /* + * 32bit SYSENTER instruction entry. + * +@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target) + CFI_REGISTER rsp,rbp + SWAPGS_UNSAFE_STACK + movq PER_CPU_VAR(kernel_stack), %rsp +- addq $(KERNEL_STACK_OFFSET),%rsp +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs, here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %ebp,%ebp /* zero extension */ + pushq_cfi $__USER32_DS + /*CFI_REL_OFFSET ss,0*/ +@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target) + CFI_REL_OFFSET rsp,0 + pushfq_cfi + /*CFI_REL_OFFSET rflags,0*/ +- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d +- CFI_REGISTER rip,r10 ++ orl $X86_EFLAGS_IF,(%rsp) ++ GET_THREAD_INFO(%r11) ++ movl TI_sysenter_return(%r11), %r11d ++ CFI_REGISTER rip,r11 + pushq_cfi $__USER32_CS + /*CFI_REL_OFFSET cs,0*/ + movl %eax, %eax +- pushq_cfi %r10 ++ pushq_cfi %r11 + CFI_REL_OFFSET rip,0 + pushq_cfi %rax + cld + SAVE_ARGS 0,1,0 ++ pax_enter_kernel_user ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs, here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) + /* no need to do an access_ok check here because rbp has been + 32bit zero extended */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%r11 ++ add %r11,%rbp ++#endif ++ + 1: movl (%rbp),%ebp + .section __ex_table,"a" + .quad 1b,ia32_badarg + .previous +- GET_THREAD_INFO(%r10) +- orl $TS_COMPAT,TI_status(%r10) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + CFI_REMEMBER_STATE + jnz sysenter_tracesys + cmpq $(IA32_NR_syscalls-1),%rax +@@ -162,13 +198,15 @@ sysenter_do_call: + sysenter_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) +- GET_THREAD_INFO(%r10) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags(%r10) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysexit_audit + sysexit_from_sys_call: +- andl $~TS_COMPAT,TI_status(%r10) ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) + /* clear IF, that popfq doesn't enable interrupts early */ + andl $~0x200,EFLAGS-R11(%rsp) + movl RIP-R11(%rsp),%edx /* User %eip */ +@@ -194,6 +232,9 @@ sysexit_from_sys_call: + movl %eax,%esi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call audit_syscall_entry ++ ++ pax_erase_kstack ++ + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -205,7 +246,7 @@ sysexit_from_sys_call: + .endm + + .macro auditsys_exit exit +- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) ++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jnz ia32_ret_from_sys_call + TRACE_IRQS_ON + sti +@@ -215,12 +256,12 @@ 
sysexit_from_sys_call: + movzbl %al,%edi /* zero-extend that into %edi */ + inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ + call audit_syscall_exit +- GET_THREAD_INFO(%r10) ++ GET_THREAD_INFO(%r11) + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi + cli + TRACE_IRQS_OFF +- testl %edi,TI_flags(%r10) ++ testl %edi,TI_flags(%r11) + jz \exit + CLEAR_RREGS -ARGOFFSET + jmp int_with_check +@@ -238,7 +279,7 @@ sysexit_audit: + + sysenter_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz sysenter_auditsys + #endif + SAVE_REST +@@ -246,6 +287,9 @@ sysenter_tracesys: + movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax +@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target) + ENTRY(ia32_cstar_target) + CFI_STARTPROC32 simple + CFI_SIGNAL_FRAME +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET ++ CFI_DEF_CFA rsp,0 + CFI_REGISTER rip,rcx + /*CFI_REGISTER rflags,r11*/ + SWAPGS_UNSAFE_STACK + movl %esp,%r8d + CFI_REGISTER rsp,r8 + movq PER_CPU_VAR(kernel_stack),%rsp ++ SAVE_ARGS 8*6,0,0 ++ pax_enter_kernel_user + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs and here we enable it straight after entry: + */ + ENABLE_INTERRUPTS(CLBR_NONE) +- SAVE_ARGS 8,0,0 + movl %eax,%eax /* zero extension */ + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rcx,RIP-ARGOFFSET(%rsp) +@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target) + /* no need to do an access_ok check here because r8 has been + 32bit zero extended */ + /* hardware stack frame is complete now */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%r11 ++ add %r11,%r8 ++#endif ++ + 1: movl (%r8),%r9d + .section __ex_table,"a" + .quad 1b,ia32_badarg + .previous +- GET_THREAD_INFO(%r10) +- orl $TS_COMPAT,TI_status(%r10) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + CFI_REMEMBER_STATE + jnz cstar_tracesys + cmpq $IA32_NR_syscalls-1,%rax +@@ -321,13 +372,15 @@ cstar_do_call: + cstar_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) +- GET_THREAD_INFO(%r10) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags(%r10) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysretl_audit + sysretl_from_sys_call: +- andl $~TS_COMPAT,TI_status(%r10) ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) + RESTORE_ARGS 0,-ARG_SKIP,0,0,0 + movl RIP-ARGOFFSET(%rsp),%ecx + CFI_REGISTER rip,rcx +@@ -355,7 +408,7 @@ sysretl_audit: + + cstar_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz cstar_auditsys + #endif + xchgl %r9d,%ebp +@@ -364,6 +417,9 @@ cstar_tracesys: + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ + RESTORE_REST + 
xchgl %ebp,%r9d +@@ -409,20 +465,21 @@ ENTRY(ia32_syscall) + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME + SWAPGS +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs and here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %eax,%eax + pushq_cfi %rax + cld + /* note the registers are not zero extended to the sf. + this could be a problem. */ + SAVE_ARGS 0,1,0 +- GET_THREAD_INFO(%r10) +- orl $TS_COMPAT,TI_status(%r10) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) ++ pax_enter_kernel_user ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs and here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + jnz ia32_tracesys + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -441,6 +498,9 @@ ia32_tracesys: + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax +@@ -455,6 +515,7 @@ ia32_badsys: + + quiet_ni_syscall: + movq $-ENOSYS,%rax ++ pax_force_retaddr + ret + CFI_ENDPROC + +diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c +index f6f5c53..b358b28 100644 +--- a/arch/x86/ia32/sys_ia32.c ++++ b/arch/x86/ia32/sys_ia32.c +@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, + */ + static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) + { +- typeof(ubuf->st_uid) uid = 0; +- typeof(ubuf->st_gid) gid = 0; ++ typeof(((struct stat64 *)0)->st_uid) uid = 0; ++ typeof(((struct stat64 *)0)->st_gid) gid = 0; + SET_UID(uid, stat->uid); + SET_GID(gid, stat->gid); + if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || +@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, + } + set_fs(KERNEL_DS); + ret = sys_rt_sigprocmask(how, +- set ? (sigset_t __user *)&s : NULL, +- oset ? (sigset_t __user *)&s : NULL, ++ set ? (sigset_t __force_user *)&s : NULL, ++ oset ? 
(sigset_t __force_user *)&s : NULL, + sigsetsize); + set_fs(old_fs); + if (ret) +@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds) + return alarm_setitimer(seconds); + } + +-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, ++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, + int options) + { + return compat_sys_wait4(pid, stat_addr, options, NULL); +@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); ++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t); + set_fs(old_fs); + if (put_compat_timespec(&t, interval)) + return -EFAULT; +@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize); ++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize); + set_fs(old_fs); + if (!ret) { + switch (_NSIG_WORDS) { +@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, + if (copy_siginfo_from_user32(&info, uinfo)) + return -EFAULT; + set_fs(KERNEL_DS); +- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); ++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info); + set_fs(old_fs); + return ret; + } +@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, + return -EFAULT; + + set_fs(KERNEL_DS); +- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, ++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL, + count); + set_fs(old_fs); + +diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h +index 091508b..7692c6f 100644 +--- a/arch/x86/include/asm/alternative-asm.h ++++ b/arch/x86/include/asm/alternative-asm.h +@@ -4,10 +4,10 @@ + + #ifdef CONFIG_SMP + .macro LOCK_PREFIX +-1: lock ++672: lock + .section .smp_locks,"a" + .balign 4 +- .long 1b - . ++ .long 672b - . + .previous + .endm + #else +@@ -15,6 +15,45 @@ + .endm + #endif + ++#ifdef KERNEXEC_PLUGIN ++ .macro pax_force_retaddr_bts rip=0 ++ btsq $63,\rip(%rsp) ++ .endm ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ .macro pax_force_retaddr rip=0, reload=0 ++ btsq $63,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ btsq $63,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ .macro pax_force_retaddr rip=0, reload=0 ++ .if \reload ++ pax_set_fptr_mask ++ .endif ++ orq %r10,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ orq %r10,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ movabs $0x8000000000000000,%r10 ++ .endm ++#endif ++#else ++ .macro pax_force_retaddr rip=0, reload=0 ++ .endm ++ .macro pax_force_fptr ptr ++ .endm ++ .macro pax_force_retaddr_bts rip=0 ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++ + .macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . 
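For readers following the grsecurity/PaX hunks above: the pax_force_retaddr / pax_force_fptr / pax_set_fptr_mask macros just added to alternative-asm.h carry the KERNEXEC return-address and function-pointer hardening. As the diff shows, before a "ret" the saved return address on the stack gets its top bit set, either directly with "btsq $63" or by OR-ing in a mask held in %r10 ("movabs $0x8000000000000000,%r10"), so a return address that has been redirected towards user space ends up in the kernel half of the address space and faults rather than executing attacker-controlled code. A minimal illustrative sketch in C follows; it is not part of the patch and the names are made up:

    #include <stdint.h>

    /* KERNEXEC "bts" method: unconditionally set bit 63 of the return address. */
    static inline uint64_t kernexec_force_retaddr_bts(uint64_t retaddr)
    {
            return retaddr | (1ULL << 63);
    }

    /* KERNEXEC "or" method: the mask is kept in a reserved register (%r10 in
     * the patch); here it is simply passed in as a parameter. */
    static inline uint64_t kernexec_force_retaddr_or(uint64_t retaddr, uint64_t mask)
    {
            return retaddr | mask;  /* mask is 0x8000000000000000 */
    }

In the real patch this happens in assembly on the value sitting at \rip(%rsp), immediately before the ret instruction of each hardened routine.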
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h +index 37ad100..7d47faa 100644 +--- a/arch/x86/include/asm/alternative.h ++++ b/arch/x86/include/asm/alternative.h +@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end) + ".section .discard,"aw",@progbits\n" \ + " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ + ".previous\n" \ +- ".section .altinstr_replacement, "ax"\n" \ ++ ".section .altinstr_replacement, "a"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" + +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 1a6c09a..fec2432 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void) + + #ifdef CONFIG_X86_LOCAL_APIC + +-extern unsigned int apic_verbosity; ++extern int apic_verbosity; + extern int local_apic_timer_c2_ok; + + extern int disable_apic; +diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h +index 20370c6..a2eb9b0 100644 +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h +index 58cb6d4..ca9010d 100644 +--- a/arch/x86/include/asm/atomic.h ++++ b/arch/x86/include/asm/atomic.h +@@ -22,7 +22,18 @@ + */ + static inline int atomic_read(const atomic_t *v) + { +- return (*(volatile int *)&(v)->counter); ++ return (*(volatile const int *)&(v)->counter); ++} ++ ++/** ++ * atomic_read_unchecked - read atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ */ ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return (*(volatile const int *)&(v)->counter); + } + + /** +@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i) + } + + /** ++ * atomic_set_unchecked - set atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. ++ */ ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t +@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i) + */ + static inline void atomic_add(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "addl %1,%0" ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_add_unchecked - add integer to atomic variable ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v. 
++ */ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v) + */ + static inline void atomic_sub(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "subl %1,%0" ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_sub_unchecked - subtract integer from atomic variable ++ * @i: integer value to subtract ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) + */ + static inline void atomic_inc(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "incl %0" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_inc_unchecked - increment atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "incl %0\n" + : "+m" (v->counter)); + } + +@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v) + */ + static inline void atomic_dec(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "decl %0" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_dec_unchecked - decrement atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decl %0\n" + : "+m" (v->counter)); + } + +@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decl %0; sete %1" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incl %0; sete %1" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" ++ : "+m" (v->counter), "=qm" (c) ++ : : "memory"); ++ return c != 0; ++} ++ ++/** ++ * atomic_inc_and_test_unchecked - increment and test ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1 ++ * and returns true if the result is zero, or false for all ++ * other cases. ++ */ ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ unsigned char c; ++ ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v) + goto no_xadd; + #endif + /* Modern 486+ processor */ +- return i + xadd(&v->counter, i); ++ return i + xadd_check_overflow(&v->counter, i); + + #ifdef CONFIG_M386 + no_xadd: /* Legacy 386 processor */ +@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */ + } + + /** ++ * atomic_add_return_unchecked - add integer and return ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++#ifdef CONFIG_M386 ++ int __i; ++ unsigned long flags; ++ if (unlikely(boot_cpu_data.x86 <= 3)) ++ goto no_xadd; ++#endif ++ /* Modern 486+ processor */ ++ return i + xadd(&v->counter, i); ++ ++#ifdef CONFIG_M386 ++no_xadd: /* Legacy 386 processor */ ++ raw_local_irq_save(flags); ++ __i = atomic_read_unchecked(v); ++ atomic_set_unchecked(v, i + __i); ++ raw_local_irq_restore(flags); ++ return i + __i; ++#endif ++} ++ ++/** + * atomic_sub_return - subtract integer and return + * @v: pointer of type atomic_t + * @i: integer value to subtract +@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) + } + + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + + static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) + return cmpxchg(&v->counter, old, new); + } + ++static inline int 
atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ + static inline int atomic_xchg(atomic_t *v, int new) + { + return xchg(&v->counter, new); + } + ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} ++ + /** + * __atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t +@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new) + */ + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "subl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + return c; + } + ++/** ++ * atomic_inc_not_zero_hint - increment if not null ++ * @v: pointer of type atomic_t ++ * @hint: probable value of the atomic before the increment ++ * ++ * This version of atomic_inc_not_zero() gives a hint of probable ++ * value of the atomic. This helps processor to not read the memory ++ * before doing the atomic read/modify/write cycle, lowering ++ * number of bus transactions on some arches. ++ * ++ * Returns: 0 if increment was not done, 1 otherwise. ++ */ ++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint ++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) ++{ ++ int val, c = hint, new; ++ ++ /* sanity test, should be removed by compiler if hint is a constant */ ++ if (!hint) ++ return __atomic_add_unless(v, 1, 0); ++ ++ do { ++ asm volatile("incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c)); ++ ++ val = atomic_cmpxchg(v, c, new); ++ if (val == c) ++ return 1; ++ c = val; ++ } while (c); ++ ++ return 0; ++} + + /* + * atomic_dec_if_positive - decrement by 1 if old value positive +diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h +index 24098aa..1e37723 100644 +--- a/arch/x86/include/asm/atomic64_32.h ++++ b/arch/x86/include/asm/atomic64_32.h +@@ -12,6 +12,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(val) { (val) } + + #ifdef CONFIG_X86_CMPXCHG64 +@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n + } + + /** ++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable ++ * @p: pointer to type atomic64_unchecked_t ++ * @o: expected value ++ * @n: new value ++ * ++ * Atomically sets @v to @n if it was equal to @o and returns ++ * the old value. 
++ */ ++ ++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) ++{ ++ return cmpxchg64(&v->counter, o, n); ++} ++ ++/** + * atomic64_xchg - xchg atomic64 variable + * @v: pointer to type atomic64_t + * @n: value to assign +@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @n: value to assign ++ * ++ * Atomically sets the value of @v to @n. ++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) ++{ ++ unsigned high = (unsigned)(i >> 32); ++ unsigned low = (unsigned)i; ++ asm volatile(ATOMIC64_ALTERNATIVE(set) ++ : "+b" (low), "+c" (high) ++ : "S" (v) ++ : "eax", "edx", "memory" ++ ); ++} ++ ++/** + * atomic64_read - read atomic64 variable + * @v: pointer to type atomic64_t + * +@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v) + } + + /** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v and returns it. ++ */ ++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v) ++{ ++ long long r; ++ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked) ++ : "=A" (r), "+c" (v) ++ : : "memory" ++ ); ++ return r; ++ } ++ ++/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) + return i; + } + ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + *@v ++ */ ++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked) ++ : "+A" (i), "+c" (v) ++ : : "memory" ++ ); ++ return i; ++} ++ + /* + * Other variants with different arithmetic operators: + */ +@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v) + return a; + } + ++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ long long a; ++ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked) ++ : "=A" (a) ++ : "S" (v) ++ : "memory", "ecx" ++ ); ++ return a; ++} ++ + static inline long long atomic64_dec_return(atomic64_t *v) + { + long long a; +@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v) + } + + /** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. 
++ */ ++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked) ++ : "+A" (i), "+c" (v) ++ : : "memory" ++ ); ++ return i; ++} ++ ++/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t +diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h +index 0e1cbfc..5623683 100644 +--- a/arch/x86/include/asm/atomic64_64.h ++++ b/arch/x86/include/asm/atomic64_64.h +@@ -18,7 +18,19 @@ + */ + static inline long atomic64_read(const atomic64_t *v) + { +- return (*(volatile long *)&(v)->counter); ++ return (*(volatile const long *)&(v)->counter); ++} ++ ++/** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer of type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ * Doesn't imply a read memory barrier. ++ */ ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return (*(volatile const long *)&(v)->counter); + } + + /** +@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. ++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) + */ + static inline void atomic64_add(long i, atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "addq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v) + */ + static inline void atomic64_sub(long i, atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "subq %1,%0" ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_sub_unchecked - subtract the atomic64 variable ++ * @i: integer value to subtract ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically subtracts @i from @v. 
++ */ ++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); + } +@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) + */ + static inline void atomic64_inc(atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_inc_unchecked - increment atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); +@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v) + */ + static inline void atomic64_dec(atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "decq %0" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_dec_unchecked - decrement atomic64 variable ++ * @v: pointer to type atomic64_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decq %0\n" + : "=m" (v->counter) + : "m" (v->counter)); + } +@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decq %0; sete %1" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incq %0; sete %1" ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) + */ + static inline long atomic64_add_return(long i, atomic64_t *v) + { ++ return i + xadd_check_overflow(&v->counter, i); ++} ++ ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ + return i + xadd(&v->counter, i); + } + +@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) + } + + #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_return_unchecked(1, v); ++} + #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) + + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) + return cmpxchg(&v->counter, old, new); + } + ++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ + static inline long atomic64_xchg(atomic64_t *v, long new) + { + return xchg(&v->counter, new); +@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) + */ + static inline int atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("add %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/x86/include/asm/bitops.h 
b/arch/x86/include/asm/bitops.h +index 1775d6e..b65017f 100644 +--- a/arch/x86/include/asm/bitops.h ++++ b/arch/x86/include/asm/bitops.h +@@ -38,7 +38,7 @@ + * a mask operation on a byte. + */ + #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) +-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) ++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) + #define CONST_MASK(nr) (1 << ((nr) & 7)) + + /** +diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h +index 5e1a2ee..c9f9533 100644 +--- a/arch/x86/include/asm/boot.h ++++ b/arch/x86/include/asm/boot.h +@@ -11,10 +11,15 @@ + #include <asm/pgtable_types.h> + + /* Physical address where kernel should be loaded. */ +-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ ++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + ++#ifndef __ASSEMBLY__ ++extern unsigned char __LOAD_PHYSICAL_ADDR[]; ++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) ++#endif ++ + /* Minimum kernel alignment, as a power of two */ + #ifdef CONFIG_X86_64 + #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT +diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h +index 48f99f1..d78ebf9 100644 +--- a/arch/x86/include/asm/cache.h ++++ b/arch/x86/include/asm/cache.h +@@ -5,12 +5,13 @@ + + /* L1 cache line size */ + #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) ++#define __read_only __attribute__((__section__(".data..read_only"))) + + #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT +-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) ++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) + + #ifdef CONFIG_X86_VSMP + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h +index 4e12668..501d239 100644 +--- a/arch/x86/include/asm/cacheflush.h ++++ b/arch/x86/include/asm/cacheflush.h +@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg) + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) +- return -1; ++ return ~0UL; + else if (pg_flags == _PGMT_WC) + return _PAGE_CACHE_WC; + else if (pg_flags == _PGMT_UC_MINUS) +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h +index 46fc474..b02b0f9 100644 +--- a/arch/x86/include/asm/checksum_32.h ++++ b/arch/x86/include/asm/checksum_32.h +@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + ++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ ++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ + /* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. 
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, + int *err_ptr) + { + might_sleep(); +- return csum_partial_copy_generic((__force void *)src, dst, ++ return csum_partial_copy_generic_from_user((__force void *)src, dst, + len, sum, err_ptr, NULL); + } + +@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, + { + might_sleep(); + if (access_ok(VERIFY_WRITE, dst, len)) +- return csum_partial_copy_generic(src, (__force void *)dst, ++ return csum_partial_copy_generic_to_user(src, (__force void *)dst, + len, sum, NULL, err_ptr); + + if (len) +diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h +index 5d3acdf..6447a02 100644 +--- a/arch/x86/include/asm/cmpxchg.h ++++ b/arch/x86/include/asm/cmpxchg.h +@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + extern void __xadd_wrong_size(void) + __compiletime_error("Bad argument size for xadd"); ++extern void __xadd_check_overflow_wrong_size(void) ++ __compiletime_error("Bad argument size for xadd_check_overflow"); + + /* + * Constants for operation sizes. On 32-bit, the 64-bit size it set to +@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void) + __ret; \ + }) + ++#define __xadd_check_overflow(ptr, inc, lock) \ ++ ({ \ ++ __typeof__ (*(ptr)) __ret = (inc); \ ++ switch (sizeof(*(ptr))) { \ ++ case __X86_CASE_L: \ ++ asm volatile (lock "xaddl %0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ case __X86_CASE_Q: \ ++ asm volatile (lock "xaddq %q0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ default: \ ++ __xadd_check_overflow_wrong_size(); \ ++ } \ ++ __ret; \ ++ }) ++ + /* + * xadd() adds "inc" to "*ptr" and atomically returns the previous + * value of "*ptr". 
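The __xadd_check_overflow macro above uses the same pattern as the PAX_REFCOUNT variants of atomic_add()/atomic_inc()/atomic64_*() earlier in the patch: perform the locked operation, jump over a fixup with "jno" when no signed overflow occurred, and otherwise undo the operation and execute "int $4" (#OF), which the grsecurity trap handler reports as a reference-counter overflow; the *_unchecked variants keep the plain behaviour for counters that are allowed to wrap. A stand-alone sketch of that pattern, not taken from the patch and with an invented name, compiled as ordinary GCC inline assembly:

    /* Illustrative sketch of the PAX_REFCOUNT overflow trap: increment a
     * counter, and if the signed result overflowed, undo the increment and
     * raise the overflow exception for the trap handler to report. */
    static inline void refcount_inc_sketch(int *counter)
    {
            asm volatile("lock incl %0\n"
                         "jno 0f\n"          /* no signed overflow: done       */
                         "lock decl %0\n"    /* overflow: undo the increment   */
                         "int $4\n"          /* raise #OF for the trap handler */
                         "0:\n"
                         : "+m" (*counter)
                         :
                         : "memory", "cc");
            /* The real patch additionally lists the int $4 site in the kernel
             * exception table via _ASM_EXTABLE so execution resumes after it. */
    }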
+@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void) + #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") + #define xadd_local(ptr, inc) __xadd((ptr), (inc), "") + ++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX) ++ + #endif /* ASM_X86_CMPXCHG_H */ +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index f3444f7..051a196 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) + ".section .discard,"aw",@progbits\n" + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ + ".previous\n" +- ".section .altinstr_replacement,"ax"\n" ++ ".section .altinstr_replacement,"a"\n" + "3: movb $1,%0\n" + "4:\n" + ".previous\n" +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index 41935fa..3b40db8 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -4,6 +4,7 @@ + #include <asm/desc_defs.h> + #include <asm/ldt.h> + #include <asm/mmu.h> ++#include <asm/pgtable.h> + + #include <linux/smp.h> + +@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + + desc->type = (info->read_exec_only ^ 1) << 1; + desc->type |= info->contents << 2; ++ desc->type |= info->seg_not_present ^ 1; + + desc->s = 1; + desc->dpl = 0x3; +@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + } + + extern struct desc_ptr idt_descr; +-extern gate_desc idt_table[]; +- +-struct gdt_page { +- struct desc_struct gdt[GDT_ENTRIES]; +-} __attribute__((aligned(PAGE_SIZE))); +- +-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); ++extern gate_desc idt_table[256]; + ++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; + static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) + { +- return per_cpu(gdt_page, cpu).gdt; ++ return cpu_gdt_table[cpu]; + } + + #ifdef CONFIG_X86_64 +@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type, + unsigned long base, unsigned dpl, unsigned flags, + unsigned short seg) + { +- gate->a = (seg << 16) | (base & 0xffff); +- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); ++ gate->gate.offset_low = base; ++ gate->gate.seg = seg; ++ gate->gate.reserved = 0; ++ gate->gate.type = type; ++ gate->gate.s = 0; ++ gate->gate.dpl = dpl; ++ gate->gate.p = 1; ++ gate->gate.offset_high = base >> 16; + } + + #endif +@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) + + static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) + { ++ pax_open_kernel(); + memcpy(&idt[entry], gate, sizeof(*gate)); ++ pax_close_kernel(); + } + + static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) + { ++ pax_open_kernel(); + memcpy(&ldt[entry], desc, 8); ++ pax_close_kernel(); + } + + static inline void +@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int + default: size = sizeof(*gdt); break; + } + ++ pax_open_kernel(); + memcpy(&gdt[entry], desc, size); ++ pax_close_kernel(); + } + + static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, +@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) + + static inline void native_load_tr_desc(void) + { ++ 
pax_open_kernel(); + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); ++ pax_close_kernel(); + } + + static inline void native_load_gdt(const struct desc_ptr *dtr) +@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + unsigned int i; + ++ pax_open_kernel(); + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; ++ pax_close_kernel(); + } + + #define _LDT_empty(info) \ +@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) + desc->limit = (limit >> 16) & 0xf; + } + +-static inline void _set_gate(int gate, unsigned type, void *addr, ++static inline void _set_gate(int gate, unsigned type, const void *addr, + unsigned dpl, unsigned ist, unsigned seg) + { + gate_desc s; +@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr, + * Pentium F0 0F bugfix can have resulted in the mapped + * IDT being write-protected. + */ +-static inline void set_intr_gate(unsigned int n, void *addr) ++static inline void set_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); +@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr) + /* + * This routine sets up an interrupt gate at directory privilege level 3. + */ +-static inline void set_system_intr_gate(unsigned int n, void *addr) ++static inline void set_system_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_system_trap_gate(unsigned int n, void *addr) ++static inline void set_system_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_trap_gate(unsigned int n, void *addr) ++static inline void set_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); +@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr) + static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) + { + BUG_ON((unsigned)n > 0xFF); +- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); ++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); + } + +-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); + } + +-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); + } + ++#ifdef CONFIG_X86_32 ++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) ++{ ++ struct desc_struct d; ++ ++ if (likely(limit)) ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ pack_descriptor(&d, base, limit, 0xFB, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); ++} ++#endif ++ + #endif /* _ASM_X86_DESC_H */ +diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h +index 278441f..b95a174 100644 +--- a/arch/x86/include/asm/desc_defs.h ++++ b/arch/x86/include/asm/desc_defs.h 
+@@ -31,6 +31,12 @@ struct desc_struct { + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; + }; ++ struct { ++ u16 offset_low; ++ u16 seg; ++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; ++ unsigned offset_high: 16; ++ } gate; + }; + } __attribute__((packed)); + +diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h +index 908b969..a1f4eb4 100644 +--- a/arch/x86/include/asm/e820.h ++++ b/arch/x86/include/asm/e820.h +@@ -69,7 +69,7 @@ struct e820map { + #define ISA_START_ADDRESS 0xa0000 + #define ISA_END_ADDRESS 0x100000 + +-#define BIOS_BEGIN 0x000a0000 ++#define BIOS_BEGIN 0x000c0000 + #define BIOS_END 0x00100000 + + #define BIOS_ROM_BASE 0xffe00000 +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 5f962df..7289f09 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -238,7 +238,25 @@ extern int force_personality32; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) ++#else + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++#ifdef CONFIG_X86_32 ++#define PAX_ELF_ET_DYN_BASE 0x10000000UL ++ ++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#else ++#define PAX_ELF_ET_DYN_BASE 0x400000UL ++ ++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#endif ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
This could be done in user space, +@@ -291,9 +309,7 @@ do { \ + + #define ARCH_DLINFO \ + do { \ +- if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ + } while (0) + + #define AT_SYSINFO 32 +@@ -304,7 +320,7 @@ do { \ + + #endif /* !CONFIG_X86_32 */ + +-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) ++#define VDSO_CURRENT_BASE (current->mm->context.vdso) + + #define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, + extern int syscall32_setup_pages(struct linux_binprm *, int exstack); + #define compat_arch_setup_additional_pages syscall32_setup_pages + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + /* + * True on X86_32 or when emulating IA32 on X86_64 + */ +diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h +index cc70c1c..d96d011 100644 +--- a/arch/x86/include/asm/emergency-restart.h ++++ b/arch/x86/include/asm/emergency-restart.h +@@ -15,6 +15,6 @@ enum reboot_type { + + extern enum reboot_type reboot_type; + +-extern void machine_emergency_restart(void); ++extern void machine_emergency_restart(void) __noreturn; + + #endif /* _ASM_X86_EMERGENCY_RESTART_H */ +diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h +index dbe82a5..c6d8a00 100644 +--- a/arch/x86/include/asm/floppy.h ++++ b/arch/x86/include/asm/floppy.h +@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size) + } + + ++static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1); + static unsigned long vdma_mem_alloc(unsigned long size) + { + return (unsigned long)vmalloc(size); +diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h +index d09bb03..4ea4194 100644 +--- a/arch/x86/include/asm/futex.h ++++ b/arch/x86/include/asm/futex.h +@@ -12,16 +12,18 @@ + #include <asm/system.h> + + #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("1:\t" insn "\n" \ + "2:\t.section .fixup,"ax"\n" \ + "3:\tmov\t%3, %1\n" \ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ ++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\ + : "i" (-EFAULT), "0" (oparg), "1" (0)) + + #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ +@@ -34,7 +36,7 @@ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ +- "+m" (*uaddr), "=&r" (tem) \ ++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + + static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) + + switch (op) { + case FUTEX_OP_SET: +- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); ++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: +- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, ++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, + uaddr, oparg); + break; + case FUTEX_OP_OR: +@@ -123,13 +125,13 @@ static inline 
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + +- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" ++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n" + "2:\t.section .fixup, "ax"\n" + "3:\tmov %3, %0\n" + "\tjmp 2b\n" + "\t.previous\n" + _ASM_EXTABLE(1b, 3b) +- : "+r" (ret), "=a" (oldval), "+m" (*uaddr) ++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr)) + : "i" (-EFAULT), "r" (newval), "1" (oldval) + : "memory" + ); +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h +index eb92a6e..b98b2f4 100644 +--- a/arch/x86/include/asm/hw_irq.h ++++ b/arch/x86/include/asm/hw_irq.h +@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void); + extern void enable_IO_APIC(void); + + /* Statistics */ +-extern atomic_t irq_err_count; +-extern atomic_t irq_mis_count; ++extern atomic_unchecked_t irq_err_count; ++extern atomic_unchecked_t irq_mis_count; + + /* EISA */ + extern void eisa_set_level_irq(unsigned int irq); +diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h +index a850b4d..bae26dc 100644 +--- a/arch/x86/include/asm/i387.h ++++ b/arch/x86/include/asm/i387.h +@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) + { + int err; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) ++ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE); ++#endif ++ + /* See comment in fxsave() below. */ + #ifdef CONFIG_AS_FXSAVEQ + asm volatile("1: fxrstorq %[fx]\n\t" +@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) + { + int err; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) ++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE); ++#endif ++ + /* + * Clear the bytes not touched by the fxsave and reserved + * for the SW usage. +@@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void) + static inline bool interrupted_user_mode(void) + { + struct pt_regs *regs = get_irq_regs(); +- return regs && user_mode_vm(regs); ++ return regs && user_mode(regs); + } + + /* +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index d8e8eef..99f81ae 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void); + + #include <linux/vmalloc.h> + ++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE ++static inline int valid_phys_addr_range(unsigned long addr, size_t count) ++{ ++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ ++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) ++{ ++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 
1 : 0; ++} ++ + /* + * Convert a virtual cached pointer to an uncached pointer + */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index bba3cf8..06bc8da 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void) + sti; \ + sysexit + ++#define GET_CR0_INTO_RDI mov %cr0, %rdi ++#define SET_RDI_INTO_CR0 mov %rdi, %cr0 ++#define GET_CR3_INTO_RDI mov %cr3, %rdi ++#define SET_RDI_INTO_CR3 mov %rdi, %cr3 ++ + #else + #define INTERRUPT_RETURN iret + #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit +diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h +index 5478825..839e88c 100644 +--- a/arch/x86/include/asm/kprobes.h ++++ b/arch/x86/include/asm/kprobes.h +@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t; + #define RELATIVEJUMP_SIZE 5 + #define RELATIVECALL_OPCODE 0xe8 + #define RELATIVE_ADDR_SIZE 4 +-#define MAX_STACK_SIZE 64 +-#define MIN_STACK_SIZE(ADDR) \ +- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) \ +- ? (MAX_STACK_SIZE) \ +- : (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) ++#define MAX_STACK_SIZE 64UL ++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR)) + + #define flush_insn_slot(p) do { } while (0) + +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index b4973f4..a42170a 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -459,7 +459,7 @@ struct kvm_arch { + unsigned int n_requested_mmu_pages; + unsigned int n_max_mmu_pages; + unsigned int indirect_shadow_pages; +- atomic_t invlpg_counter; ++ atomic_unchecked_t invlpg_counter; + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; + /* + * Hash table of struct kvm_mmu_page. 
+@@ -638,7 +638,7 @@ struct kvm_x86_ops { + int (*check_intercept)(struct kvm_vcpu *vcpu, + struct x86_instruction_info *info, + enum x86_intercept_stage stage); +-}; ++} __do_const; + + struct kvm_arch_async_pf { + u32 token; +@@ -667,9 +667,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); + int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); + + int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, +- const void *val, int bytes); ++ const void *val, int bytes) __size_overflow(2); + int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, +- gpa_t addr, unsigned long *ret); ++ gpa_t addr, unsigned long *ret) __size_overflow(2,3); + u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); + + extern bool tdp_enabled; +@@ -730,7 +730,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); + int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); + + int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); +-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); ++int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3); + + unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); + void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); +@@ -755,7 +755,7 @@ int fx_init(struct kvm_vcpu *vcpu); + void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); + void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *new, int bytes, +- bool guest_initiated); ++ bool guest_initiated) __size_overflow(2); + int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); + void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); + int kvm_mmu_load(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h +index 9cdae5d..300d20f 100644 +--- a/arch/x86/include/asm/local.h ++++ b/arch/x86/include/asm/local.h +@@ -18,26 +18,58 @@ typedef struct { + + static inline void local_inc(local_t *l) + { +- asm volatile(_ASM_INC "%0" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_DEC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (l->a.counter)); + } + + static inline void local_dec(local_t *l) + { +- asm volatile(_ASM_DEC "%0" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (l->a.counter)); + } + + static inline void local_add(long i, local_t *l) + { +- asm volatile(_ASM_ADD "%1,%0" ++ asm volatile(_ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (l->a.counter) + : "ir" (i)); + } + + static inline void local_sub(long i, local_t *l) + { +- asm volatile(_ASM_SUB "%1,%0" ++ asm volatile(_ASM_SUB "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_ADD "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (l->a.counter) + : "ir" (i)); + } +@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_SUB "%2,%0; sete %1" ++ asm volatile(_ASM_SUB "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_ADD "%2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l) + { + unsigned char c; + +- asm 
volatile(_ASM_DEC "%0; sete %1" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_INC "%0; sete %1" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_DEC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l) + { + unsigned char c; + +- asm volatile(_ASM_ADD "%2,%0; sets %1" ++ asm volatile(_ASM_ADD "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_SUB "%2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l) + #endif + /* Modern 486+ processor */ + __i = i; +- asm volatile(_ASM_XADD "%0, %1;" ++ asm volatile(_ASM_XADD "%0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_MOV "%0,%1\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+r" (i), "+m" (l->a.counter) + : : "memory"); + return i + __i; +diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h +index 593e51d..fa69c9a 100644 +--- a/arch/x86/include/asm/mman.h ++++ b/arch/x86/include/asm/mman.h +@@ -5,4 +5,14 @@ + + #include <asm-generic/mman.h> + ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ ++#ifdef CONFIG_X86_32 ++#define arch_mmap_check i386_mmap_check ++int i386_mmap_check(unsigned long addr, unsigned long len, ++ unsigned long flags); ++#endif ++#endif ++#endif ++ + #endif /* _ASM_X86_MMAN_H */ +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 5f55e69..e20bfb1 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -9,7 +9,7 @@ + * we put the segment information here. 
+ */ + typedef struct { +- void *ldt; ++ struct desc_struct *ldt; + int size; + + #ifdef CONFIG_X86_64 +@@ -18,7 +18,19 @@ typedef struct { + #endif + + struct mutex lock; +- void *vdso; ++ unsigned long vdso; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ unsigned long user_cs_base; ++ unsigned long user_cs_limit; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpumask_t cpu_user_cs_mask; ++#endif ++ ++#endif ++#endif + } mm_context_t; + + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index 6902152..399f3a2 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm); + + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) + { ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ unsigned int i; ++ pgd_t *pgd; ++ ++ pax_open_kernel(); ++ pgd = get_cpu_pgd(smp_processor_id()); ++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) ++ set_pgd_batched(pgd+i, native_make_pgd(0)); ++ pax_close_kernel(); ++#endif ++ + #ifdef CONFIG_SMP + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); +@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) + { + unsigned cpu = smp_processor_id(); ++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ int tlbstate = TLBSTATE_OK; ++#endif + + if (likely(prev != next)) { + #ifdef CONFIG_SMP ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ tlbstate = percpu_read(cpu_tlbstate.state); ++#endif + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); + #endif + cpumask_set_cpu(cpu, mm_cpumask(next)); + + /* Re-load page tables */ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#else + load_cr3(next->pgd); ++#endif + + /* stop flush ipis for the previous mm */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); +@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + */ + if (unlikely(prev->context.ldt != next->context.ldt)) + load_LDT_nolock(&next->context); +- } ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ if (!(__supported_pte_mask & _PAGE_NX)) { ++ smp_mb__before_clear_bit(); ++ cpu_clear(cpu, prev->context.cpu_user_cs_mask); ++ smp_mb__after_clear_bit(); ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++ } ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || ++ prev->context.user_cs_limit != next->context.user_cs_limit)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); + #ifdef CONFIG_SMP ++ else if (unlikely(tlbstate != TLBSTATE_OK)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++#endif ++ ++ } + else { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); ++ 
__shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#endif ++ ++#ifdef CONFIG_SMP + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); + +@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + * tlb flush IPI delivery. We must reload CR3 + * to make sure to use no freed page tables. + */ ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(next->pgd); ++#endif ++ + load_LDT_nolock(&next->context); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX)) ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) ++#endif ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++ + } ++#endif + } +-#endif + } + + #define activate_mm(prev, next) \ +diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h +index 9eae775..c914fea 100644 +--- a/arch/x86/include/asm/module.h ++++ b/arch/x86/include/asm/module.h +@@ -5,6 +5,7 @@ + + #ifdef CONFIG_X86_64 + /* X86_64 does not define MODULE_PROC_FAMILY */ ++#define MODULE_PROC_FAMILY "" + #elif defined CONFIG_M386 + #define MODULE_PROC_FAMILY "386 " + #elif defined CONFIG_M486 +@@ -59,8 +60,20 @@ + #error unknown processor family + #endif + +-#ifdef CONFIG_X86_32 +-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS " ++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) ++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " ++#else ++#define MODULE_PAX_KERNEXEC "" + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define MODULE_PAX_UDEREF "UDEREF " ++#else ++#define MODULE_PAX_UDEREF "" ++#endif ++ ++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF ++ + #endif /* _ASM_X86_MODULE_H */ +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h +index 7639dbf..e08a58c 100644 +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -56,7 +56,7 @@ void copy_page(void *to, void *from); + + /* duplicated to the one in bootmem.h */ + extern unsigned long max_pfn; +-extern unsigned long phys_base; ++extern const unsigned long phys_base; + + extern unsigned long __phys_addr(unsigned long); + #define __phys_reloc_hide(x) (x) +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h +index a7d2db9..edb023e 100644 +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) + val); + } + ++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ ++ pgdval_t val = native_pgd_val(pgd); ++ ++ if (sizeof(pgdval_t) > sizeof(long)) ++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp, ++ val, (u64)val >> 32); ++ else ++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp, ++ val); ++} ++ + static inline void pgd_clear(pgd_t *pgdp) + { + set_pgd(pgdp, __pgd(0)); +@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, + pv_mmu_ops.set_fixmap(idx, phys, flags); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long pax_open_kernel(void) ++{ ++ return PVOP_CALL0(unsigned 
long, pv_mmu_ops.pax_open_kernel); ++} ++ ++static inline unsigned long pax_close_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); ++} ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) + + static inline int arch_spin_is_locked(struct arch_spinlock *lock) +@@ -964,7 +991,7 @@ extern void default_banner(void); + + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) + #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) +-#define PARA_INDIRECT(addr) *%cs:addr ++#define PARA_INDIRECT(addr) *%ss:addr + #endif + + #define INTERRUPT_RETURN \ +@@ -1041,6 +1068,21 @@ extern void default_banner(void); + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ + CLBR_NONE, \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) ++ ++#define GET_CR0_INTO_RDI \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR0 \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++ ++#define GET_CR3_INTO_RDI \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR3 \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) ++ + #endif /* CONFIG_X86_32 */ + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h +index 8e8b9a4..f07d725 100644 +--- a/arch/x86/include/asm/paravirt_types.h ++++ b/arch/x86/include/asm/paravirt_types.h +@@ -84,20 +84,20 @@ struct pv_init_ops { + */ + unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, + unsigned long addr, unsigned len); +-}; ++} __no_const; + + + struct pv_lazy_ops { + /* Set deferred update mode, used for batching operations. */ + void (*enter)(void); + void (*leave)(void); +-}; ++} __no_const; + + struct pv_time_ops { + unsigned long long (*sched_clock)(void); + unsigned long long (*steal_clock)(int cpu); + unsigned long (*get_tsc_khz)(void); +-}; ++} __no_const; + + struct pv_cpu_ops { + /* hooks for various privileged instructions */ +@@ -193,7 +193,7 @@ struct pv_cpu_ops { + + void (*start_context_switch)(struct task_struct *prev); + void (*end_context_switch)(struct task_struct *next); +-}; ++} __no_const; + + struct pv_irq_ops { + /* +@@ -224,7 +224,7 @@ struct pv_apic_ops { + unsigned long start_eip, + unsigned long start_esp); + #endif +-}; ++} __no_const; + + struct pv_mmu_ops { + unsigned long (*read_cr2)(void); +@@ -313,6 +313,7 @@ struct pv_mmu_ops { + struct paravirt_callee_save make_pud; + + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); ++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval); + #endif /* PAGETABLE_LEVELS == 4 */ + #endif /* PAGETABLE_LEVELS >= 3 */ + +@@ -324,6 +325,12 @@ struct pv_mmu_ops { + an mfn. We can tell which is which from the index. 
*/ + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long (*pax_open_kernel)(void); ++ unsigned long (*pax_close_kernel)(void); ++#endif ++ + }; + + struct arch_spinlock; +@@ -334,7 +341,7 @@ struct pv_lock_ops { + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); +-}; ++} __no_const; + + /* This contains all the paravirt structures: we get a convenient + * number for each function using the offset which we use to indicate +diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h +index b4389a4..b7ff22c 100644 +--- a/arch/x86/include/asm/pgalloc.h ++++ b/arch/x86/include/asm/pgalloc.h +@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) + { + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); ++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); ++} ++ ++static inline void pmd_populate_user(struct mm_struct *mm, ++ pmd_t *pmd, pte_t *pte) ++{ ++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); + } + +diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h +index 98391db..8f6984e 100644 +--- a/arch/x86/include/asm/pgtable-2level.h ++++ b/arch/x86/include/asm/pgtable-2level.h +@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h +index effff47..f9e4035 100644 +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); ++ pax_close_kernel(); + } + + static inline void native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); ++ pax_close_kernel(); + } + + /* +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 18601c8..3d716d1 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #ifndef __PAGETABLE_PUD_FOLDED + #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) ++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd) + #define pgd_clear(pgd) native_pgd_clear(pgd) + #endif + +@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #define arch_end_context_switch(prev) do {} while(0) + ++#define pax_open_kernel() native_pax_open_kernel() ++#define pax_close_kernel() native_pax_close_kernel() + #endif /* CONFIG_PARAVIRT */ + ++#define __HAVE_ARCH_PAX_OPEN_KERNEL ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL ++ ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long native_pax_open_kernel(void) ++{ ++ unsigned long cr0; ++ ++ preempt_disable(); ++ barrier(); ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(unlikely(cr0 & X86_CR0_WP)); ++ write_cr0(cr0); ++ return cr0 ^ X86_CR0_WP; ++} ++ ++static inline unsigned 
long native_pax_close_kernel(void) ++{ ++ unsigned long cr0; ++ ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP))); ++ write_cr0(cr0); ++ barrier(); ++ preempt_enable_no_resched(); ++ return cr0 ^ X86_CR0_WP; ++} ++#else ++static inline unsigned long native_pax_open_kernel(void) { return 0; } ++static inline unsigned long native_pax_close_kernel(void) { return 0; } ++#endif ++ + /* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ ++static inline int pte_user(pte_t pte) ++{ ++ return pte_val(pte) & _PAGE_USER; ++} ++ + static inline int pte_dirty(pte_t pte) + { + return pte_flags(pte) & _PAGE_DIRTY; +@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte) + return pte_clear_flags(pte, _PAGE_RW); + } + ++static inline pte_t pte_mkread(pte_t pte) ++{ ++ return __pte(pte_val(pte) | _PAGE_USER); ++} ++ + static inline pte_t pte_mkexec(pte_t pte) + { +- return pte_clear_flags(pte, _PAGE_NX); ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_clear_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_set_flags(pte, _PAGE_USER); ++} ++ ++static inline pte_t pte_exprotect(pte_t pte) ++{ ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_set_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_clear_flags(pte, _PAGE_USER); + } + + static inline pte_t pte_mkdirty(pte_t pte) +@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr); + #endif + + #ifndef __ASSEMBLY__ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; ++static inline pgd_t *get_cpu_pgd(unsigned int cpu) ++{ ++ return cpu_pgd[cpu]; ++} ++#endif ++ + #include <linux/mm_types.h> + + static inline int pte_none(pte_t pte) +@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) + + static inline int pgd_bad(pgd_t pgd) + { +- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; ++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; + } + + static inline int pgd_none(pgd_t pgd) +@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd) + * pgd_offset() returns a (pgd_t *) + * pgd_index() is used get the offset into the pgd page's array of pgd_t's; + */ +-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) ++#endif ++ + /* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's +@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd) + #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) + #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) + ++#ifdef CONFIG_X86_32 ++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY ++#else ++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT ++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT) ++#else ++#define PAX_USER_SHADOW_BASE (_AC(0,UL)) ++#endif ++ ++#endif ++ + #ifndef __ASSEMBLY__ + + extern int direct_gbpages; +@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, + * dst and src can be on the same page, but the range must not overlap, + * and must not cross a page boundary. 
+ */ +-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) ++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) + { +- memcpy(dst, src, count * sizeof(pgd_t)); ++ pax_open_kernel(); ++ while (count--) ++ *dst++ = *src++; ++ pax_close_kernel(); + } + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count); ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count); ++#else ++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {} ++#endif + + #include <asm-generic/pgtable.h> + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h +index 0c92113..34a77c6 100644 +--- a/arch/x86/include/asm/pgtable_32.h ++++ b/arch/x86/include/asm/pgtable_32.h +@@ -25,9 +25,6 @@ + struct mm_struct; + struct vm_area_struct; + +-extern pgd_t swapper_pg_dir[1024]; +-extern pgd_t initial_page_table[1024]; +- + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } + void paging_init(void); +@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + # include <asm/pgtable-2level.h> + #endif + ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++extern pgd_t initial_page_table[PTRS_PER_PGD]; ++#ifdef CONFIG_X86_PAE ++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; ++#endif ++ + #if defined(CONFIG_HIGHPTE) + #define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ +@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + /* Clear a kernel PTE and flush it from the TLB */ + #define kpte_clear_flush(ptep, vaddr) \ + do { \ ++ pax_open_kernel(); \ + pte_clear(&init_mm, (vaddr), (ptep)); \ ++ pax_close_kernel(); \ + __flush_tlb_one((vaddr)); \ + } while (0) + +@@ -74,6 +79,9 @@ do { \ + + #endif /* !__ASSEMBLY__ */ + ++#define HAVE_ARCH_UNMAPPED_AREA ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN ++ + /* + * kern_addr_valid() is (1) for FLATMEM and (0) for + * SPARSEMEM and DISCONTIGMEM +diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h +index ed5903b..c7fe163 100644 +--- a/arch/x86/include/asm/pgtable_32_types.h ++++ b/arch/x86/include/asm/pgtable_32_types.h +@@ -8,7 +8,7 @@ + */ + #ifdef CONFIG_X86_PAE + # include <asm/pgtable-3level_types.h> +-# define PMD_SIZE (1UL << PMD_SHIFT) ++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) + # define PMD_MASK (~(PMD_SIZE - 1)) + #else + # include <asm/pgtable-2level_types.h> +@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ + # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++#ifndef __ASSEMBLY__ ++extern unsigned char MODULES_EXEC_VADDR[]; ++extern unsigned char MODULES_EXEC_END[]; ++#endif ++#include <asm/boot.h> ++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) ++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) ++#else ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++#endif ++ + #define MODULES_VADDR VMALLOC_START + #define MODULES_END VMALLOC_END + #define MODULES_LEN (MODULES_VADDR - MODULES_END) +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h +index 975f709..107976d 100644 +--- a/arch/x86/include/asm/pgtable_64.h ++++ b/arch/x86/include/asm/pgtable_64.h +@@ -16,10 +16,14 @@ + + extern 
pud_t level3_kernel_pgt[512]; + extern pud_t level3_ident_pgt[512]; ++extern pud_t level3_vmalloc_start_pgt[512]; ++extern pud_t level3_vmalloc_end_pgt[512]; ++extern pud_t level3_vmemmap_pgt[512]; ++extern pud_t level2_vmemmap_pgt[512]; + extern pmd_t level2_kernel_pgt[512]; + extern pmd_t level2_fixmap_pgt[512]; +-extern pmd_t level2_ident_pgt[512]; +-extern pgd_t init_level4_pgt[]; ++extern pmd_t level2_ident_pgt[512*2]; ++extern pgd_t init_level4_pgt[512]; + + #define swapper_pg_dir init_level4_pgt + +@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_pmd_clear(pmd_t *pmd) +@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud) + + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) + { ++ pax_open_kernel(); ++ *pgdp = pgd; ++ pax_close_kernel(); ++} ++ ++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ + *pgdp = pgd; + } + +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 766ea16..5b96cb3 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t; + #define MODULES_VADDR _AC(0xffffffffa0000000, UL) + #define MODULES_END _AC(0xffffffffff000000, UL) + #define MODULES_LEN (MODULES_END - MODULES_VADDR) ++#define MODULES_EXEC_VADDR MODULES_VADDR ++#define MODULES_EXEC_END MODULES_END ++ ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) + + #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index 013286a..8b42f4f 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -16,13 +16,12 @@ + #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ + #define _PAGE_BIT_PAT 7 /* on 4KB pages */ + #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ ++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ + #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ + #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ ++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL ++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */ + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + + /* If _PAGE_BIT_PRESENT is clear, we use these: */ +@@ -40,7 +39,6 @@ + #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) + #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) + #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) +-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) + #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) + #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +@@ -57,8 +55,10 @@ + + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +-#else ++#elif defined(CONFIG_KMEMCHECK) + #define _PAGE_NX (_AT(pteval_t, 0)) ++#else ++#define _PAGE_NX (_AT(pteval_t, 1) << 
_PAGE_BIT_HIDDEN) + #endif + + #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +@@ -96,6 +96,9 @@ + #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) + ++#define PAGE_READONLY_NOEXEC PAGE_READONLY ++#define PAGE_SHARED_NOEXEC PAGE_SHARED ++ + #define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) + #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) +@@ -106,7 +109,7 @@ + #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) + #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) ++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +@@ -168,8 +171,8 @@ + * bits are combined, this will alow user to access the high address mapped + * VDSO in the presence of CONFIG_COMPAT_VDSO + */ +-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ ++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ ++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ + #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ + #endif + +@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) + { + return native_pgd_val(pgd) & PTE_FLAGS_MASK; + } ++#endif + ++#if PAGETABLE_LEVELS == 3 ++#include <asm-generic/pgtable-nopud.h> ++#endif ++ ++#if PAGETABLE_LEVELS == 2 ++#include <asm-generic/pgtable-nopmd.h> ++#endif ++ ++#ifndef __ASSEMBLY__ + #if PAGETABLE_LEVELS > 3 + typedef struct { pudval_t pud; } pud_t; + +@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud) + return pud.pud; + } + #else +-#include <asm-generic/pgtable-nopud.h> +- + static inline pudval_t native_pud_val(pud_t pud) + { + return native_pgd_val(pud.pgd); +@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) + return pmd.pmd; + } + #else +-#include <asm-generic/pgtable-nopmd.h> +- + static inline pmdval_t native_pmd_val(pmd_t pmd) + { + return native_pgd_val(pmd.pud.pgd); +@@ -283,7 +292,6 @@ typedef struct page *pgtable_t; + + extern pteval_t __supported_pte_mask; + extern void set_nx(void); +-extern int nx_enabled; + + #define pgprot_writecombine pgprot_writecombine + extern pgprot_t pgprot_writecombine(pgprot_t prot); +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index bb3ee36..781a6b8 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -268,7 +268,7 @@ struct tss_struct { + + } ____cacheline_aligned; + +-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); ++extern struct tss_struct init_tss[NR_CPUS]; + + /* + * Save the original ist values for checking stack pointers during debugging +@@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define TASK_SIZE PAGE_OFFSET + #define TASK_SIZE_MAX TASK_SIZE ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) ++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) ++#else + #define STACK_TOP TASK_SIZE +-#define STACK_TOP_MAX STACK_TOP ++#endif ++ ++#define STACK_TOP_MAX TASK_SIZE + + #define INIT_THREAD { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ 
.sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ +@@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define INIT_TSS { \ + .x86_tss = { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ +@@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x) + extern unsigned long thread_saved_pc(struct task_struct *tsk); + + #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) +-#define KSTK_TOP(info) \ +-({ \ +- unsigned long *__ptr = (unsigned long *)(info); \ +- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ +-}) ++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0) + + /* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. +@@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define task_pt_regs(task) \ + ({ \ + struct pt_regs *__regs__; \ +- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ ++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \ + __regs__ - 1; \ + }) + +@@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + /* + * User space process size. 47bits minus one guard page. + */ +-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) ++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) + + /* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ + #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ +- 0xc0000000 : 0xFFFFe000) ++ 0xc0000000 : 0xFFFFf000) + + #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) +@@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define STACK_TOP_MAX TASK_SIZE_MAX + + #define INIT_THREAD { \ +- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + #define INIT_TSS { \ +- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + /* +@@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + */ + #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) ++#endif ++ + #define KSTK_EIP(task) (task_pt_regs(task)->ip) + + /* Get/set a process' ability to use the timestamp counter instruction */ +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h +index 3566454..4bdfb8c 100644 +--- a/arch/x86/include/asm/ptrace.h ++++ b/arch/x86/include/asm/ptrace.h +@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) + } + + /* +- * user_mode_vm(regs) determines whether a register set came from user mode. ++ * user_mode(regs) determines whether a register set came from user mode. + * This is true if V8086 mode was enabled OR if the register set was from + * protected mode with RPL-3 CS value. This tricky test checks that with + * one comparison. Many places in the kernel can bypass this full check +- * if they have already ruled out V8086 mode, so user_mode(regs) can be used. ++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can ++ * be used. 
+ */ +-static inline int user_mode(struct pt_regs *regs) ++static inline int user_mode_novm(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + #else +- return !!(regs->cs & 3); ++ return !!(regs->cs & SEGMENT_RPL_MASK); + #endif + } + +-static inline int user_mode_vm(struct pt_regs *regs) ++static inline int user_mode(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= + USER_RPL; + #else +- return user_mode(regs); ++ return user_mode_novm(regs); + #endif + } + +@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs) + #ifdef CONFIG_X86_64 + static inline bool user_64bit_mode(struct pt_regs *regs) + { ++ unsigned long cs = regs->cs & 0xffff; + #ifndef CONFIG_PARAVIRT + /* + * On non-paravirt systems, this is the only long mode CPL 3 + * selector. We do not allow long mode selectors in the LDT. + */ +- return regs->cs == __USER_CS; ++ return cs == __USER_CS; + #else + /* Headers are too twisted for this to go in paravirt.h. */ +- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; ++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs; + #endif + } + #endif +diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h +index 92f29706..a79cbbb 100644 +--- a/arch/x86/include/asm/reboot.h ++++ b/arch/x86/include/asm/reboot.h +@@ -6,19 +6,19 @@ + struct pt_regs; + + struct machine_ops { +- void (*restart)(char *cmd); +- void (*halt)(void); +- void (*power_off)(void); ++ void (* __noreturn restart)(char *cmd); ++ void (* __noreturn halt)(void); ++ void (* __noreturn power_off)(void); + void (*shutdown)(void); + void (*crash_shutdown)(struct pt_regs *); +- void (*emergency_restart)(void); +-}; ++ void (* __noreturn emergency_restart)(void); ++} __no_const; + + extern struct machine_ops machine_ops; + + void native_machine_crash_shutdown(struct pt_regs *regs); + void native_machine_shutdown(void); +-void machine_real_restart(unsigned int type); ++void machine_real_restart(unsigned int type) __noreturn; + /* These must match dispatch_table in reboot_32.S */ + #define MRR_BIOS 0 + #define MRR_APM 1 +diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h +index 2dbe4a7..ce1db00 100644 +--- a/arch/x86/include/asm/rwsem.h ++++ b/arch/x86/include/asm/rwsem.h +@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem) + { + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX _ASM_INC "(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_DEC "(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0x00000001 */ + " jns 1f\n" + " call call_rwsem_down_read_failed\n" +@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) + "1:\n\t" + " mov %1,%2\n\t" + " add %3,%2\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %3,%2\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + " jle 2f\n\t" + LOCK_PREFIX " cmpxchg %2,%0\n\t" + " jnz 1b\n\t" +@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) + long tmp; + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0xffff0001, returns the old value */ + " test %1,%1\n\t" + /* was the count 0 before? 
*/ +@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 1, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 0xffff0001, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + { + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) +@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + */ + static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + { +- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (sem->count) + : "er" (delta)); + } +@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + */ + static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) + { +- return delta + xadd(&sem->count, delta); ++ return delta + xadd_check_overflow(&sem->count, delta); + } + + #endif /* __KERNEL__ */ +diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h +index 5e64171..f58957e 100644 +--- a/arch/x86/include/asm/segment.h ++++ b/arch/x86/include/asm/segment.h +@@ -64,10 +64,15 @@ + * 26 - ESPFIX small SS + * 27 - per-cpu [ offset to per-cpu data area ] + * 28 - stack_canary-20 [ for stack protector ] +- * 29 - unused +- * 30 - unused ++ * 29 - PCI BIOS CS ++ * 30 - PCI BIOS DS + * 31 - TSS for double fault handler + */ ++#define GDT_ENTRY_KERNEXEC_EFI_CS (1) ++#define GDT_ENTRY_KERNEXEC_EFI_DS (2) ++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8) ++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8) ++ + #define GDT_ENTRY_TLS_MIN 6 + #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +@@ -79,6 +84,8 @@ + + #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) ++ + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) + + #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) +@@ -104,6 +111,12 @@ + #define __KERNEL_STACK_CANARY 0 + #endif + ++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17) ++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) ++ ++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18) ++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) ++ + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 + + /* +@@ -141,7 +154,7 @@ + */ + + /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) ++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == 
PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) + + + #else +@@ -165,6 +178,8 @@ + #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) + #define __USER32_DS __USER_DS + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 ++ + #define GDT_ENTRY_TSS 8 /* needs two entries */ + #define GDT_ENTRY_LDT 10 /* needs two entries */ + #define GDT_ENTRY_TLS_MIN 12 +@@ -185,6 +200,7 @@ + #endif + + #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) ++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h +index 73b11bc..d4a3b63 100644 +--- a/arch/x86/include/asm/smp.h ++++ b/arch/x86/include/asm/smp.h +@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); + /* cpus sharing the last level cache: */ + DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); + DECLARE_PER_CPU(u16, cpu_llc_id); +-DECLARE_PER_CPU(int, cpu_number); ++DECLARE_PER_CPU(unsigned int, cpu_number); + + static inline struct cpumask *cpu_sibling_mask(int cpu) + { +@@ -77,7 +77,7 @@ struct smp_ops { + + void (*send_call_func_ipi)(const struct cpumask *mask); + void (*send_call_func_single_ipi)(int cpu); +-}; ++} __no_const; + + /* Globals due to paravirt */ + extern void set_cpu_sibling_map(int cpu); +@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata; + extern int safe_smp_processor_id(void); + + #elif defined(CONFIG_X86_64_SMP) +-#define raw_smp_processor_id() (percpu_read(cpu_number)) +- +-#define stack_smp_processor_id() \ +-({ \ +- struct thread_info *ti; \ +- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ +- ti->cpu; \ +-}) ++#define raw_smp_processor_id() (percpu_read(cpu_number)) ++#define stack_smp_processor_id() raw_smp_processor_id() + #define safe_smp_processor_id() smp_processor_id() + + #endif +diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h +index 972c260..43ab1fd 100644 +--- a/arch/x86/include/asm/spinlock.h ++++ b/arch/x86/include/asm/spinlock.h +@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock) + static inline void arch_read_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jns 1f\n" + "call __read_lock_failed\n\t" + "1:\n" +@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) + static inline void arch_write_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jz 1f\n" + "call __write_lock_failed\n\t" + "1:\n" +@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) + + static inline void arch_read_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" ++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + :"+m" (rw->lock) : : "memory"); + } + + static inline void arch_write_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" ++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 
0f\n" ++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); + } + +diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h +index 1575177..cb23f52 100644 +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -48,7 +48,7 @@ + * head_32 for boot CPU and setup_per_cpu_areas() for others. + */ + #define GDT_STACK_CANARY_INIT \ +- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), ++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17), + + /* + * Initialize the stackprotector canary value. +@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu) + + static inline void load_stack_canary_segment(void) + { +-#ifdef CONFIG_X86_32 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) + asm volatile ("mov %0, %%gs" : : "r" (0)); + #endif + } +diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h +index 70bbe39..4ae2bd4 100644 +--- a/arch/x86/include/asm/stacktrace.h ++++ b/arch/x86/include/asm/stacktrace.h +@@ -11,28 +11,20 @@ + + extern int kstack_depth_to_print; + +-struct thread_info; ++struct task_struct; + struct stacktrace_ops; + +-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, +- unsigned long *stack, +- unsigned long bp, +- const struct stacktrace_ops *ops, +- void *data, +- unsigned long *end, +- int *graph); ++typedef unsigned long walk_stack_t(struct task_struct *task, ++ void *stack_start, ++ unsigned long *stack, ++ unsigned long bp, ++ const struct stacktrace_ops *ops, ++ void *data, ++ unsigned long *end, ++ int *graph); + +-extern unsigned long +-print_context_stack(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); +- +-extern unsigned long +-print_context_stack_bp(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); ++extern walk_stack_t print_context_stack; ++extern walk_stack_t print_context_stack_bp; + + /* Generic stack tracer with callbacks */ + +@@ -40,7 +32,7 @@ struct stacktrace_ops { + void (*address)(void *data, unsigned long address, int reliable); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +- walk_stack_t walk_stack; ++ walk_stack_t *walk_stack; + }; + + void dump_trace(struct task_struct *tsk, struct pt_regs *regs, +diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h +index cb23852..2dde194 100644 +--- a/arch/x86/include/asm/sys_ia32.h ++++ b/arch/x86/include/asm/sys_ia32.h +@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, + compat_sigset_t __user *, unsigned int); + asmlinkage long sys32_alarm(unsigned int); + +-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); ++asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); + asmlinkage long sys32_sysfs(int, u32, u32); + + asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, +diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h +index f1d8b44..a4de8b7 100644 +--- a/arch/x86/include/asm/syscalls.h ++++ b/arch/x86/include/asm/syscalls.h +@@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *, + void __user *, struct pt_regs *); + + /* kernel/ldt.c */ +-asmlinkage int 
sys_modify_ldt(int, void __user *, unsigned long); ++asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3); + + /* kernel/signal.c */ + long sys_rt_sigreturn(struct pt_regs *); +diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h +index 2d2f01c..f985723 100644 +--- a/arch/x86/include/asm/system.h ++++ b/arch/x86/include/asm/system.h +@@ -129,7 +129,7 @@ do { \ + "call __switch_to\n\t" \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ + __switch_canary \ +- "movq %P[thread_info](%%rsi),%%r8\n\t" \ ++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \ + "movq %%rax,%%rdi\n\t" \ + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ + "jnz ret_from_fork\n\t" \ +@@ -140,7 +140,7 @@ do { \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ + [_tif_fork] "i" (_TIF_FORK), \ +- [thread_info] "i" (offsetof(struct task_struct, stack)), \ ++ [thread_info] "m" (current_tinfo), \ + [current_task] "m" (current_task) \ + __switch_canary_iparam \ + : "memory", "cc" __EXTRA_CLOBBER) +@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment) + { + unsigned long __limit; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); +- return __limit + 1; ++ return __limit; + } + + static inline void native_clts(void) +@@ -397,13 +397,13 @@ void enable_hlt(void); + + void cpu_idle_wait(void); + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + + void default_idle(void); + bool set_pm_idle_to_default(void); + +-void stop_this_cpu(void *dummy); ++void stop_this_cpu(void *dummy) __noreturn; + + /* + * Force strict CPU ordering. +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index d7ef849..6af292e 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <asm/page.h> + #include <asm/types.h> ++#include <asm/percpu.h> + + /* + * low level task data that entry.S needs immediate access to +@@ -24,7 +25,6 @@ struct exec_domain; + #include <linux/atomic.h> + + struct thread_info { +- struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + __u32 flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ +@@ -34,18 +34,12 @@ struct thread_info { + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +-#ifdef CONFIG_X86_32 +- unsigned long previous_esp; /* ESP of the previous stack in +- case of nested (IRQ) stacks +- */ +- __u8 supervisor_stack[0]; +-#endif ++ unsigned long lowest_stack; + int uaccess_err; + }; + +-#define INIT_THREAD_INFO(tsk) \ ++#define INIT_THREAD_INFO \ + { \ +- .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ +@@ -56,7 +50,7 @@ struct thread_info { + }, \ + } + +-#define init_thread_info (init_thread_union.thread_info) ++#define init_thread_info (init_thread_union.stack) + #define init_stack (init_thread_union.stack) + + #else /* !__ASSEMBLY__ */ +@@ -170,45 +164,40 @@ struct thread_info { + ret; \ + }) + +-#ifdef CONFIG_X86_32 +- +-#define STACK_WARN (THREAD_SIZE/8) +-/* +- * macros/functions for gaining access to the thread information structure +- * +- * preempt_count needs to be 1 initially, until the scheduler is functional. 
+- */ +-#ifndef __ASSEMBLY__ +- +- +-/* how to get the current stack pointer from C */ +-register unsigned long current_stack_pointer asm("esp") __used; +- +-/* how to get the thread information struct from C */ +-static inline struct thread_info *current_thread_info(void) +-{ +- return (struct thread_info *) +- (current_stack_pointer & ~(THREAD_SIZE - 1)); +-} +- +-#else /* !__ASSEMBLY__ */ +- ++#ifdef __ASSEMBLY__ + /* how to get the thread information struct from ASM */ + #define GET_THREAD_INFO(reg) \ +- movl $-THREAD_SIZE, reg; \ +- andl %esp, reg ++ mov PER_CPU_VAR(current_tinfo), reg + + /* use this one if reg already contains %esp */ +-#define GET_THREAD_INFO_WITH_ESP(reg) \ +- andl $-THREAD_SIZE, reg ++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg) ++#else ++/* how to get the thread information struct from C */ ++DECLARE_PER_CPU(struct thread_info *, current_tinfo); ++ ++static __always_inline struct thread_info *current_thread_info(void) ++{ ++ return percpu_read_stable(current_tinfo); ++} ++#endif ++ ++#ifdef CONFIG_X86_32 ++ ++#define STACK_WARN (THREAD_SIZE/8) ++/* ++ * macros/functions for gaining access to the thread information structure ++ * ++ * preempt_count needs to be 1 initially, until the scheduler is functional. ++ */ ++#ifndef __ASSEMBLY__ ++ ++/* how to get the current stack pointer from C */ ++register unsigned long current_stack_pointer asm("esp") __used; + + #endif + + #else /* X86_32 */ + +-#include <asm/percpu.h> +-#define KERNEL_STACK_OFFSET (5*8) +- + /* + * macros/functions for gaining access to the thread information structure + * preempt_count needs to be 1 initially, until the scheduler is functional. +@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void) + #ifndef __ASSEMBLY__ + DECLARE_PER_CPU(unsigned long, kernel_stack); + +-static inline struct thread_info *current_thread_info(void) +-{ +- struct thread_info *ti; +- ti = (void *)(percpu_read_stable(kernel_stack) + +- KERNEL_STACK_OFFSET - THREAD_SIZE); +- return ti; +-} +- +-#else /* !__ASSEMBLY__ */ +- +-/* how to get the thread information struct from ASM */ +-#define GET_THREAD_INFO(reg) \ +- movq PER_CPU_VAR(kernel_stack),reg ; \ +- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg +- ++/* how to get the current stack pointer from C */ ++register unsigned long current_stack_pointer asm("rsp") __used; + #endif + + #endif /* !X86_32 */ +@@ -264,5 +240,16 @@ extern void arch_task_cache_init(void); + extern void free_thread_info(struct thread_info *ti); + extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + #define arch_task_cache_init arch_task_cache_init ++ ++#define __HAVE_THREAD_FUNCTIONS ++#define task_thread_info(task) (&(task)->tinfo) ++#define task_stack_page(task) ((task)->stack) ++#define setup_thread_stack(p, org) do {} while (0) ++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1) ++ ++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR ++extern struct task_struct *alloc_task_struct_node(int node); ++extern void free_task_struct(struct task_struct *); ++ + #endif + #endif /* _ASM_X86_THREAD_INFO_H */ +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 36361bf..324f262 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -7,12 +7,15 @@ + #include <linux/compiler.h> + #include <linux/thread_info.h> + #include <linux/string.h> ++#include <linux/sched.h> + #include <asm/asm.h> + #include <asm/page.h> + + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + 
++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++ + /* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with +@@ -28,7 +31,12 @@ + + #define get_ds() (KERNEL_DS) + #define get_fs() (current_thread_info()->addr_limit) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++void __set_fs(mm_segment_t x); ++void set_fs(mm_segment_t x); ++#else + #define set_fs(x) (current_thread_info()->addr_limit = (x)) ++#endif + + #define segment_eq(a, b) ((a).seg == (b).seg) + +@@ -76,7 +84,33 @@ + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. + */ +-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) ++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) ++#define access_ok(type, addr, size) \ ++({ \ ++ long __size = size; \ ++ unsigned long __addr = (unsigned long)addr; \ ++ unsigned long __addr_ao = __addr & PAGE_MASK; \ ++ unsigned long __end_ao = __addr + __size - 1; \ ++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \ ++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ ++ while(__addr_ao <= __end_ao) { \ ++ char __c_ao; \ ++ __addr_ao += PAGE_SIZE; \ ++ if (__size > PAGE_SIZE) \ ++ cond_resched(); \ ++ if (__get_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ if (type != VERIFY_WRITE) { \ ++ __addr = __addr_ao; \ ++ continue; \ ++ } \ ++ if (__put_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ __addr = __addr_ao; \ ++ } \ ++ } \ ++ __ret_ao; \ ++}) + + /* + * The exception table consists of pairs of addresses: the first is the +@@ -182,12 +216,20 @@ extern int __get_user_bad(void); + asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") + +- ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg "gs;" ++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" ++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" ++#else ++#define __copyuser_seg ++#define __COPYUSER_SET_ES ++#define __COPYUSER_RESTORE_ES ++#endif + + #ifdef CONFIG_X86_32 + #define __put_user_asm_u64(x, addr, err, errret) \ +- asm volatile("1: movl %%eax,0(%2)\n" \ +- "2: movl %%edx,4(%2)\n" \ ++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ + "3:\n" \ + ".section .fixup,"ax"\n" \ + "4: movl %3,%0\n" \ +@@ -199,8 +241,8 @@ extern int __get_user_bad(void); + : "A" (x), "r" (addr), "i" (errret), "0" (err)) + + #define __put_user_asm_ex_u64(x, addr) \ +- asm volatile("1: movl %%eax,0(%1)\n" \ +- "2: movl %%edx,4(%1)\n" \ ++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ + "3:\n" \ + _ASM_EXTABLE(1b, 2b - 1b) \ + _ASM_EXTABLE(2b, 3b - 2b) \ +@@ -252,7 +294,7 @@ extern void __put_user_8(void); + __typeof__(*(ptr)) __pu_val; \ + __chk_user_ptr(ptr); \ + might_fault(); \ +- __pu_val = x; \ ++ __pu_val = (x); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_x(1, __pu_val, ptr, __ret_pu); \ +@@ -373,7 +415,7 @@ do { \ + } while (0) + + #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +- asm volatile("1: mov"itype" %2,%"rtype"1\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ + "2:\n" \ + ".section .fixup,"ax"\n" \ + "3: mov %3,%0\n" \ +@@ -381,7 +423,7 @@ do { \ + " jmp 2b\n" \ + ".previous\n" \ 
+ _ASM_EXTABLE(1b, 3b) \ +- : "=r" (err), ltype(x) \ ++ : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), "0" (err)) + + #define __get_user_size_ex(x, ptr, size) \ +@@ -406,7 +448,7 @@ do { \ + } while (0) + + #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %1,%"rtype"0\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ + "2:\n" \ + _ASM_EXTABLE(1b, 2b - 1b) \ + : ltype(x) : "m" (__m(addr))) +@@ -423,13 +465,24 @@ do { \ + int __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ +- (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) + + /* FIXME: this hack is definitely wrong -AK */ + struct __large_struct { unsigned long buf[100]; }; +-#define __m(x) (*(struct __large_struct __user *)(x)) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define ____m(x) \ ++({ \ ++ unsigned long ____x = (unsigned long)(x); \ ++ if (____x < PAX_USER_SHADOW_BASE) \ ++ ____x += PAX_USER_SHADOW_BASE; \ ++ (void __user *)____x; \ ++}) ++#else ++#define ____m(x) (x) ++#endif ++#define __m(x) (*(struct __large_struct __user *)____m(x)) + + /* + * Tell gcc we read from memory instead of writing: this is because +@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; }; + * aliasing issues. + */ + #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +- asm volatile("1: mov"itype" %"rtype"1,%2\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ + "2:\n" \ + ".section .fixup,"ax"\n" \ + "3: mov %3,%0\n" \ +@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; }; + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ +- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) ++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) + + #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %"rtype"0,%1\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ + "2:\n" \ + _ASM_EXTABLE(1b, 2b - 1b) \ + : : ltype(x), "m" (__m(addr))) +@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; }; + * On error, the variable @x is set to zero. + */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __get_user(x, ptr) get_user((x), (ptr)) ++#else + #define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) ++#endif + + /** + * __put_user: - Write a simple value into user space, with less checking. +@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; }; + * Returns zero on success, or -EFAULT on error. 
+ */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __put_user(x, ptr) put_user((x), (ptr)) ++#else + #define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) ++#endif + + #define __get_user_unaligned __get_user + #define __put_user_unaligned __put_user +@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; }; + #define get_user_ex(x, ptr) do { \ + unsigned long __gue_val; \ + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ +- (x) = (__force __typeof__(*(ptr)))__gue_val; \ ++ (x) = (__typeof__(*(ptr)))__gue_val; \ + } while (0) + + #ifdef CONFIG_X86_WP_WORKS_OK +diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h +index 566e803..7183d0b 100644 +--- a/arch/x86/include/asm/uaccess_32.h ++++ b/arch/x86/include/asm/uaccess_32.h +@@ -11,15 +11,15 @@ + #include <asm/page.h> + + unsigned long __must_check __copy_to_user_ll +- (void __user *to, const void *from, unsigned long n); ++ (void __user *to, const void *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nozero +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nocache +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + unsigned long __must_check __copy_from_user_ll_nocache_nozero +- (void *to, const void __user *from, unsigned long n); ++ (void *to, const void __user *from, unsigned long n) __size_overflow(3); + + /** + * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. +@@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero + */ + + static __always_inline unsigned long __must_check ++__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3); ++static __always_inline unsigned long __must_check + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + return ret; + } + } ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_to_user_ll(to, from, n); + } + +@@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + * On success, this will be zero. + */ + static __always_inline unsigned long __must_check ++__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3); ++static __always_inline unsigned long __must_check + __copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ + return __copy_to_user_inatomic(to, from, n); + } + + static __always_inline unsigned long ++__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3); ++static __always_inline unsigned long + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + /* Avoid zeroing the tail if the copy fails.. 
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, + * but as the zeroing behaviour is only significant when n is not +@@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) + * for explanation of why this is needed. + */ + static __always_inline unsigned long ++__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3); ++static __always_inline unsigned long + __copy_from_user(void *to, const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) + return ret; + } + } ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_from_user_ll(to, from, n); + } + + static __always_inline unsigned long __copy_from_user_nocache(void *to, ++ const void __user *from, unsigned long n) __size_overflow(3); ++static __always_inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, + + static __always_inline unsigned long + __copy_from_user_inatomic_nocache(void *to, const void __user *from, ++ unsigned long n) __size_overflow(3); ++static __always_inline unsigned long ++__copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) + { +- return __copy_from_user_ll_nocache_nozero(to, from, n); ++ if ((long)n < 0) ++ return n; ++ ++ return __copy_from_user_ll_nocache_nozero(to, from, n); + } + +-unsigned long __must_check copy_to_user(void __user *to, +- const void *from, unsigned long n); +-unsigned long __must_check _copy_from_user(void *to, +- const void __user *from, +- unsigned long n); +- ++extern void copy_to_user_overflow(void) ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS ++ __compiletime_error("copy_to_user() buffer size is not provably correct") ++#else ++ __compiletime_warning("copy_to_user() buffer size is not provably correct") ++#endif ++; + + extern void copy_from_user_overflow(void) + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +@@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void) + #endif + ; + +-static inline unsigned long __must_check copy_from_user(void *to, +- const void __user *from, +- unsigned long n) ++/** ++ * copy_to_user: - Copy a block of data into user space. ++ * @to: Destination address, in user space. ++ * @from: Source address, in kernel space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from kernel space to user space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ */ ++static inline unsigned long __must_check ++copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3); ++static inline unsigned long __must_check ++copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ int sz = __compiletime_object_size(from); ++ ++ if (unlikely(sz != -1 && sz < n)) ++ copy_to_user_overflow(); ++ else if (access_ok(VERIFY_WRITE, to, n)) ++ n = __copy_to_user(to, from, n); ++ return n; ++} ++ ++/** ++ * copy_from_user: - Copy a block of data from user space. ++ * @to: Destination address, in kernel space. 
++ * @from: Source address, in user space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from user space to kernel space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ * ++ * If some data could not be copied, this function will pad the copied ++ * data to the requested size using zero bytes. ++ */ ++static inline unsigned long __must_check ++copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3); ++static inline unsigned long __must_check ++copy_from_user(void *to, const void __user *from, unsigned long n) + { + int sz = __compiletime_object_size(to); + +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +- else ++ if (unlikely(sz != -1 && sz < n)) + copy_from_user_overflow(); +- ++ else if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if ((long)n > 0) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ memset(to, 0, n); ++ } + return n; + } + +@@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst, + #define strlen_user(str) strnlen_user(str, LONG_MAX) + + long strnlen_user(const char __user *str, long n); +-unsigned long __must_check clear_user(void __user *mem, unsigned long len); +-unsigned long __must_check __clear_user(void __user *mem, unsigned long len); ++unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2); ++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2); + + #endif /* _ASM_X86_UACCESS_32_H */ +diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h +index 1c66d30..e294b5f 100644 +--- a/arch/x86/include/asm/uaccess_64.h ++++ b/arch/x86/include/asm/uaccess_64.h +@@ -10,6 +10,9 @@ + #include <asm/alternative.h> + #include <asm/cpufeature.h> + #include <asm/page.h> ++#include <asm/pgtable.h> ++ ++#define set_fs(x) (current_thread_info()->addr_limit = (x)) + + /* + * Copy To/From Userspace +@@ -17,12 +20,14 @@ + + /* Handles exceptions in both to and from, but doesn't do access_ok */ + __must_check unsigned long +-copy_user_generic_string(void *to, const void *from, unsigned len); ++copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3); + __must_check unsigned long +-copy_user_generic_unrolled(void *to, const void *from, unsigned len); ++copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3); + + static __always_inline __must_check unsigned long +-copy_user_generic(void *to, const void *from, unsigned len) ++copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3); ++static __always_inline __must_check unsigned long ++copy_user_generic(void *to, const void *from, unsigned long len) + { + unsigned ret; + +@@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len) + ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), + "=d" (len)), + "1" (to), "2" (from), "3" (len) +- : "memory", "rcx", "r8", "r9", "r10", "r11"); ++ : "memory", "rcx", "r8", "r9", "r11"); + return ret; + } + ++static __always_inline __must_check unsigned long ++__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3); ++static __always_inline __must_check unsigned long ++__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3); + __must_check unsigned long +-_copy_to_user(void 
__user *to, const void *from, unsigned len); +-__must_check unsigned long +-_copy_from_user(void *to, const void __user *from, unsigned len); +-__must_check unsigned long +-copy_in_user(void __user *to, const void __user *from, unsigned len); ++copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3); + + static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, ++ unsigned long n) __size_overflow(3); ++static inline unsigned long __must_check copy_from_user(void *to, ++ const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); +- + might_fault(); +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +-#ifdef CONFIG_DEBUG_VM +- else +- WARN(1, "Buffer overflow detected!\n"); +-#endif ++ ++ if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if (n < INT_MAX) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ memset(to, 0, n); ++ } + return n; + } + + static __always_inline __must_check +-int copy_to_user(void __user *dst, const void *src, unsigned size) ++int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3); ++static __always_inline __must_check ++int copy_to_user(void __user *dst, const void *src, unsigned long size) + { + might_fault(); + +- return _copy_to_user(dst, src, size); ++ if (access_ok(VERIFY_WRITE, dst, size)) ++ size = __copy_to_user(dst, src, size); ++ return size; + } + + static __always_inline __must_check +-int __copy_from_user(void *dst, const void __user *src, unsigned size) ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3); ++static __always_inline __must_check ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) + { +- int ret = 0; ++ int sz = __compiletime_object_size(dst); ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) +- return copy_user_generic(dst, (__force void *)src, size); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != -1 && sz < size)) { ++#ifdef CONFIG_DEBUG_VM ++ WARN(1, "Buffer overflow detected!\n"); ++#endif ++ return size; ++ } ++ ++ if (!__builtin_constant_p(size)) { ++ check_object_size(dst, size, false); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic(dst, (__force_kernel const void *)src, size); ++ } + switch (size) { +- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, ++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + return ret; +- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, ++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + return ret; +- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, ++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + return ret; +- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 8); + return ret; + case 10: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 10); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u16 *)(8 + (char *)dst), +- (u16 __user *)(8 + 
(char __user *)src), ++ (const u16 __user *)(8 + (const char __user *)src), + ret, "w", "w", "=r", 2); + return ret; + case 16: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u64 *)(8 + (char *)dst), +- (u64 __user *)(8 + (char __user *)src), ++ (const u64 __user *)(8 + (const char __user *)src), + ret, "q", "", "=r", 8); + return ret; + default: +- return copy_user_generic(dst, (__force void *)src, size); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic(dst, (__force_kernel const void *)src, size); + } + } + + static __always_inline __must_check +-int __copy_to_user(void __user *dst, const void *src, unsigned size) ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3); ++static __always_inline __must_check ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) + { +- int ret = 0; ++ int sz = __compiletime_object_size(src); ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, src, size); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != -1 && sz < size)) { ++#ifdef CONFIG_DEBUG_VM ++ WARN(1, "Buffer overflow detected!\n"); ++#endif ++ return size; ++ } ++ ++ if (!__builtin_constant_p(size)) { ++ check_object_size(src, size, true); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)dst, src, size); ++ } + switch (size) { +- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, ++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); + return ret; +- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, ++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; +- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, ++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst, + ret, "l", "k", "ir", 4); + return ret; +- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + case 10: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 10); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, ++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; + case 16: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 16); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, ++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + default: +- return copy_user_generic((__force void *)dst, src, size); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)dst, src, size); + } + } + + static 
__always_inline __must_check +-int __copy_in_user(void __user *dst, const void __user *src, unsigned size) ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3); ++static __always_inline __must_check ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) + { +- int ret = 0; ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ ++ if (!__builtin_constant_p(size)) { ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)dst, ++ (__force_kernel const void *)src, size); ++ } + switch (size) { + case 1: { + u8 tmp; +- __get_user_asm(tmp, (u8 __user *)src, ++ __get_user_asm(tmp, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + if (likely(!ret)) + __put_user_asm(tmp, (u8 __user *)dst, +@@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 2: { + u16 tmp; +- __get_user_asm(tmp, (u16 __user *)src, ++ __get_user_asm(tmp, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + if (likely(!ret)) + __put_user_asm(tmp, (u16 __user *)dst, +@@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + + case 4: { + u32 tmp; +- __get_user_asm(tmp, (u32 __user *)src, ++ __get_user_asm(tmp, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + if (likely(!ret)) + __put_user_asm(tmp, (u32 __user *)dst, +@@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 8: { + u64 tmp; +- __get_user_asm(tmp, (u64 __user *)src, ++ __get_user_asm(tmp, (const u64 __user *)src, + ret, "q", "", "=r", 8); + if (likely(!ret)) + __put_user_asm(tmp, (u64 __user *)dst, +@@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + return ret; + } + default: +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)dst, ++ (__force_kernel const void *)src, size); + } + } + +@@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count); + __must_check long strnlen_user(const char __user *str, long n); + __must_check long __strnlen_user(const char __user *str, long n); + __must_check long strlen_user(const char __user *str); +-__must_check unsigned long clear_user(void __user *mem, unsigned long len); +-__must_check unsigned long __clear_user(void __user *mem, unsigned long len); ++__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2); ++__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2); + + static __must_check __always_inline int +-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) ++__copy_from_user_inatomic(void *dst, const void 
__user *src, unsigned long size) __size_overflow(3); ++static __must_check __always_inline int ++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) + { +- return copy_user_generic(dst, (__force const void *)src, size); ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++ ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic(dst, (__force_kernel const void *)src, size); + } + +-static __must_check __always_inline int +-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) ++static __must_check __always_inline unsigned long ++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3); ++static __must_check __always_inline unsigned long ++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) + { +- return copy_user_generic((__force void *)dst, src, size); ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_WRITE, dst, size)) ++ return size; ++ ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((__force_kernel void *)dst, src, size); + } + +-extern long __copy_user_nocache(void *dst, const void __user *src, +- unsigned size, int zerorest); ++extern unsigned long __copy_user_nocache(void *dst, const void __user *src, ++ unsigned long size, int zerorest) __size_overflow(3); + +-static inline int +-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) ++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3); ++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) + { + might_sleep(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 1); + } + +-static inline int +-__copy_from_user_inatomic_nocache(void *dst, const void __user *src, +- unsigned size) ++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, ++ unsigned long size) __size_overflow(3); ++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, ++ unsigned long size) + { ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!__access_ok(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 0); + } + +-unsigned long +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); ++extern unsigned long ++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3); + + #endif /* _ASM_X86_UACCESS_64_H */ +diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h +index bb05228..d763d5b 100644 +--- a/arch/x86/include/asm/vdso.h ++++ b/arch/x86/include/asm/vdso.h +@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[]; + #define VDSO32_SYMBOL(base, name) \ + ({ \ + extern const char VDSO32_##name[]; \ +- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ ++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ + }) + #endif + +diff --git a/arch/x86/include/asm/x86_init.h 
b/arch/x86/include/asm/x86_init.h +index 1971e65..1e3559b 100644 +--- a/arch/x86/include/asm/x86_init.h ++++ b/arch/x86/include/asm/x86_init.h +@@ -28,7 +28,7 @@ struct x86_init_mpparse { + void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); + void (*find_smp_config)(void); + void (*get_smp_config)(unsigned int early); +-}; ++} __no_const; + + /** + * struct x86_init_resources - platform specific resource related ops +@@ -42,7 +42,7 @@ struct x86_init_resources { + void (*probe_roms)(void); + void (*reserve_resources)(void); + char *(*memory_setup)(void); +-}; ++} __no_const; + + /** + * struct x86_init_irqs - platform specific interrupt setup +@@ -55,7 +55,7 @@ struct x86_init_irqs { + void (*pre_vector_init)(void); + void (*intr_init)(void); + void (*trap_init)(void); +-}; ++} __no_const; + + /** + * struct x86_init_oem - oem platform specific customizing functions +@@ -65,7 +65,7 @@ struct x86_init_irqs { + struct x86_init_oem { + void (*arch_setup)(void); + void (*banner)(void); +-}; ++} __no_const; + + /** + * struct x86_init_mapping - platform specific initial kernel pagetable setup +@@ -76,7 +76,7 @@ struct x86_init_oem { + */ + struct x86_init_mapping { + void (*pagetable_reserve)(u64 start, u64 end); +-}; ++} __no_const; + + /** + * struct x86_init_paging - platform specific paging functions +@@ -86,7 +86,7 @@ struct x86_init_mapping { + struct x86_init_paging { + void (*pagetable_setup_start)(pgd_t *base); + void (*pagetable_setup_done)(pgd_t *base); +-}; ++} __no_const; + + /** + * struct x86_init_timers - platform specific timer setup +@@ -101,7 +101,7 @@ struct x86_init_timers { + void (*tsc_pre_init)(void); + void (*timer_init)(void); + void (*wallclock_init)(void); +-}; ++} __no_const; + + /** + * struct x86_init_iommu - platform specific iommu setup +@@ -109,7 +109,7 @@ struct x86_init_timers { + */ + struct x86_init_iommu { + int (*iommu_init)(void); +-}; ++} __no_const; + + /** + * struct x86_init_pci - platform specific pci init functions +@@ -123,7 +123,7 @@ struct x86_init_pci { + int (*init)(void); + void (*init_irq)(void); + void (*fixup_irqs)(void); +-}; ++} __no_const; + + /** + * struct x86_init_ops - functions for platform specific setup +@@ -139,7 +139,7 @@ struct x86_init_ops { + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; +-}; ++} __no_const; + + /** + * struct x86_cpuinit_ops - platform specific cpu hotplug setups +@@ -147,7 +147,7 @@ struct x86_init_ops { + */ + struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(void); +-}; ++} __no_const; + + /** + * struct x86_platform_ops - platform specific runtime functions +@@ -169,7 +169,7 @@ struct x86_platform_ops { + void (*nmi_init)(void); + unsigned char (*get_nmi_reason)(void); + int (*i8042_detect)(void); +-}; ++} __no_const; + + struct pci_dev; + +@@ -177,7 +177,7 @@ struct x86_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); + void (*teardown_msi_irq)(unsigned int irq); + void (*teardown_msi_irqs)(struct pci_dev *dev); +-}; ++} __no_const; + + extern struct x86_init_ops x86_init; + extern struct x86_cpuinit_ops x86_cpuinit; +diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h +index c6ce245..ffbdab7 100644 +--- a/arch/x86/include/asm/xsave.h ++++ b/arch/x86/include/asm/xsave.h +@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf) + { + int err; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE) ++ buf = (struct 
xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE); ++#endif ++ + /* + * Clear the xsave header first, so that reserved fields are + * initialized to zero. +@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf) + static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) + { + int err; +- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); ++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf); + u32 lmask = mask; + u32 hmask = mask >> 32; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE) ++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE); ++#endif ++ + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" + "2:\n" + ".section .fixup,"ax"\n" +diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile +index 6a564ac..9b1340c 100644 +--- a/arch/x86/kernel/acpi/realmode/Makefile ++++ b/arch/x86/kernel/acpi/realmode/Makefile +@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \ + $(call cc-option, -fno-stack-protector) \ + $(call cc-option, -mpreferred-stack-boundary=2) + KBUILD_CFLAGS += $(call cc-option, -m32) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify ++endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n + +diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S +index b4fd836..4358fe3 100644 +--- a/arch/x86/kernel/acpi/realmode/wakeup.S ++++ b/arch/x86/kernel/acpi/realmode/wakeup.S +@@ -108,6 +108,9 @@ wakeup_code: + /* Do any other stuff... */ + + #ifndef CONFIG_64BIT ++ /* Recheck NX bit overrides (64bit path does this in trampoline */ ++ call verify_cpu ++ + /* This could also be done in C code... */ + movl pmode_cr3, %eax + movl %eax, %cr3 +@@ -131,6 +134,7 @@ wakeup_code: + movl pmode_cr0, %eax + movl %eax, %cr0 + jmp pmode_return ++# include "../../verify_cpu.S" + #else + pushw $0 + pushw trampoline_segment +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 103b6ab..2004d0a 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void) + header->trampoline_segment = trampoline_address() >> 4; + #ifdef CONFIG_SMP + stack_start = (unsigned long)temp_stack + sizeof(temp_stack); ++ ++ pax_open_kernel(); + early_gdt_descr.address = + (unsigned long)get_cpu_gdt_table(smp_processor_id()); ++ pax_close_kernel(); ++ + initial_gs = per_cpu_offset(smp_processor_id()); + #endif + initial_code = (unsigned long)wakeup_long64; +diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S +index 13ab720..95d5442 100644 +--- a/arch/x86/kernel/acpi/wakeup_32.S ++++ b/arch/x86/kernel/acpi/wakeup_32.S +@@ -30,13 +30,11 @@ wakeup_pmode_return: + # and restore the stack ... 
but you need gdt for this to work + movl saved_context_esp, %esp + +- movl %cs:saved_magic, %eax +- cmpl $0x12345678, %eax ++ cmpl $0x12345678, saved_magic + jne bogus_magic + + # jump to place where we left off +- movl saved_eip, %eax +- jmp *%eax ++ jmp *(saved_eip) + + bogus_magic: + jmp bogus_magic +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index 1f84794..e23f862 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start, + */ + for (a = start; a < end; a++) { + instr = (u8 *)&a->instr_offset + a->instr_offset; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr) ++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + replacement = (u8 *)&a->repl_offset + a->repl_offset; + BUG_ON(a->replacementlen > a->instrlen); + BUG_ON(a->instrlen > sizeof(insnbuf)); +@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn DS segment override prefix into lock prefix */ +- if (*ptr == 0x3e) ++ if (*ktla_ktva(ptr) == 0x3e) + text_poke(ptr, ((unsigned char []){0xf0}), 1); + }; + mutex_unlock(&text_mutex); +@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn lock prefix into DS segment override prefix */ +- if (*ptr == 0xf0) ++ if (*ktla_ktva(ptr) == 0xf0) + text_poke(ptr, ((unsigned char []){0x3E}), 1); + }; + mutex_unlock(&text_mutex); +@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, + + BUG_ON(p->len > MAX_PATCH_LEN); + /* prep the buffer with the original instructions */ +- memcpy(insnbuf, p->instr, p->len); ++ memcpy(insnbuf, ktla_ktva(p->instr), p->len); + used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, + (unsigned long)p->instr, p->len); + +@@ -568,7 +587,7 @@ void __init alternative_instructions(void) + if (smp_alt_once) + free_init_pages("SMP alternatives", + (unsigned long)__smp_locks, +- (unsigned long)__smp_locks_end); ++ PAGE_ALIGN((unsigned long)__smp_locks_end)); + + restart_nmi(); + } +@@ -585,13 +604,17 @@ void __init alternative_instructions(void) + * instructions. And on the local CPU you need to be protected again NMI or MCE + * handlers seeing an inconsistent instruction while you patch. 
+ */ +-void *__init_or_module text_poke_early(void *addr, const void *opcode, ++void *__kprobes text_poke_early(void *addr, const void *opcode, + size_t len) + { + unsigned long flags; + local_irq_save(flags); +- memcpy(addr, opcode, len); ++ ++ pax_open_kernel(); ++ memcpy(ktla_ktva(addr), opcode, len); + sync_core(); ++ pax_close_kernel(); ++ + local_irq_restore(flags); + /* Could also do a CLFLUSH here to speed up CPU recovery; but + that causes hangs on some VIA CPUs. */ +@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, + */ + void *__kprobes text_poke(void *addr, const void *opcode, size_t len) + { +- unsigned long flags; +- char *vaddr; ++ unsigned char *vaddr = ktla_ktva(addr); + struct page *pages[2]; +- int i; ++ size_t i; + + if (!core_kernel_text((unsigned long)addr)) { +- pages[0] = vmalloc_to_page(addr); +- pages[1] = vmalloc_to_page(addr + PAGE_SIZE); ++ pages[0] = vmalloc_to_page(vaddr); ++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); + } else { +- pages[0] = virt_to_page(addr); ++ pages[0] = virt_to_page(vaddr); + WARN_ON(!PageReserved(pages[0])); +- pages[1] = virt_to_page(addr + PAGE_SIZE); ++ pages[1] = virt_to_page(vaddr + PAGE_SIZE); + } + BUG_ON(!pages[0]); +- local_irq_save(flags); +- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); +- if (pages[1]) +- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); +- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); +- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); +- clear_fixmap(FIX_TEXT_POKE0); +- if (pages[1]) +- clear_fixmap(FIX_TEXT_POKE1); +- local_flush_tlb(); +- sync_core(); +- /* Could also do a CLFLUSH here to speed up CPU recovery; but +- that causes hangs on some VIA CPUs. */ ++ text_poke_early(addr, opcode, len); + for (i = 0; i < len; i++) +- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); +- local_irq_restore(flags); ++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); + return addr; + } + +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index f98d84c..e402a69 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -174,7 +174,7 @@ int first_system_vector = 0xfe; + /* + * Debug level, exported for io_apic.c + */ +-unsigned int apic_verbosity; ++int apic_verbosity; + + int pic_mode; + +@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs) + apic_write(APIC_ESR, 0); + v1 = apic_read(APIC_ESR); + ack_APIC_irq(); +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", + smp_processor_id(), v0 , v1); +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 6d939d7..0697fcc 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, + } + EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +-void lock_vector_lock(void) ++void lock_vector_lock(void) __acquires(vector_lock) + { + /* Used to the online set of cpus does not change + * during assign_irq_vector. 
+@@ -1104,7 +1104,7 @@ void lock_vector_lock(void) + raw_spin_lock(&vector_lock); + } + +-void unlock_vector_lock(void) ++void unlock_vector_lock(void) __releases(vector_lock) + { + raw_spin_unlock(&vector_lock); + } +@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data) + ack_APIC_irq(); + } + +-atomic_t irq_mis_count; ++atomic_unchecked_t irq_mis_count; + + static void ack_apic_level(struct irq_data *data) + { +@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data) + * at the cpu. + */ + if (!(v & (1 << (i & 0x1f)))) { +- atomic_inc(&irq_mis_count); ++ atomic_inc_unchecked(&irq_mis_count); + + eoi_ioapic_irq(irq, cfg); + } +diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c +index a46bd38..6b906d7 100644 +--- a/arch/x86/kernel/apm_32.c ++++ b/arch/x86/kernel/apm_32.c +@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex); + * This is for buggy BIOS's that refer to (real mode) segment 0x40 + * even though they are called in protected mode. + */ +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + static const char driver_version[] = "1.16ac"; /* no spaces */ +@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call) + &call->esi); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + return call->eax & 0xff; +@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call) + &call->eax); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + return error; + } +@@ -2347,12 +2361,15 @@ static int __init apm_init(void) + * code to that CPU. 
+ */ + gdt = get_cpu_gdt_table(0); ++ ++ pax_open_kernel(); + set_desc_base(&gdt[APM_CS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); + set_desc_base(&gdt[APM_CS_16 >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_desc_base(&gdt[APM_DS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); ++ pax_close_kernel(); + + proc_create("apm", 0, NULL, &apm_file_ops); + +diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c +index 4f13faf..87db5d2 100644 +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c +@@ -33,6 +33,8 @@ void common(void) { + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); + OFFSET(TI_preempt_count, thread_info, preempt_count); ++ OFFSET(TI_lowest_stack, thread_info, lowest_stack); ++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo)); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +@@ -53,8 +55,26 @@ void common(void) { + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); ++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); ++#ifdef CONFIG_X86_64 ++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched); ++#endif ++#endif ++ ++#endif ++ ++ BLANK(); ++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE); ++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); ++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE); ++ + #ifdef CONFIG_XEN + BLANK(); + OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); +diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c +index e72a119..6e2955d 100644 +--- a/arch/x86/kernel/asm-offsets_64.c ++++ b/arch/x86/kernel/asm-offsets_64.c +@@ -69,6 +69,7 @@ int main(void) + BLANK(); + #undef ENTRY + ++ DEFINE(TSS_size, sizeof(struct tss_struct)); + OFFSET(TSS_ist, tss_struct, x86_tss.ist); + BLANK(); + +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile +index 25f24dc..4094a7f 100644 +--- a/arch/x86/kernel/cpu/Makefile ++++ b/arch/x86/kernel/cpu/Makefile +@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg + CFLAGS_REMOVE_perf_event.o = -pg + endif + +-# Make sure load_percpu_segment has no stackprotector +-nostackp := $(call cc-option, -fno-stack-protector) +-CFLAGS_common.o := $(nostackp) +- + obj-y := intel_cacheinfo.o scattered.o topology.o + obj-y += proc.o capflags.o powerflags.o common.o + obj-y += vmware.o hypervisor.o sched.o mshyperv.o +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 0bab2b1..d0a1bf8 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, + unsigned int size) + { + /* AMD errata T13 (order #21922) */ +- if ((c->x86 == 6)) { ++ if (c->x86 == 6) { + /* Duron Rev A0 */ + if (c->x86_model == 3 && c->x86_mask == 0) + size = 64; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index aa003b1..47ea638 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = { + + static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; + 
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { +-#ifdef CONFIG_X86_64 +- /* +- * We need valid kernel segments for data and code in long mode too +- * IRET will check the segment types kkeil 2000/10/28 +- * Also sysret mandates a special GDT layout +- * +- * TLS descriptors are currently at a different place compared to i386. +- * Hopefully nobody expects them at a fixed place (Wine?) +- */ +- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), +-#else +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), +- /* +- * Segments used for calling PnP BIOS have byte granularity. +- * They code segments and data segments have fixed 64k limits, +- * the transfer segment sizes are set at run time. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* +- * The APM segments have byte granularity and their bases +- * are set at run time. All have 64k limits. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* data */ +- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), +- +- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- GDT_STACK_CANARY_INIT +-#endif +-} }; +-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +- + static int __init x86_xsave_setup(char *s) + { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); +@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu) + { + struct desc_ptr gdt_descr; + +- gdt_descr.address = (long)get_cpu_gdt_table(cpu); ++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + /* Reload the per-cpu base */ +@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ setup_clear_cpu_cap(X86_FEATURE_SEP); ++#endif ++ + /* If the model name is still unset, do table lookup. 
*/ + if (!c->x86_model_id[0]) { + const char *p; +@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg) + } + __setup("clearcpuid=", setup_disablecpuid); + ++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo; ++EXPORT_PER_CPU_SYMBOL(current_tinfo); ++ + #ifdef CONFIG_X86_64 + struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; + +@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = + EXPORT_PER_CPU_SYMBOL(current_task); + + DEFINE_PER_CPU(unsigned long, kernel_stack) = +- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; ++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE; + EXPORT_PER_CPU_SYMBOL(kernel_stack); + + DEFINE_PER_CPU(char *, irq_stack_ptr) = +@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) + { + memset(regs, 0, sizeof(struct pt_regs)); + regs->fs = __KERNEL_PERCPU; +- regs->gs = __KERNEL_STACK_CANARY; ++ savesegment(gs, regs->gs); + + return regs; + } +@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void) + int i; + + cpu = stack_smp_processor_id(); +- t = &per_cpu(init_tss, cpu); ++ t = init_tss + cpu; + oist = &per_cpu(orig_ist, cpu); + + #ifdef CONFIG_NUMA +@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void) + switch_to_new_gdt(cpu); + loadsegment(fs, 0); + +- load_idt((const struct desc_ptr *)&idt_descr); ++ load_idt(&idt_descr); + + memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); + syscall_init(); +@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void) + wrmsrl(MSR_KERNEL_GS_BASE, 0); + barrier(); + +- x86_configure_nx(); + if (cpu != 0) + enable_x2apic(); + +@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void) + { + int cpu = smp_processor_id(); + struct task_struct *curr = current; +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + struct thread_struct *thread = &curr->thread; + + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 5231312..a78a987 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void) + * Update the IDT descriptor and reload the IDT so that + * it uses the read-only mapped virtual address. + */ +- idt_descr.address = fix_to_virt(FIX_F00F_IDT); ++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); + load_idt(&idt_descr); + } + #endif +diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c +index 319882e..993534e 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c ++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c +@@ -173,6 +173,8 @@ static void raise_mce(struct mce *m) + + /* Error injection interface */ + static ssize_t mce_write(struct file *filp, const char __user *ubuf, ++ size_t usize, loff_t *off) __size_overflow(3); ++static ssize_t mce_write(struct file *filp, const char __user *ubuf, + size_t usize, loff_t *off) + { + struct mce m; +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index 2af127d..8ff7ac0 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -42,6 +42,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/local.h> + + #include "mce-internal.h" + +@@ -202,7 +203,7 @@ static void print_mce(struct mce *m) + !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" 
: "", + m->cs, m->ip); + +- if (m->cs == __KERNEL_CS) ++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) + print_symbol("{%s}", m->ip); + pr_cont("\n"); + } +@@ -235,10 +236,10 @@ static void print_mce(struct mce *m) + + #define PANIC_TIMEOUT 5 /* 5 seconds */ + +-static atomic_t mce_paniced; ++static atomic_unchecked_t mce_paniced; + + static int fake_panic; +-static atomic_t mce_fake_paniced; ++static atomic_unchecked_t mce_fake_paniced; + + /* Panic in progress. Enable interrupts and wait for final IPI */ + static void wait_for_panic(void) +@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + /* + * Make sure only one CPU runs in machine check panic + */ +- if (atomic_inc_return(&mce_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_paniced) > 1) + wait_for_panic(); + barrier(); + +@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + console_verbose(); + } else { + /* Don't log too much for fake panic */ +- if (atomic_inc_return(&mce_fake_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) + return; + } + /* First print corrected ones that are still unlogged */ +@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t) + * might have been modified by someone else. + */ + rmb(); +- if (atomic_read(&mce_paniced)) ++ if (atomic_read_unchecked(&mce_paniced)) + wait_for_panic(); + if (!monarch_timeout) + goto out; +@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) + } + + /* Call the installed machine check handler for this CPU setup. */ +-void (*machine_check_vector)(struct pt_regs *, long error_code) = ++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only = + unexpected_machine_check; + + /* +@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) + return; + } + ++ pax_open_kernel(); + machine_check_vector = do_machine_check; ++ pax_close_kernel(); + + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(c); +@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) + */ + + static DEFINE_SPINLOCK(mce_chrdev_state_lock); +-static int mce_chrdev_open_count; /* #times opened */ ++static local_t mce_chrdev_open_count; /* #times opened */ + static int mce_chrdev_open_exclu; /* already open exclusive? 
*/ + + static int mce_chrdev_open(struct inode *inode, struct file *file) +@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + spin_lock(&mce_chrdev_state_lock); + + if (mce_chrdev_open_exclu || +- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { ++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) { + spin_unlock(&mce_chrdev_state_lock); + + return -EBUSY; +@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + + if (file->f_flags & O_EXCL) + mce_chrdev_open_exclu = 1; +- mce_chrdev_open_count++; ++ local_inc(&mce_chrdev_open_count); + + spin_unlock(&mce_chrdev_state_lock); + +@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file) + { + spin_lock(&mce_chrdev_state_lock); + +- mce_chrdev_open_count--; ++ local_dec(&mce_chrdev_open_count); + mce_chrdev_open_exclu = 0; + + spin_unlock(&mce_chrdev_state_lock); +@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void) + static void mce_reset(void) + { + cpu_missing = 0; +- atomic_set(&mce_fake_paniced, 0); ++ atomic_set_unchecked(&mce_fake_paniced, 0); + atomic_set(&mce_executing, 0); + atomic_set(&mce_callin, 0); + atomic_set(&global_nwo, 0); +diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c +index 5c0e653..0882b0a 100644 +--- a/arch/x86/kernel/cpu/mcheck/p5.c ++++ b/arch/x86/kernel/cpu/mcheck/p5.c +@@ -12,6 +12,7 @@ + #include <asm/system.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* By default disabled */ + int mce_p5_enabled __read_mostly; +@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) + if (!cpu_has(c, X86_FEATURE_MCE)) + return; + ++ pax_open_kernel(); + machine_check_vector = pentium_machine_check; ++ pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c +index 54060f5..c1a7577 100644 +--- a/arch/x86/kernel/cpu/mcheck/winchip.c ++++ b/arch/x86/kernel/cpu/mcheck/winchip.c +@@ -11,6 +11,7 @@ + #include <asm/system.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* Machine check handler for WinChip C6: */ + static void winchip_machine_check(struct pt_regs *regs, long error_code) +@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) + { + u32 lo, hi; + ++ pax_open_kernel(); + machine_check_vector = winchip_machine_check; ++ pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c +index 7928963..1b16001 100644 +--- a/arch/x86/kernel/cpu/mtrr/if.c ++++ b/arch/x86/kernel/cpu/mtrr/if.c +@@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size, + * "base=%Lx size=%Lx type=%s" or "disable=%d" + */ + static ssize_t ++mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3); ++static ssize_t + mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) + { + int i, err; +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c +index 6b96110..0da73eb 100644 +--- a/arch/x86/kernel/cpu/mtrr/main.c ++++ b/arch/x86/kernel/cpu/mtrr/main.c +@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex); + u64 size_or_mask, size_and_mask; + static bool mtrr_aps_delayed_init; + +-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; ++static 
const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; + + const struct mtrr_ops *mtrr_if; + +diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h +index df5e41f..816c719 100644 +--- a/arch/x86/kernel/cpu/mtrr/mtrr.h ++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h +@@ -25,7 +25,7 @@ struct mtrr_ops { + int (*validate_add_page)(unsigned long base, unsigned long size, + unsigned int type); + int (*have_wrcomb)(void); +-}; ++} __do_const; + + extern int generic_get_free_region(unsigned long base, unsigned long size, + int replace_reg); +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index 2bda212..78cc605 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) + break; + + perf_callchain_store(entry, frame.return_address); +- fp = frame.next_frame; ++ fp = (const void __force_user *)frame.next_frame; + } + } + +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c +index 13ad899..f642b9a 100644 +--- a/arch/x86/kernel/crash.c ++++ b/arch/x86/kernel/crash.c +@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + struct pt_regs fixed_regs; +-#endif + +-#ifdef CONFIG_X86_32 +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + crash_fixup_ss_esp(&fixed_regs, regs); + regs = &fixed_regs; + } +diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c +index 37250fe..bf2ec74 100644 +--- a/arch/x86/kernel/doublefault_32.c ++++ b/arch/x86/kernel/doublefault_32.c +@@ -11,7 +11,7 @@ + + #define DOUBLEFAULT_STACKSIZE (1024) + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; +-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) ++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) + + #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) + +@@ -21,7 +21,7 @@ static void doublefault_fn(void) + unsigned long gdt, tss; + + store_gdt(&gdt_desc); +- gdt = gdt_desc.address; ++ gdt = (unsigned long)gdt_desc.address; + + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); + +@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, +- .es = __USER_DS, ++ .es = __KERNEL_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, +- .ds = __USER_DS, ++ .ds = __KERNEL_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index 1aae78f..aab3a3d 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -2,6 +2,9 @@ + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/kprobes.h> + #include <linux/uaccess.h> +@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable) + static void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { +- struct task_struct *task = tinfo->task; + unsigned long ret_addr; + int index = task->curr_ret_stack; + +@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + 
static inline void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { } + #endif + +@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack + */ + +-static inline int valid_stack_ptr(struct thread_info *tinfo, +- void *p, unsigned int size, void *end) ++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end) + { +- void *t = tinfo; + if (end) { + if (p < end && p >= (end-THREAD_SIZE)) + return 1; +@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, + } + + unsigned long +-print_context_stack(struct thread_info *tinfo, ++print_context_stack(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) + { + struct stack_frame *frame = (struct stack_frame *)bp; + +- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { ++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) { + unsigned long addr; + + addr = *stack; +@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo, + } else { + ops->address(data, addr, 0); + } +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + stack++; + } +@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo, + EXPORT_SYMBOL_GPL(print_context_stack); + + unsigned long +-print_context_stack_bp(struct thread_info *tinfo, ++print_context_stack_bp(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) +@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo, + struct stack_frame *frame = (struct stack_frame *)bp; + unsigned long *ret_addr = &frame->return_address; + +- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { ++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) { + unsigned long addr = *ret_addr; + + if (!__kernel_text_address(addr)) +@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo, + ops->address(data, addr, 1); + frame = frame->next_frame; + ret_addr = &frame->return_address; +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + + return (unsigned long)frame; +@@ -186,7 +186,7 @@ void dump_stack(void) + + bp = stack_frame(current, NULL); + printk("Pid: %d, comm: %.20s %s %s %.*s\n", +- current->pid, current->comm, print_tainted(), ++ task_pid_nr(current), current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); +@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void) + } + EXPORT_SYMBOL_GPL(oops_begin); + ++extern void gr_handle_kernel_exploit(void); ++ + void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + { + if (regs && kexec_should_crash(current)) +@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); +- do_exit(signr); ++ ++ gr_handle_kernel_exploit(); ++ ++ do_group_exit(signr); + } + + int __kprobes __die(const char *str, struct pt_regs *regs, long err) +@@ -269,7 
+274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) + + show_registers(regs); + #ifdef CONFIG_X86_32 +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + } else { +@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err) + unsigned long flags = oops_begin(); + int sig = SIGSEGV; + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + report_bug(regs->ip, regs); + + if (__die(str, regs, err)) +diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c +index c99f9ed..2a15d80 100644 +--- a/arch/x86/kernel/dumpstack_32.c ++++ b/arch/x86/kernel/dumpstack_32.c +@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + bp = stack_frame(task, regs); + + for (;;) { +- struct thread_info *context; ++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); + +- context = (struct thread_info *) +- ((unsigned long)stack & (~(THREAD_SIZE - 1))); +- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph); ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); + +- stack = (unsigned long *)context->previous_esp; +- if (!stack) ++ if (stack_start == task_stack_page(task)) + break; ++ stack = *(unsigned long **)stack_start; + if (ops->stack(data, "IRQ") < 0) + break; + touch_nmi_watchdog(); +@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs) + * When in-kernel, we also print out the stack and code at the + * time of the fault.. + */ +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned int code_prologue = code_bytes * 43 / 64; + unsigned int code_len = code_bytes; + unsigned char c; + u8 *ip; ++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]); + + printk(KERN_EMERG "Stack:\n"); + show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); + + printk(KERN_EMERG "Code: "); + +- ip = (u8 *)regs->ip - code_prologue; ++ ip = (u8 *)regs->ip - code_prologue + cs_base; + if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { + /* try starting at IP */ +- ip = (u8 *)regs->ip; ++ ip = (u8 *)regs->ip + cs_base; + code_len = code_len - code_prologue + 1; + } + for (i = 0; i < code_len; i++, ip++) { +@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs) + printk(KERN_CONT " Bad EIP value."); + break; + } +- if (ip == (u8 *)regs->ip) ++ if (ip == (u8 *)regs->ip + cs_base) + printk(KERN_CONT "<%02x> ", c); + else + printk(KERN_CONT "%02x ", c); +@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip) + { + unsigned short ud2; + ++ ip = ktla_ktva(ip); + if (ip < PAGE_OFFSET) + return 0; + if (probe_kernel_address((unsigned short *)ip, ud2)) +@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip) + + return ud2 == 0x0b0f; + } ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_check_alloca(unsigned long size) ++{ ++ unsigned long sp = (unsigned long)&sp, stack_left; ++ ++ /* all kernel stacks are of the same size */ ++ stack_left = sp & (THREAD_SIZE - 1); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++} ++EXPORT_SYMBOL(pax_check_alloca); ++#endif +diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c +index 6d728d9..279514e 100644 +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c +@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *irq_stack_end = + (unsigned long *)per_cpu(irq_stack_ptr, cpu); + unsigned used = 0; +- struct 
thread_info *tinfo; + int graph = 0; + unsigned long dummy; ++ void *stack_start; + + if (!task) + task = current; +@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + * current stack address. If the stacks consist of nested + * exceptions + */ +- tinfo = task_thread_info(task); + for (;;) { + char *id; + unsigned long *estack_end; ++ + estack_end = in_exception_stack(cpu, (unsigned long)stack, + &used, &id); + +@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (ops->stack(data, id) < 0) + break; + +- bp = ops->walk_stack(tinfo, stack, bp, ops, ++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops, + data, estack_end, &graph); + ops->stack(data, "<EOE>"); + /* +@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (in_irq_stack(stack, irq_stack, irq_stack_end)) { + if (ops->stack(data, "IRQ") < 0) + break; +- bp = ops->walk_stack(tinfo, stack, bp, ++ bp = ops->walk_stack(task, irq_stack, stack, bp, + ops, data, irq_stack_end, &graph); + /* + * We link to the next stack (which would be +@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + /* + * This handles the process stack: + */ +- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); ++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); + put_cpu(); + } + EXPORT_SYMBOL(dump_trace); +@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip) + + return ud2 == 0x0b0f; + } ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_check_alloca(unsigned long size) ++{ ++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end; ++ unsigned cpu, used; ++ char *id; ++ ++ /* check the process stack first */ ++ stack_start = (unsigned long)task_stack_page(current); ++ stack_end = stack_start + THREAD_SIZE; ++ if (likely(stack_start <= sp && sp < stack_end)) { ++ unsigned long stack_left = sp & (THREAD_SIZE - 1); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ cpu = get_cpu(); ++ ++ /* check the irq stacks */ ++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu); ++ stack_start = stack_end - IRQ_STACK_SIZE; ++ if (stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ /* check the exception stacks */ ++ used = 0; ++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id); ++ stack_start = stack_end - EXCEPTION_STKSZ; ++ if (stack_end && stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ put_cpu(); ++ ++ /* unknown stack */ ++ BUG(); ++} ++EXPORT_SYMBOL(pax_check_alloca); ++#endif +diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c +index cd28a35..c72ed9a 100644 +--- a/arch/x86/kernel/early_printk.c ++++ b/arch/x86/kernel/early_printk.c +@@ -7,6 +7,7 @@ + #include <linux/pci_regs.h> + #include <linux/pci_ids.h> + #include <linux/errno.h> ++#include <linux/sched.h> + #include <asm/io.h> + #include <asm/processor.h> + #include <asm/fcntl.h> +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index f3f6f53..0841b66 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -186,13 +186,146 @@ + /*CFI_REL_OFFSET 
gs, PT_GS*/ + .endm + .macro SET_KERNEL_GS reg ++ ++#ifdef CONFIG_CC_STACKPROTECTOR + movl $(__KERNEL_STACK_CANARY), \reg ++#elif defined(CONFIG_PAX_MEMORY_UDEREF) ++ movl $(__USER_DS), \reg ++#else ++ xorl \reg, \reg ++#endif ++ + movl \reg, %gs + .endm + + #endif /* CONFIG_X86_32_LAZY_GS */ + +-.macro SAVE_ALL ++.macro pax_enter_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_enter_kernel ++#endif ++.endm ++ ++.macro pax_exit_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_exit_kernel ++#endif ++.endm ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ENTRY(pax_enter_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ bts $16, %esi ++ jnc 1f ++ mov %cs, %esi ++ cmp $__KERNEL_CS, %esi ++ jz 3f ++ ljmp $__KERNEL_CS, $3f ++1: ljmp $__KERNEXEC_KERNEL_CS, $2f ++2: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++#else ++ mov %esi, %cr0 ++#endif ++3: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++#endif ++ mov %cs, %esi ++ cmp $__KERNEXEC_KERNEL_CS, %esi ++ jnz 2f ++#ifdef CONFIG_PARAVIRT ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ btr $16, %esi ++ ljmp $__KERNEL_CS, $1f ++1: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); ++#else ++ mov %esi, %cr0 ++#endif ++2: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_exit_kernel) ++#endif ++ ++.macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++.endm ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++/* ++ * ebp: thread_info ++ * ecx, edx: can be clobbered ++ */ ++ENTRY(pax_erase_kstack) ++ pushl %edi ++ pushl %eax ++ ++ mov TI_lowest_stack(%ebp), %edi ++ mov $-0xBEEF, %eax ++ std ++ ++1: mov %edi, %ecx ++ and $THREAD_SIZE_asm - 1, %ecx ++ shr $2, %ecx ++ repne scasl ++ jecxz 2f ++ ++ cmp $2*16, %ecx ++ jc 2f ++ ++ mov $2*16, %ecx ++ repe scasl ++ jecxz 2f ++ jne 1b ++ ++2: cld ++ mov %esp, %ecx ++ sub %edi, %ecx ++ shr $2, %ecx ++ rep stosl ++ ++ mov TI_task_thread_sp0(%ebp), %edi ++ sub $128, %edi ++ mov %edi, TI_lowest_stack(%ebp) ++ ++ popl %eax ++ popl %edi ++ ret ++ENDPROC(pax_erase_kstack) ++#endif ++ ++.macro __SAVE_ALL _DS + cld + PUSH_GS + pushl_cfi %fs +@@ -215,7 +348,7 @@ + CFI_REL_OFFSET ecx, 0 + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 +- movl $(__USER_DS), %edx ++ movl $_DS, %edx + movl %edx, %ds + movl %edx, %es + movl $(__KERNEL_PERCPU), %edx +@@ -223,6 +356,15 @@ + SET_KERNEL_GS %edx + .endm + ++.macro SAVE_ALL ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ __SAVE_ALL __KERNEL_DS ++ pax_enter_kernel ++#else ++ __SAVE_ALL __USER_DS ++#endif ++.endm ++ + .macro RESTORE_INT_REGS + popl_cfi %ebx + CFI_RESTORE ebx +@@ -308,7 +450,7 @@ ENTRY(ret_from_fork) + popfl_cfi + jmp syscall_exit + CFI_ENDPROC +-END(ret_from_fork) ++ENDPROC(ret_from_fork) + + /* + * Interrupt exit functions should be protected against kprobes +@@ -333,7 +475,15 @@ check_userspace: + movb PT_CS(%esp), %al + andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax + cmpl $USER_RPL, %eax ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jae resume_userspace ++ ++ PAX_EXIT_KERNEL ++ jmp resume_kernel ++#else + jb resume_kernel # not returning to v8086 or 
userspace ++#endif + + ENTRY(resume_userspace) + LOCKDEP_SYS_EXIT +@@ -345,8 +495,8 @@ ENTRY(resume_userspace) + andl $_TIF_WORK_MASK, %ecx # is there any work to be done on + # int/exception return? + jne work_pending +- jmp restore_all +-END(ret_from_exception) ++ jmp restore_all_pax ++ENDPROC(ret_from_exception) + + #ifdef CONFIG_PREEMPT + ENTRY(resume_kernel) +@@ -361,7 +511,7 @@ need_resched: + jz restore_all + call preempt_schedule_irq + jmp need_resched +-END(resume_kernel) ++ENDPROC(resume_kernel) + #endif + CFI_ENDPROC + /* +@@ -395,23 +545,34 @@ sysenter_past_esp: + /*CFI_REL_OFFSET cs, 0*/ + /* + * Push current_thread_info()->sysenter_return to the stack. +- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words +- * pushed above; +8 corresponds to copy_thread's esp0 setting. + */ +- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) ++ pushl_cfi $0 + CFI_REL_OFFSET eip, 0 + + pushl_cfi %eax + SAVE_ALL ++ GET_THREAD_INFO(%ebp) ++ movl TI_sysenter_return(%ebp),%ebp ++ movl %ebp,PT_EIP(%esp) + ENABLE_INTERRUPTS(CLBR_NONE) + + /* + * Load the potential sixth argument from user stack. + * Careful about security. + */ ++ movl PT_OLDESP(%esp),%ebp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov PT_OLDSS(%esp),%ds ++1: movl %ds:(%ebp),%ebp ++ push %ss ++ pop %ds ++#else + cmpl $__PAGE_OFFSET-3,%ebp + jae syscall_fault + 1: movl (%ebp),%ebp ++#endif ++ + movl %ebp,PT_EBP(%esp) + .section __ex_table,"a" + .align 4 +@@ -434,12 +595,24 @@ sysenter_do_call: + testl $_TIF_ALLWORK_MASK, %ecx + jne sysexit_audit + sysenter_exit: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushl_cfi %eax ++ movl %esp, %eax ++ call pax_randomize_kstack ++ popl_cfi %eax ++#endif ++ ++ pax_erase_kstack ++ + /* if something modifies registers it must also disable sysexit */ + movl PT_EIP(%esp), %edx + movl PT_OLDESP(%esp), %ecx + xorl %ebp,%ebp + TRACE_IRQS_ON + 1: mov PT_FS(%esp), %fs ++2: mov PT_DS(%esp), %ds ++3: mov PT_ES(%esp), %es + PTGS_TO_GS + ENABLE_INTERRUPTS_SYSEXIT + +@@ -456,6 +629,9 @@ sysenter_audit: + movl %eax,%edx /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ + call audit_syscall_entry ++ ++ pax_erase_kstack ++ + pushl_cfi %ebx + movl PT_EAX(%esp),%eax /* reload syscall number */ + jmp sysenter_do_call +@@ -482,11 +658,17 @@ sysexit_audit: + + CFI_ENDPROC + .pushsection .fixup,"ax" +-2: movl $0,PT_FS(%esp) ++4: movl $0,PT_FS(%esp) ++ jmp 1b ++5: movl $0,PT_DS(%esp) ++ jmp 1b ++6: movl $0,PT_ES(%esp) + jmp 1b + .section __ex_table,"a" + .align 4 +- .long 1b,2b ++ .long 1b,4b ++ .long 2b,5b ++ .long 3b,6b + .popsection + PTGS_TO_GS_EX + ENDPROC(ia32_sysenter_target) +@@ -519,6 +701,15 @@ syscall_exit: + testl $_TIF_ALLWORK_MASK, %ecx # current->work + jne syscall_exit_work + ++restore_all_pax: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ movl %esp, %eax ++ call pax_randomize_kstack ++#endif ++ ++ pax_erase_kstack ++ + restore_all: + TRACE_IRQS_IRET + restore_all_notrace: +@@ -578,14 +769,34 @@ ldt_ss: + * compensating for the offset by changing to the ESPFIX segment with + * a base address that matches for the difference. 
+ */ +-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) ++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) + mov %esp, %edx /* load kernel esp */ + mov PT_OLDESP(%esp), %eax /* load userspace esp */ + mov %dx, %ax /* eax: new kernel esp */ + sub %eax, %edx /* offset (low word is 0) */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif + shr $16, %edx +- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ +- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %esi ++ btr $16, %esi ++ mov %esi, %cr0 ++#endif ++ ++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ ++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ bts $16, %esi ++ mov %esi, %cr0 ++#endif ++ + pushl_cfi $__ESPFIX_SS + pushl_cfi %eax /* new kernel esp */ + /* Disable interrupts, but do not irqtrace this section: we +@@ -614,34 +825,28 @@ work_resched: + movl TI_flags(%ebp), %ecx + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other + # than syscall tracing? +- jz restore_all ++ jz restore_all_pax + testb $_TIF_NEED_RESCHED, %cl + jnz work_resched + + work_notifysig: # deal with pending signals and + # notify-resume requests ++ movl %esp, %eax + #ifdef CONFIG_VM86 + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) +- movl %esp, %eax +- jne work_notifysig_v86 # returning to kernel-space or ++ jz 1f # returning to kernel-space or + # vm86-space +- xorl %edx, %edx +- call do_notify_resume +- jmp resume_userspace_sig + +- ALIGN +-work_notifysig_v86: + pushl_cfi %ecx # save ti_flags for do_notify_resume + call save_v86_state # %eax contains pt_regs pointer + popl_cfi %ecx + movl %eax, %esp +-#else +- movl %esp, %eax ++1: + #endif + xorl %edx, %edx + call do_notify_resume + jmp resume_userspace_sig +-END(work_pending) ++ENDPROC(work_pending) + + # perform syscall exit tracing + ALIGN +@@ -649,11 +854,14 @@ syscall_trace_entry: + movl $-ENOSYS,PT_EAX(%esp) + movl %esp, %eax + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + /* What it returned is what we'll actually use. 
*/ + cmpl $(nr_syscalls), %eax + jnae syscall_call + jmp syscall_exit +-END(syscall_trace_entry) ++ENDPROC(syscall_trace_entry) + + # perform syscall exit tracing + ALIGN +@@ -666,20 +874,24 @@ syscall_exit_work: + movl %esp, %eax + call syscall_trace_leave + jmp resume_userspace +-END(syscall_exit_work) ++ENDPROC(syscall_exit_work) + CFI_ENDPROC + + RING0_INT_FRAME # can't unwind into user space anyway + syscall_fault: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %ss ++ pop %ds ++#endif + GET_THREAD_INFO(%ebp) + movl $-EFAULT,PT_EAX(%esp) + jmp resume_userspace +-END(syscall_fault) ++ENDPROC(syscall_fault) + + syscall_badsys: + movl $-ENOSYS,PT_EAX(%esp) + jmp resume_userspace +-END(syscall_badsys) ++ENDPROC(syscall_badsys) + CFI_ENDPROC + /* + * End of kprobes section +@@ -753,6 +965,36 @@ ptregs_clone: + CFI_ENDPROC + ENDPROC(ptregs_clone) + ++ ALIGN; ++ENTRY(kernel_execve) ++ CFI_STARTPROC ++ pushl_cfi %ebp ++ sub $PT_OLDSS+4,%esp ++ pushl_cfi %edi ++ pushl_cfi %ecx ++ pushl_cfi %eax ++ lea 3*4(%esp),%edi ++ mov $PT_OLDSS/4+1,%ecx ++ xorl %eax,%eax ++ rep stosl ++ popl_cfi %eax ++ popl_cfi %ecx ++ popl_cfi %edi ++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp) ++ pushl_cfi %esp ++ call sys_execve ++ add $4,%esp ++ CFI_ADJUST_CFA_OFFSET -4 ++ GET_THREAD_INFO(%ebp) ++ test %eax,%eax ++ jz syscall_exit ++ add $PT_OLDSS+4,%esp ++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4 ++ popl_cfi %ebp ++ ret ++ CFI_ENDPROC ++ENDPROC(kernel_execve) ++ + .macro FIXUP_ESPFIX_STACK + /* + * Switch back for ESPFIX stack to the normal zerobased stack +@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone) + * normal stack and adjusts ESP with the matching offset. + */ + /* fixup the stack */ +- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ +- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif ++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ ++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ + shl $16, %eax + addl %esp, %eax /* the adjusted stack pointer */ + pushl_cfi $__KERNEL_DS +@@ -816,7 +1065,7 @@ vector=vector+1 + .endr + 2: jmp common_interrupt + .endr +-END(irq_entries_start) ++ENDPROC(irq_entries_start) + + .previous + END(interrupt) +@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error) + pushl_cfi $do_coprocessor_error + jmp error_code + CFI_ENDPROC +-END(coprocessor_error) ++ENDPROC(coprocessor_error) + + ENTRY(simd_coprocessor_error) + RING0_INT_FRAME +@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error) + #endif + jmp error_code + CFI_ENDPROC +-END(simd_coprocessor_error) ++ENDPROC(simd_coprocessor_error) + + ENTRY(device_not_available) + RING0_INT_FRAME +@@ -893,7 +1142,7 @@ ENTRY(device_not_available) + pushl_cfi $do_device_not_available + jmp error_code + CFI_ENDPROC +-END(device_not_available) ++ENDPROC(device_not_available) + + #ifdef CONFIG_PARAVIRT + ENTRY(native_iret) +@@ -902,12 +1151,12 @@ ENTRY(native_iret) + .align 4 + .long native_iret, iret_exc + .previous +-END(native_iret) ++ENDPROC(native_iret) + + ENTRY(native_irq_enable_sysexit) + sti + sysexit +-END(native_irq_enable_sysexit) ++ENDPROC(native_irq_enable_sysexit) + #endif + + ENTRY(overflow) +@@ -916,7 +1165,7 @@ ENTRY(overflow) + pushl_cfi $do_overflow + jmp error_code + CFI_ENDPROC +-END(overflow) ++ENDPROC(overflow) + + ENTRY(bounds) + RING0_INT_FRAME +@@ -924,7 +1173,7 @@ ENTRY(bounds) + pushl_cfi $do_bounds + jmp error_code + CFI_ENDPROC +-END(bounds) ++ENDPROC(bounds) + + ENTRY(invalid_op) + RING0_INT_FRAME +@@ 
-932,7 +1181,7 @@ ENTRY(invalid_op) + pushl_cfi $do_invalid_op + jmp error_code + CFI_ENDPROC +-END(invalid_op) ++ENDPROC(invalid_op) + + ENTRY(coprocessor_segment_overrun) + RING0_INT_FRAME +@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun) + pushl_cfi $do_coprocessor_segment_overrun + jmp error_code + CFI_ENDPROC +-END(coprocessor_segment_overrun) ++ENDPROC(coprocessor_segment_overrun) + + ENTRY(invalid_TSS) + RING0_EC_FRAME + pushl_cfi $do_invalid_TSS + jmp error_code + CFI_ENDPROC +-END(invalid_TSS) ++ENDPROC(invalid_TSS) + + ENTRY(segment_not_present) + RING0_EC_FRAME + pushl_cfi $do_segment_not_present + jmp error_code + CFI_ENDPROC +-END(segment_not_present) ++ENDPROC(segment_not_present) + + ENTRY(stack_segment) + RING0_EC_FRAME + pushl_cfi $do_stack_segment + jmp error_code + CFI_ENDPROC +-END(stack_segment) ++ENDPROC(stack_segment) + + ENTRY(alignment_check) + RING0_EC_FRAME + pushl_cfi $do_alignment_check + jmp error_code + CFI_ENDPROC +-END(alignment_check) ++ENDPROC(alignment_check) + + ENTRY(divide_error) + RING0_INT_FRAME +@@ -976,7 +1225,7 @@ ENTRY(divide_error) + pushl_cfi $do_divide_error + jmp error_code + CFI_ENDPROC +-END(divide_error) ++ENDPROC(divide_error) + + #ifdef CONFIG_X86_MCE + ENTRY(machine_check) +@@ -985,7 +1234,7 @@ ENTRY(machine_check) + pushl_cfi machine_check_vector + jmp error_code + CFI_ENDPROC +-END(machine_check) ++ENDPROC(machine_check) + #endif + + ENTRY(spurious_interrupt_bug) +@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug) + pushl_cfi $do_spurious_interrupt_bug + jmp error_code + CFI_ENDPROC +-END(spurious_interrupt_bug) ++ENDPROC(spurious_interrupt_bug) + /* + * End of kprobes section + */ +@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, + + ENTRY(mcount) + ret +-END(mcount) ++ENDPROC(mcount) + + ENTRY(ftrace_caller) + cmpl $0, function_trace_stop +@@ -1138,7 +1387,7 @@ ftrace_graph_call: + .globl ftrace_stub + ftrace_stub: + ret +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + #else /* ! CONFIG_DYNAMIC_FTRACE */ + +@@ -1174,7 +1423,7 @@ trace: + popl %ecx + popl %eax + jmp ftrace_stub +-END(mcount) ++ENDPROC(mcount) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller) + popl %ecx + popl %eax + ret +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + .globl return_to_handler + return_to_handler: +@@ -1209,7 +1458,6 @@ return_to_handler: + jmp *%ecx + #endif + +-.section .rodata,"a" + #include "syscall_table_32.S" + + syscall_table_size=(.-sys_call_table) +@@ -1255,15 +1503,18 @@ error_code: + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart + REG_TO_PTGS %ecx + SET_KERNEL_GS %ecx +- movl $(__USER_DS), %ecx ++ movl $(__KERNEL_DS), %ecx + movl %ecx, %ds + movl %ecx, %es ++ ++ pax_enter_kernel ++ + TRACE_IRQS_OFF + movl %esp,%eax # pt_regs pointer + call *%edi + jmp ret_from_exception + CFI_ENDPROC +-END(page_fault) ++ENDPROC(page_fault) + + /* + * Debug traps and NMI can happen at the one SYSENTER instruction +@@ -1305,7 +1556,7 @@ debug_stack_correct: + call do_debug + jmp ret_from_exception + CFI_ENDPROC +-END(debug) ++ENDPROC(debug) + + /* + * NMI is doubly nasty. 
It can happen _while_ we're handling +@@ -1342,6 +1593,9 @@ nmi_stack_correct: + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_nmi ++ ++ pax_exit_kernel ++ + jmp restore_all_notrace + CFI_ENDPROC + +@@ -1378,12 +1632,15 @@ nmi_espfix_stack: + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code + call do_nmi ++ ++ pax_exit_kernel ++ + RESTORE_REGS + lss 12+4(%esp), %esp # back to espfix stack + CFI_ADJUST_CFA_OFFSET -24 + jmp irq_return + CFI_ENDPROC +-END(nmi) ++ENDPROC(nmi) + + ENTRY(int3) + RING0_INT_FRAME +@@ -1395,14 +1652,14 @@ ENTRY(int3) + call do_int3 + jmp ret_from_exception + CFI_ENDPROC +-END(int3) ++ENDPROC(int3) + + ENTRY(general_protection) + RING0_EC_FRAME + pushl_cfi $do_general_protection + jmp error_code + CFI_ENDPROC +-END(general_protection) ++ENDPROC(general_protection) + + #ifdef CONFIG_KVM_GUEST + ENTRY(async_page_fault) +@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault) + pushl_cfi $do_async_page_fault + jmp error_code + CFI_ENDPROC +-END(async_page_fault) ++ENDPROC(async_page_fault) + #endif + + /* +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index faf8d5e..4f16a68 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -55,6 +55,8 @@ + #include <asm/paravirt.h> + #include <asm/ftrace.h> + #include <asm/percpu.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ + #include <linux/elf-em.h> +@@ -68,8 +70,9 @@ + #ifdef CONFIG_FUNCTION_TRACER + #ifdef CONFIG_DYNAMIC_FTRACE + ENTRY(mcount) ++ pax_force_retaddr + retq +-END(mcount) ++ENDPROC(mcount) + + ENTRY(ftrace_caller) + cmpl $0, function_trace_stop +@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + #else /* ! 
CONFIG_DYNAMIC_FTRACE */ + ENTRY(mcount) +@@ -112,6 +116,7 @@ ENTRY(mcount) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq + + trace: +@@ -121,12 +126,13 @@ trace: + movq 8(%rbp), %rsi + subq $MCOUNT_INSN_SIZE, %rdi + ++ pax_force_fptr ftrace_trace_function + call *ftrace_trace_function + + MCOUNT_RESTORE_FRAME + + jmp ftrace_stub +-END(mcount) ++ENDPROC(mcount) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller) + + MCOUNT_RESTORE_FRAME + ++ pax_force_retaddr + retq +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + GLOBAL(return_to_handler) + subq $24, %rsp +@@ -163,6 +170,7 @@ GLOBAL(return_to_handler) + movq 8(%rsp), %rdx + movq (%rsp), %rax + addq $24, %rsp ++ pax_force_fptr %rdi + jmp *%rdi + #endif + +@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64) + ENDPROC(native_usergs_sysret64) + #endif /* CONFIG_PARAVIRT */ + ++ .macro ljmpq sel, off ++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) ++ .byte 0x48; ljmp *1234f(%rip) ++ .pushsection .rodata ++ .align 16 ++ 1234: .quad \off; .word \sel ++ .popsection ++#else ++ pushq $\sel ++ pushq $\off ++ lretq ++#endif ++ .endm ++ ++ .macro pax_enter_kernel ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_enter_kernel ++#endif ++ .endm ++ ++ .macro pax_exit_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_exit_kernel ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ENTRY(pax_enter_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 3f ++ mov %cs,%edi ++ cmp $__KERNEL_CS,%edi ++ jnz 2f ++1: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ ++2: ljmpq __KERNEL_CS,1f ++3: ljmpq __KERNEXEC_KERNEL_CS,4f ++4: SET_RDI_INTO_CR0 ++ jmp 1b ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ mov %cs,%rdi ++ cmp $__KERNEXEC_KERNEL_CS,%edi ++ jz 2f ++1: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI); ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ ++2: GET_CR0_INTO_RDI ++ btr $16,%rdi ++ ljmpq __KERNEL_CS,3f ++3: SET_RDI_INTO_CR0 ++ jmp 1b ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI); ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ENDPROC(pax_exit_kernel) ++#endif ++ ++ .macro pax_enter_kernel_user ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ .endm ++ ++ .macro pax_exit_kernel_user ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushq %rax ++ call pax_randomize_kstack ++ popq %rax ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ENTRY(pax_enter_kernel_user) ++ pushq %rdi ++ pushq %rbx ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR3_INTO_RDI ++ mov %rdi,%rbx ++ add $__START_KERNEL_map,%rbx ++ sub phys_base(%rip),%rbx ++ ++#ifdef CONFIG_PARAVIRT ++ pushq %rdi ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) ++ i = i + 1 ++ .endr ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept USER_PGD_PTRS ++ movb $0,i*8(%rbx) ++ i = i + 1 ++ .endr ++ ++#ifdef CONFIG_PARAVIRT ++2: popq %rdi ++#endif ++ SET_RDI_INTO_CR3 ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ bts $16,%rdi 
++ SET_RDI_INTO_CR0 ++#endif ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rbx ++ popq %rdi ++ pax_force_retaddr ++ retq ++ENDPROC(pax_enter_kernel_user) ++ ++ENTRY(pax_exit_kernel_user) ++ push %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ pushq %rbx ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ btr $16,%rdi ++ SET_RDI_INTO_CR0 ++#endif ++ ++ GET_CR3_INTO_RDI ++ add $__START_KERNEL_map,%rdi ++ sub phys_base(%rip),%rdi ++ ++#ifdef CONFIG_PARAVIRT ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ mov %rdi,%rbx ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0x67,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) ++ i = i + 1 ++ .endr ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept USER_PGD_PTRS ++ movb $0x67,i*8(%rdi) ++ i = i + 1 ++ .endr ++ ++#ifdef CONFIG_PARAVIRT ++2: PV_RESTORE_REGS(CLBR_RDI) ++ popq %rbx ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ENDPROC(pax_exit_kernel_user) ++#endif ++ ++.macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++.endm ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++/* ++ * r11: thread_info ++ * rcx, rdx: can be clobbered ++ */ ++ENTRY(pax_erase_kstack) ++ pushq %rdi ++ pushq %rax ++ pushq %r11 ++ ++ GET_THREAD_INFO(%r11) ++ mov TI_lowest_stack(%r11), %rdi ++ mov $-0xBEEF, %rax ++ std ++ ++1: mov %edi, %ecx ++ and $THREAD_SIZE_asm - 1, %ecx ++ shr $3, %ecx ++ repne scasq ++ jecxz 2f ++ ++ cmp $2*8, %ecx ++ jc 2f ++ ++ mov $2*8, %ecx ++ repe scasq ++ jecxz 2f ++ jne 1b ++ ++2: cld ++ mov %esp, %ecx ++ sub %edi, %ecx ++ ++ cmp $THREAD_SIZE_asm, %rcx ++ jb 3f ++ ud2 ++3: ++ ++ shr $3, %ecx ++ rep stosq ++ ++ mov TI_task_thread_sp0(%r11), %rdi ++ sub $256, %rdi ++ mov %rdi, TI_lowest_stack(%r11) ++ ++ popq %r11 ++ popq %rax ++ popq %rdi ++ pax_force_retaddr ++ ret ++ENDPROC(pax_erase_kstack) ++#endif + + .macro TRACE_IRQS_IRETQ offset=ARGOFFSET + #ifdef CONFIG_TRACE_IRQFLAGS +@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64) + .endm + + .macro UNFAKE_STACK_FRAME +- addq $8*6, %rsp +- CFI_ADJUST_CFA_OFFSET -(6*8) ++ addq $8*6 + ARG_SKIP, %rsp ++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP) + .endm + + /* +@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64) + movq %rsp, %rsi + + leaq -RBP(%rsp),%rdi /* arg1 for handler */ +- testl $3, CS(%rdi) ++ testb $3, CS(%rdi) + je 1f + SWAPGS + /* +@@ -355,9 +639,10 @@ ENTRY(save_rest) + movq_cfi r15, R15+16 + movq %r11, 8(%rsp) /* return address */ + FIXUP_TOP_OF_STACK %r11, 16 ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(save_rest) ++ENDPROC(save_rest) + + /* save complete stack frame */ + .pushsection .kprobes.text, "ax" +@@ -386,9 +671,10 @@ ENTRY(save_paranoid) + js 1f /* negative -> in kernel */ + SWAPGS + xorl %ebx,%ebx +-1: ret ++1: pax_force_retaddr_bts ++ ret + CFI_ENDPROC +-END(save_paranoid) ++ENDPROC(save_paranoid) + .popsection + + /* +@@ -410,7 +696,7 @@ ENTRY(ret_from_fork) + + RESTORE_REST + +- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? ++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + je int_ret_from_sys_call + + testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET +@@ -420,7 +706,7 @@ ENTRY(ret_from_fork) + jmp ret_from_sys_call # go to the SYSRET fastpath + + CFI_ENDPROC +-END(ret_from_fork) ++ENDPROC(ret_from_fork) + + /* + * System call entry. Up to 6 arguments in registers are supported. 
+@@ -456,7 +742,7 @@ END(ret_from_fork) + ENTRY(system_call) + CFI_STARTPROC simple + CFI_SIGNAL_FRAME +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET ++ CFI_DEF_CFA rsp,0 + CFI_REGISTER rip,rcx + /*CFI_REGISTER rflags,r11*/ + SWAPGS_UNSAFE_STACK +@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs) + + movq %rsp,PER_CPU_VAR(old_rsp) + movq PER_CPU_VAR(kernel_stack),%rsp ++ SAVE_ARGS 8*6,0 ++ pax_enter_kernel_user + /* + * No need to follow this irqs off/on section - it's straight + * and short: + */ + ENABLE_INTERRUPTS(CLBR_NONE) +- SAVE_ARGS 8,0 + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rcx,RIP-ARGOFFSET(%rsp) + CFI_REL_OFFSET rip,RIP-ARGOFFSET +@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs) + system_call_fastpath: + cmpq $__NR_syscall_max,%rax + ja badsys +- movq %r10,%rcx ++ movq R10-ARGOFFSET(%rsp),%rcx + call *sys_call_table(,%rax,8) # XXX: rip relative + movq %rax,RAX-ARGOFFSET(%rsp) + /* +@@ -503,6 +790,8 @@ sysret_check: + andl %edi,%edx + jnz sysret_careful + CFI_REMEMBER_STATE ++ pax_exit_kernel_user ++ pax_erase_kstack + /* + * sysretq will re-enable interrupts: + */ +@@ -554,14 +843,18 @@ badsys: + * jump back to the normal fast path. + */ + auditsys: +- movq %r10,%r9 /* 6th arg: 4th syscall arg */ ++ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */ + movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ + movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ + movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ + movq %rax,%rsi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ + call audit_syscall_entry ++ ++ pax_erase_kstack ++ + LOAD_ARGS 0 /* reload call-clobbered registers */ ++ pax_set_fptr_mask + jmp system_call_fastpath + + /* +@@ -591,16 +884,20 @@ tracesys: + FIXUP_TOP_OF_STACK %rdi + movq %rsp,%rdi + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + /* + * Reload arg registers from stack in case ptrace changed them. + * We don't reload %rax because syscall_trace_enter() returned + * the value it wants us to use in the table lookup. + */ + LOAD_ARGS ARGOFFSET, 1 ++ pax_set_fptr_mask + RESTORE_REST + cmpq $__NR_syscall_max,%rax + ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ +- movq %r10,%rcx /* fixup for C */ ++ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */ + call *sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) + /* Use IRET because user could have changed frame */ +@@ -612,7 +909,7 @@ tracesys: + GLOBAL(int_ret_from_sys_call) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $3,CS-ARGOFFSET(%rsp) ++ testb $3,CS-ARGOFFSET(%rsp) + je retint_restore_args + movl $_TIF_ALLWORK_MASK,%edi + /* edi: mask to check */ +@@ -623,6 +920,7 @@ GLOBAL(int_with_check) + andl %edi,%edx + jnz int_careful + andl $~TS_COMPAT,TI_status(%rcx) ++ pax_erase_kstack + jmp retint_swapgs + + /* Either reschedule or signal or syscall exit tracking needed. */ +@@ -669,7 +967,7 @@ int_restore_rest: + TRACE_IRQS_OFF + jmp int_with_check + CFI_ENDPROC +-END(system_call) ++ENDPROC(system_call) + + /* + * Certain special system calls that need to save a complete full stack frame. 
+@@ -685,7 +983,7 @@ ENTRY(\label) + call \func + jmp ptregscall_common + CFI_ENDPROC +-END(\label) ++ENDPROC(\label) + .endm + + PTREGSCALL stub_clone, sys_clone, %r8 +@@ -703,9 +1001,10 @@ ENTRY(ptregscall_common) + movq_cfi_restore R12+8, r12 + movq_cfi_restore RBP+8, rbp + movq_cfi_restore RBX+8, rbx ++ pax_force_retaddr + ret $REST_SKIP /* pop extended registers */ + CFI_ENDPROC +-END(ptregscall_common) ++ENDPROC(ptregscall_common) + + ENTRY(stub_execve) + CFI_STARTPROC +@@ -720,7 +1019,7 @@ ENTRY(stub_execve) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_execve) ++ENDPROC(stub_execve) + + /* + * sigreturn is special because it needs to restore all registers on return. +@@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_rt_sigreturn) ++ENDPROC(stub_rt_sigreturn) + + /* + * Build the entry stubs and pointer table with some assembler magic. +@@ -773,7 +1072,7 @@ vector=vector+1 + 2: jmp common_interrupt + .endr + CFI_ENDPROC +-END(irq_entries_start) ++ENDPROC(irq_entries_start) + + .previous + END(interrupt) +@@ -793,6 +1092,16 @@ END(interrupt) + subq $ORIG_RAX-RBP, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP + SAVE_ARGS_IRQ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rdi) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + call \func + .endm + +@@ -824,7 +1133,7 @@ ret_from_intr: + + exit_intr: + GET_THREAD_INFO(%rcx) +- testl $3,CS-ARGOFFSET(%rsp) ++ testb $3,CS-ARGOFFSET(%rsp) + je retint_kernel + + /* Interrupt came from user space */ +@@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */ + * The iretq could re-enable interrupts: + */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ pax_exit_kernel_user + TRACE_IRQS_IRETQ + SWAPGS + jmp restore_args + + retint_restore_args: /* return to kernel space */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ pax_exit_kernel ++ pax_force_retaddr RIP-ARGOFFSET + /* + * The iretq could re-enable interrupts: + */ +@@ -940,7 +1252,7 @@ ENTRY(retint_kernel) + #endif + + CFI_ENDPROC +-END(common_interrupt) ++ENDPROC(common_interrupt) + /* + * End of kprobes section + */ +@@ -956,7 +1268,7 @@ ENTRY(\sym) + interrupt \do_sym + jmp ret_from_intr + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + #ifdef CONFIG_SMP +@@ -1021,12 +1333,22 @@ ENTRY(\sym) + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + call error_entry + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + .macro paranoidzeroentry sym do_sym +@@ -1038,15 +1360,25 @@ ENTRY(\sym) + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + call save_paranoid + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + +-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) ++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12) + .macro paranoidzeroentry_ist sym do_sym ist + ENTRY(\sym) + INTR_FRAME +@@ -1056,14 +1388,30 @@ ENTRY(\sym) + 
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + call save_paranoid + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ ++#ifdef CONFIG_SMP ++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d ++ lea init_tss(%r12), %r12 ++#else ++ lea init_tss(%rip), %r12 ++#endif + subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) + call \do_sym + addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + .macro errorentry sym do_sym +@@ -1074,13 +1422,23 @@ ENTRY(\sym) + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + call error_entry + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + /* error code is on the stack already */ +@@ -1093,13 +1451,23 @@ ENTRY(\sym) + call save_paranoid + DEFAULT_FRAME 0 + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + zeroentry divide_error do_divide_error +@@ -1129,9 +1497,10 @@ gs_change: + 2: mfence /* workaround */ + SWAPGS + popfq_cfi ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(native_load_gs_index) ++ENDPROC(native_load_gs_index) + + .section __ex_table,"a" + .align 8 +@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper) + * Here we are in the child and the registers are set as they were + * at kernel_thread() invocation in the parent. + */ ++ pax_force_fptr %rsi + call *%rsi + # exit + mov %eax, %edi + call do_exit + ud2 # padding for call trace + CFI_ENDPROC +-END(kernel_thread_helper) ++ENDPROC(kernel_thread_helper) + + /* + * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. +@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve) + RESTORE_REST + testq %rax,%rax + je int_ret_from_sys_call +- RESTORE_ARGS + UNFAKE_STACK_FRAME ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(kernel_execve) ++ENDPROC(kernel_execve) + + /* Call softirq on interrupt stack. Interrupts are off. */ + ENTRY(call_softirq) +@@ -1208,9 +1578,10 @@ ENTRY(call_softirq) + CFI_DEF_CFA_REGISTER rsp + CFI_ADJUST_CFA_OFFSET -8 + decl PER_CPU_VAR(irq_count) ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(call_softirq) ++ENDPROC(call_softirq) + + #ifdef CONFIG_XEN + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) + decl PER_CPU_VAR(irq_count) + jmp error_exit + CFI_ENDPROC +-END(xen_do_hypervisor_callback) ++ENDPROC(xen_do_hypervisor_callback) + + /* + * Hypervisor uses this for application faults while it executes. 
+@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback) + SAVE_ALL + jmp error_exit + CFI_ENDPROC +-END(xen_failsafe_callback) ++ENDPROC(xen_failsafe_callback) + + apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ + xen_hvm_callback_vector xen_evtchn_do_upcall +@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit) + TRACE_IRQS_OFF + testl %ebx,%ebx /* swapgs needed? */ + jnz paranoid_restore +- testl $3,CS(%rsp) ++ testb $3,CS(%rsp) + jnz paranoid_userspace ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel ++ TRACE_IRQS_IRETQ 0 ++ SWAPGS_UNSAFE_STACK ++ RESTORE_ALL 8 ++ pax_force_retaddr_bts ++ jmp irq_return ++#endif + paranoid_swapgs: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel_user ++#else ++ pax_exit_kernel ++#endif + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return + paranoid_restore: ++ pax_exit_kernel + TRACE_IRQS_IRETQ 0 + RESTORE_ALL 8 ++ pax_force_retaddr_bts + jmp irq_return + paranoid_userspace: + GET_THREAD_INFO(%rcx) +@@ -1394,7 +1780,7 @@ paranoid_schedule: + TRACE_IRQS_OFF + jmp paranoid_userspace + CFI_ENDPROC +-END(paranoid_exit) ++ENDPROC(paranoid_exit) + + /* + * Exception entry point. This expects an error code/orig_rax on the stack. +@@ -1421,12 +1807,13 @@ ENTRY(error_entry) + movq_cfi r14, R14+8 + movq_cfi r15, R15+8 + xorl %ebx,%ebx +- testl $3,CS+8(%rsp) ++ testb $3,CS+8(%rsp) + je error_kernelspace + error_swapgs: + SWAPGS + error_sti: + TRACE_IRQS_OFF ++ pax_force_retaddr_bts + ret + + /* +@@ -1453,7 +1840,7 @@ bstep_iret: + movq %rcx,RIP+8(%rsp) + jmp error_swapgs + CFI_ENDPROC +-END(error_entry) ++ENDPROC(error_entry) + + + /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ +@@ -1473,7 +1860,7 @@ ENTRY(error_exit) + jnz retint_careful + jmp retint_swapgs + CFI_ENDPROC +-END(error_exit) ++ENDPROC(error_exit) + + + /* runs on exception stack */ +@@ -1485,6 +1872,16 @@ ENTRY(nmi) + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + call save_paranoid + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ + movq %rsp,%rdi + movq $-1,%rsi +@@ -1495,12 +1892,28 @@ ENTRY(nmi) + DISABLE_INTERRUPTS(CLBR_NONE) + testl %ebx,%ebx /* swapgs needed? 
*/ + jnz nmi_restore +- testl $3,CS(%rsp) ++ testb $3,CS(%rsp) + jnz nmi_userspace ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel ++ SWAPGS_UNSAFE_STACK ++ RESTORE_ALL 8 ++ pax_force_retaddr_bts ++ jmp irq_return ++#endif + nmi_swapgs: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel_user ++#else ++ pax_exit_kernel ++#endif + SWAPGS_UNSAFE_STACK ++ RESTORE_ALL 8 ++ jmp irq_return + nmi_restore: ++ pax_exit_kernel + RESTORE_ALL 8 ++ pax_force_retaddr_bts + jmp irq_return + nmi_userspace: + GET_THREAD_INFO(%rcx) +@@ -1529,14 +1942,14 @@ nmi_schedule: + jmp paranoid_exit + CFI_ENDPROC + #endif +-END(nmi) ++ENDPROC(nmi) + + ENTRY(ignore_sysret) + CFI_STARTPROC + mov $-ENOSYS,%eax + sysret + CFI_ENDPROC +-END(ignore_sysret) ++ENDPROC(ignore_sysret) + + /* + * End of kprobes section +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index c9a281f..ce2f317 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */ + static const void *mod_code_newcode; /* holds the text to write to the IP */ + + static unsigned nmi_wait_count; +-static atomic_t nmi_update_count = ATOMIC_INIT(0); ++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0); + + int ftrace_arch_read_dyn_info(char *buf, int size) + { +@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size) + + r = snprintf(buf, size, "%u %u", + nmi_wait_count, +- atomic_read(&nmi_update_count)); ++ atomic_read_unchecked(&nmi_update_count)); + return r; + } + +@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void) + + if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { + smp_rmb(); ++ pax_open_kernel(); + ftrace_mod_code(); +- atomic_inc(&nmi_update_count); ++ pax_close_kernel(); ++ atomic_inc_unchecked(&nmi_update_count); + } + /* Must have previous changes seen before executions */ + smp_mb(); +@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code, + { + unsigned char replaced[MCOUNT_INSN_SIZE]; + ++ ip = ktla_ktva(ip); ++ + /* + * Note: Due to modules and __init, code can + * disappear and change, we need to protect against faulting +@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) + unsigned char old[MCOUNT_INSN_SIZE], *new; + int ret; + +- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); ++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE); + new = ftrace_call_replace(ip, (unsigned long)func); + ret = ftrace_modify_code(ip, old, new); + +@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip, + { + unsigned char code[MCOUNT_INSN_SIZE]; + ++ ip = ktla_ktva(ip); ++ + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + +diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c +index 3bb0850..55a56f4 100644 +--- a/arch/x86/kernel/head32.c ++++ b/arch/x86/kernel/head32.c +@@ -19,6 +19,7 @@ + #include <asm/io_apic.h> + #include <asm/bios_ebda.h> + #include <asm/tlbflush.h> ++#include <asm/boot.h> + + static void __init i386_default_early_setup(void) + { +@@ -33,7 +34,7 @@ void __init i386_start_kernel(void) + { + memblock_init(); + +- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); ++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + + #ifdef CONFIG_BLK_DEV_INITRD + /* Reserve INITRD */ +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index ce0be7c..c41476e 100644 +--- a/arch/x86/kernel/head_32.S 
++++ b/arch/x86/kernel/head_32.S +@@ -25,6 +25,12 @@ + /* Physical address */ + #define pa(X) ((X) - __PAGE_OFFSET) + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) ((X) - __PAGE_OFFSET) ++#endif ++ + /* + * References to members of the new_cpu_data structure. + */ +@@ -54,11 +60,7 @@ + * and small than max_low_pfn, otherwise will waste some page table entries + */ + +-#if PTRS_PER_PMD > 1 +-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) +-#else +-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) +-#endif ++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) + + /* Number of possible pages in the lowmem region */ + LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) +@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE + RESERVE_BRK(pagetables, INIT_MAP_SIZE) + + /* ++ * Real beginning of normal "text" segment ++ */ ++ENTRY(stext) ++ENTRY(_stext) ++ ++/* + * 32-bit kernel entrypoint; only used by the boot CPU. On entry, + * %esi points to the real-mode code as a 32-bit pointer. + * CS and DS must be 4 GB flat segments, but we don't depend on +@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) + * can. + */ + __HEAD ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jmp startup_32 ++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ ++.fill PAGE_SIZE-5,1,0xcc ++#endif ++ + ENTRY(startup_32) + movl pa(stack_start),%ecx + +@@ -105,6 +120,57 @@ ENTRY(startup_32) + 2: + leal -__PAGE_OFFSET(%ecx),%esp + ++#ifdef CONFIG_SMP ++ movl $pa(cpu_gdt_table),%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,__KERNEL_PERCPU + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_PERCPU + 4(%edi) ++ movb %ah,__KERNEL_PERCPU + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,__KERNEL_PERCPU + 0(%edi) ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++1: ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi) ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi) ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ movl $pa(boot_gdt),%edi ++ movl $__LOAD_PHYSICAL_ADDR,%eax ++ movw %ax,__BOOT_CS + 2(%edi) ++ rorl $16,%eax ++ movb %al,__BOOT_CS + 4(%edi) ++ movb %ah,__BOOT_CS + 7(%edi) ++ rorl $16,%eax ++ ++ ljmp $(__BOOT_CS),$1f ++1: ++ ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++ addl $__PAGE_OFFSET,%eax ++1: ++ movw %ax,__KERNEL_CS + 2(%edi) ++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_CS + 4(%edi) ++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi) ++ movb %ah,__KERNEL_CS + 7(%edi) ++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi) ++ rorl $16,%eax ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ + /* + * Clear BSS first so that there are no surprises... 
+ */ +@@ -195,8 +261,11 @@ ENTRY(startup_32) + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#else ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#endif + #else /* Not PAE */ + + page_pde_offset = (__PAGE_OFFSET >> 20); +@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(initial_page_table+0xffc) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc) ++#else ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc) ++#endif + #endif + + #ifdef CONFIG_PARAVIRT +@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); + cmpl $num_subarch_entries, %eax + jae bad_subarch + +- movl pa(subarch_entries)(,%eax,4), %eax +- subl $__PAGE_OFFSET, %eax +- jmp *%eax ++ jmp *pa(subarch_entries)(,%eax,4) + + bad_subarch: + WEAK(lguest_entry) +@@ -255,10 +325,10 @@ WEAK(xen_entry) + __INITDATA + + subarch_entries: +- .long default_entry /* normal x86/PC */ +- .long lguest_entry /* lguest hypervisor */ +- .long xen_entry /* Xen hypervisor */ +- .long default_entry /* Moorestown MID */ ++ .long ta(default_entry) /* normal x86/PC */ ++ .long ta(lguest_entry) /* lguest hypervisor */ ++ .long ta(xen_entry) /* Xen hypervisor */ ++ .long ta(default_entry) /* Moorestown MID */ + num_subarch_entries = (. - subarch_entries) / 4 + .previous + #else +@@ -312,6 +382,7 @@ default_entry: + orl %edx,%eax + movl %eax,%cr4 + ++#ifdef CONFIG_X86_PAE + testb $X86_CR4_PAE, %al # check if PAE is enabled + jz 6f + +@@ -340,6 +411,9 @@ default_entry: + /* Make changes effective */ + wrmsr + ++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) ++#endif ++ + 6: + + /* +@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP + 1: movl $(__KERNEL_DS),%eax # reload all the segment registers + movl %eax,%ss # after changing gdt. 
+ +- movl $(__USER_DS),%eax # DS/ES contains default USER segment ++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment + movl %eax,%ds + movl %eax,%es + +@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP + */ + cmpb $0,ready + jne 1f +- movl $gdt_page,%eax ++ movl $cpu_gdt_table,%eax + movl $stack_canary,%ecx ++#ifdef CONFIG_SMP ++ addl $__per_cpu_load,%ecx ++#endif + movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) + shrl $16, %ecx + movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) + movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) + 1: +-#endif + movl $(__KERNEL_STACK_CANARY),%eax ++#elif defined(CONFIG_PAX_MEMORY_UDEREF) ++ movl $(__USER_DS),%eax ++#else ++ xorl %eax,%eax ++#endif + movl %eax,%gs + + xorl %eax,%eax # Clear LDT +@@ -558,22 +639,22 @@ early_page_fault: + jmp early_fault + + early_fault: +- cld + #ifdef CONFIG_PRINTK ++ cmpl $1,%ss:early_recursion_flag ++ je hlt_loop ++ incl %ss:early_recursion_flag ++ cld + pusha + movl $(__KERNEL_DS),%eax + movl %eax,%ds + movl %eax,%es +- cmpl $2,early_recursion_flag +- je hlt_loop +- incl early_recursion_flag + movl %cr2,%eax + pushl %eax + pushl %edx /* trapno */ + pushl $fault_msg + call printk ++; call dump_stack + #endif +- call dump_stack + hlt_loop: + hlt + jmp hlt_loop +@@ -581,8 +662,11 @@ hlt_loop: + /* This is the default interrupt "handler" :-) */ + ALIGN + ignore_int: +- cld + #ifdef CONFIG_PRINTK ++ cmpl $2,%ss:early_recursion_flag ++ je hlt_loop ++ incl %ss:early_recursion_flag ++ cld + pushl %eax + pushl %ecx + pushl %edx +@@ -591,9 +675,6 @@ ignore_int: + movl $(__KERNEL_DS),%eax + movl %eax,%ds + movl %eax,%es +- cmpl $2,early_recursion_flag +- je hlt_loop +- incl early_recursion_flag + pushl 16(%esp) + pushl 24(%esp) + pushl 32(%esp) +@@ -622,29 +703,43 @@ ENTRY(initial_code) + /* + * BSS section + */ +-__PAGE_ALIGNED_BSS +- .align PAGE_SIZE + #ifdef CONFIG_X86_PAE ++.section .initial_pg_pmd,"a",@progbits + initial_pg_pmd: + .fill 1024*KPMDS,4,0 + #else ++.section .initial_page_table,"a",@progbits + ENTRY(initial_page_table) + .fill 1024,4,0 + #endif ++.section .initial_pg_fixmap,"a",@progbits + initial_pg_fixmap: + .fill 1024,4,0 ++.section .empty_zero_page,"a",@progbits + ENTRY(empty_zero_page) + .fill 4096,1,0 ++.section .swapper_pg_dir,"a",@progbits + ENTRY(swapper_pg_dir) ++#ifdef CONFIG_X86_PAE ++ .fill 4,8,0 ++#else + .fill 1024,4,0 ++#endif ++ ++/* ++ * The IDT has to be page-aligned to simplify the Pentium ++ * F0 0F bug workaround.. We have a special link segment ++ * for this. ++ */ ++.section .idt,"a",@progbits ++ENTRY(idt_table) ++ .fill 256,8,0 + + /* + * This starts the data section. + */ + #ifdef CONFIG_X86_PAE +-__PAGE_ALIGNED_DATA +- /* Page-aligned for the benefit of paravirt? 
*/ +- .align PAGE_SIZE ++.section .initial_page_table,"a",@progbits + ENTRY(initial_page_table) + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ + # if KPMDS == 3 +@@ -663,18 +758,27 @@ ENTRY(initial_page_table) + # error "Kernel PMDs should be 1, 2 or 3" + # endif + .align PAGE_SIZE /* needs to be page-sized too */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ENTRY(cpu_pgd) ++ .rept NR_CPUS ++ .fill 4,8,0 ++ .endr ++#endif ++ + #endif + + .data + .balign 4 + ENTRY(stack_start) +- .long init_thread_union+THREAD_SIZE ++ .long init_thread_union+THREAD_SIZE-8 + ++ready: .byte 0 ++ ++.section .rodata,"a",@progbits + early_recursion_flag: + .long 0 + +-ready: .byte 0 +- + int_msg: + .asciz "Unknown interrupt or fault at: %p %p %p\n" + +@@ -707,7 +811,7 @@ fault_msg: + .word 0 # 32 bit align gdt_desc.address + boot_gdt_descr: + .word __BOOT_DS+7 +- .long boot_gdt - __PAGE_OFFSET ++ .long pa(boot_gdt) + + .word 0 # 32-bit align idt_desc.address + idt_descr: +@@ -718,7 +822,7 @@ idt_descr: + .word 0 # 32 bit align gdt_desc.address + ENTRY(early_gdt_descr) + .word GDT_ENTRIES*8-1 +- .long gdt_page /* Overwritten for secondary CPUs */ ++ .long cpu_gdt_table /* Overwritten for secondary CPUs */ + + /* + * The boot_gdt must mirror the equivalent in setup.S and is +@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr) + .align L1_CACHE_BYTES + ENTRY(boot_gdt) + .fill GDT_ENTRY_BOOT_CS,8,0 +- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ +- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ ++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ ++ ++ .align PAGE_SIZE_asm ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x0000000000000000 /* 0x0b reserved */ ++ .quad 0x0000000000000000 /* 0x13 reserved */ ++ .quad 0x0000000000000000 /* 0x1b reserved */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ ++#else ++ .quad 0x0000000000000000 /* 0x20 unused */ ++#endif ++ ++ .quad 0x0000000000000000 /* 0x28 unused */ ++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ ++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ ++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ ++ .quad 0x0000000000000000 /* 0x4b reserved */ ++ .quad 0x0000000000000000 /* 0x53 reserved */ ++ .quad 0x0000000000000000 /* 0x5b reserved */ ++ ++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ ++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ ++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ ++ ++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */ ++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ ++ ++ /* ++ * Segments used for calling PnP BIOS have byte granularity. ++ * The code segments and data segments have fixed 64k limits, ++ * the transfer segment sizes are set at run time. ++ */ ++ .quad 0x00409b000000ffff /* 0x90 32-bit code */ ++ .quad 0x00009b000000ffff /* 0x98 16-bit code */ ++ .quad 0x000093000000ffff /* 0xa0 16-bit data */ ++ .quad 0x0000930000000000 /* 0xa8 16-bit data */ ++ .quad 0x0000930000000000 /* 0xb0 16-bit data */ ++ ++ /* ++ * The APM segments have byte granularity and their bases ++ * are set at run time. All have 64k limits. 
++ */ ++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */ ++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ ++ .quad 0x004093000000ffff /* 0xc8 APM DS data */ ++ ++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ ++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */ ++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */ ++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ ++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ ++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ ++ ++ /* Be sure this is zeroed to avoid false validations in Xen */ ++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0 ++ .endr +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S +index e11e394..9aebc5d 100644 +--- a/arch/x86/kernel/head_64.S ++++ b/arch/x86/kernel/head_64.S +@@ -19,6 +19,8 @@ + #include <asm/cache.h> + #include <asm/processor-flags.h> + #include <asm/percpu.h> ++#include <asm/cpufeature.h> ++#include <asm/alternative-asm.h> + + #ifdef CONFIG_PARAVIRT + #include <asm/asm-offsets.h> +@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) + L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) + L4_START_KERNEL = pgd_index(__START_KERNEL_map) + L3_START_KERNEL = pud_index(__START_KERNEL_map) ++L4_VMALLOC_START = pgd_index(VMALLOC_START) ++L3_VMALLOC_START = pud_index(VMALLOC_START) ++L4_VMALLOC_END = pgd_index(VMALLOC_END) ++L3_VMALLOC_END = pud_index(VMALLOC_END) ++L4_VMEMMAP_START = pgd_index(VMEMMAP_START) ++L3_VMEMMAP_START = pud_index(VMEMMAP_START) + + .text + __HEAD +@@ -85,35 +93,23 @@ startup_64: + */ + addq %rbp, init_level4_pgt + 0(%rip) + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) + + addq %rbp, level3_ident_pgt + 0(%rip) ++#ifndef CONFIG_XEN ++ addq %rbp, level3_ident_pgt + 8(%rip) ++#endif + +- addq %rbp, level3_kernel_pgt + (510*8)(%rip) +- addq %rbp, level3_kernel_pgt + (511*8)(%rip) ++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) ++ ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) + + addq %rbp, level2_fixmap_pgt + (506*8)(%rip) +- +- /* Add an Identity mapping if I am above 1G */ +- leaq _text(%rip), %rdi +- andq $PMD_PAGE_MASK, %rdi +- +- movq %rdi, %rax +- shrq $PUD_SHIFT, %rax +- andq $(PTRS_PER_PUD - 1), %rax +- jz ident_complete +- +- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx +- leaq level3_ident_pgt(%rip), %rbx +- movq %rdx, 0(%rbx, %rax, 8) +- +- movq %rdi, %rax +- shrq $PMD_SHIFT, %rax +- andq $(PTRS_PER_PMD - 1), %rax +- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx +- leaq level2_spare_pgt(%rip), %rbx +- movq %rdx, 0(%rbx, %rax, 8) +-ident_complete: ++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip) + + /* + * Fixup the kernel text+data virtual addresses. Note that +@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64) + * after the boot processor executes this code. + */ + +- /* Enable PAE mode and PGE */ +- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax ++ /* Enable PAE mode and PSE/PGE */ ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax + movq %rax, %cr4 + + /* Setup early boot stage 4 level pagetables. */ +@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64) + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_SCE, %eax /* Enable System Call */ +- btl $20,%edi /* No Execute supported? 
*/ ++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */ + jnc 1f + btsl $_EFER_NX, %eax ++ leaq init_level4_pgt(%rip), %rdi ++#ifndef CONFIG_EFI ++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi) ++#endif ++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi) ++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi) ++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi) ++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) + 1: wrmsr /* Make changes effective */ + + /* Setup cr0 */ +@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64) + * jump. In addition we need to ensure %cs is set so we make this + * a far return. + */ ++ pax_set_fptr_mask + movq initial_code(%rip),%rax + pushq $0 # fake return address to stop unwinder + pushq $__KERNEL_CS # set correct cs +@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64) + bad_address: + jmp bad_address + +- .section ".init.text","ax" ++ __INIT + #ifdef CONFIG_EARLY_PRINTK + .globl early_idt_handlers + early_idt_handlers: +@@ -314,18 +319,23 @@ ENTRY(early_idt_handler) + #endif /* EARLY_PRINTK */ + 1: hlt + jmp 1b ++ .previous + + #ifdef CONFIG_EARLY_PRINTK ++ __INITDATA + early_recursion_flag: + .long 0 ++ .previous + ++ .section .rodata,"a",@progbits + early_idt_msg: + .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" + early_idt_ripmsg: + .asciz "RIP %s\n" ++ .previous + #endif /* CONFIG_EARLY_PRINTK */ +- .previous + ++ .section .rodata,"a",@progbits + #define NEXT_PAGE(name) \ + .balign PAGE_SIZE; \ + ENTRY(name) +@@ -338,7 +348,6 @@ ENTRY(name) + i = i + 1 ; \ + .endr + +- .data + /* + * This default setting generates an ident mapping at address 0x100000 + * and a mapping for the kernel that precisely maps virtual address +@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt) + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMALLOC_START*8, 0 ++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMALLOC_END*8, 0 ++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0 ++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_START_KERNEL*8, 0 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ + .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++NEXT_PAGE(cpu_pgd) ++ .rept NR_CPUS ++ .fill 512,8,0 ++ .endr ++#endif ++ + NEXT_PAGE(level3_ident_pgt) + .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++#ifdef CONFIG_XEN + .fill 511,8,0 ++#else ++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE ++ .fill 510,8,0 ++#endif ++ ++NEXT_PAGE(level3_vmalloc_start_pgt) ++ .fill 512,8,0 ++ ++NEXT_PAGE(level3_vmalloc_end_pgt) ++ .fill 512,8,0 ++ ++NEXT_PAGE(level3_vmemmap_pgt) ++ .fill L3_VMEMMAP_START,8,0 ++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + + NEXT_PAGE(level3_kernel_pgt) + .fill L3_START_KERNEL,8,0 +@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt) + .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE + .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE + ++NEXT_PAGE(level2_vmemmap_pgt) ++ .fill 512,8,0 ++ + NEXT_PAGE(level2_fixmap_pgt) +- .fill 506,8,0 +- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE +- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ +- .fill 5,8,0 ++ .fill 507,8,0 ++ .quad level1_vsyscall_pgt - __START_KERNEL_map + 
_PAGE_TABLE ++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ ++ .fill 4,8,0 + +-NEXT_PAGE(level1_fixmap_pgt) ++NEXT_PAGE(level1_vsyscall_pgt) + .fill 512,8,0 + +-NEXT_PAGE(level2_ident_pgt) +- /* Since I easily can, map the first 1G. ++ /* Since I easily can, map the first 2G. + * Don't set NX because code runs from these pages. + */ +- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) ++NEXT_PAGE(level2_ident_pgt) ++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) + + NEXT_PAGE(level2_kernel_pgt) + /* +@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt) + * If you want to increase this then increase MODULES_VADDR + * too.) + */ +- PMDS(0, __PAGE_KERNEL_LARGE_EXEC, +- KERNEL_IMAGE_SIZE/PMD_SIZE) +- +-NEXT_PAGE(level2_spare_pgt) +- .fill 512, 8, 0 ++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) + + #undef PMDS + #undef NEXT_PAGE + +- .data ++ .align PAGE_SIZE ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */ ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */ ++ .quad 0x00cffb000000ffff /* __USER32_CS */ ++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ ++ .quad 0x00affb000000ffff /* __USER_CS */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ ++#else ++ .quad 0x0 /* unused */ ++#endif ++ ++ .quad 0,0 /* TSS */ ++ .quad 0,0 /* LDT */ ++ .quad 0,0,0 /* three TLS descriptors */ ++ .quad 0x0000f40000000000 /* node/CPU stored in limit */ ++ /* asm/segment.h:GDT_ENTRIES must match this */ ++ ++ /* zero the remaining page */ ++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 ++ .endr ++ + .align 16 + .globl early_gdt_descr + early_gdt_descr: + .word GDT_ENTRIES*8-1 + early_gdt_descr_base: +- .quad INIT_PER_CPU_VAR(gdt_page) ++ .quad cpu_gdt_table + + ENTRY(phys_base) + /* This must match the first entry in level2_kernel_pgt */ + .quad 0x0000000000000000 + + #include "../../x86/xen/xen-head.S" +- +- .section .bss, "aw", @nobits ++ ++ .section .rodata,"a",@progbits + .align L1_CACHE_BYTES + ENTRY(idt_table) +- .skip IDT_ENTRIES * 16 ++ .fill 512,8,0 + + __PAGE_ALIGNED_BSS + .align PAGE_SIZE +diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c +index 9c3bd4a..e1d9b35 100644 +--- a/arch/x86/kernel/i386_ksyms_32.c ++++ b/arch/x86/kernel/i386_ksyms_32.c +@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); + EXPORT_SYMBOL(cmpxchg8b_emu); + #endif + ++EXPORT_SYMBOL_GPL(cpu_gdt_table); ++ + /* Networking helper routines. 
*/ + EXPORT_SYMBOL(csum_partial_copy_generic); ++EXPORT_SYMBOL(csum_partial_copy_generic_to_user); ++EXPORT_SYMBOL(csum_partial_copy_generic_from_user); + + EXPORT_SYMBOL(__get_user_1); + EXPORT_SYMBOL(__get_user_2); +@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr); + + EXPORT_SYMBOL(csum_partial); + EXPORT_SYMBOL(empty_zero_page); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); ++#endif +diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c +index 739d859..d1d6be7 100644 +--- a/arch/x86/kernel/i387.c ++++ b/arch/x86/kernel/i387.c +@@ -188,6 +188,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset) + + int xfpregs_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ void *kbuf, void __user *ubuf) __size_overflow(4); ++int xfpregs_get(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + int ret; +@@ -207,6 +210,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, + + int xfpregs_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ const void *kbuf, const void __user *ubuf) __size_overflow(4); ++int xfpregs_set(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) + { + int ret; +@@ -240,6 +246,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, + + int xstateregs_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ void *kbuf, void __user *ubuf) __size_overflow(4); ++int xstateregs_get(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + int ret; +@@ -269,6 +278,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, + + int xstateregs_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ const void *kbuf, const void __user *ubuf) __size_overflow(4); ++int xstateregs_set(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) + { + int ret; +@@ -439,6 +451,9 @@ static void convert_to_fxsr(struct task_struct *tsk, + + int fpregs_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ void *kbuf, void __user *ubuf) __size_overflow(3,4); ++int fpregs_get(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + struct user_i387_ia32_struct env; +@@ -471,6 +486,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, + + int fpregs_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ const void *kbuf, const void __user *ubuf) __size_overflow(3,4); ++int fpregs_set(struct task_struct *target, const struct user_regset *regset, ++ unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) + { + struct user_i387_ia32_struct env; +@@ -619,6 +637,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) + } + + static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, ++ unsigned int size) __size_overflow(2); ++static 
int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, + unsigned int size) + { + struct task_struct *tsk = current; +diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c +index 6104852..6114160 100644 +--- a/arch/x86/kernel/i8259.c ++++ b/arch/x86/kernel/i8259.c +@@ -210,7 +210,7 @@ spurious_8259A_irq: + "spurious 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + /* + * Theoretically we do not have to handle this IRQ, + * but in Linux this does not cause problems and is +diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c +index 43e9ccf..44ccf6f 100644 +--- a/arch/x86/kernel/init_task.c ++++ b/arch/x86/kernel/init_task.c +@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry.. + */ +-union thread_union init_thread_union __init_task_data = +- { INIT_THREAD_INFO(init_task) }; ++union thread_union init_thread_union __init_task_data; + + /* + * Initial task structure. +@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task); + * section. Since TSS's are completely CPU-local, we want them + * on exact cacheline boundaries, to eliminate cacheline ping-pong. + */ +-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; +- ++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS }; ++EXPORT_SYMBOL(init_tss); +diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c +index 8c96897..be66bfa 100644 +--- a/arch/x86/kernel/ioport.c ++++ b/arch/x86/kernel/ioport.c +@@ -6,6 +6,7 @@ + #include <linux/sched.h> + #include <linux/kernel.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/errno.h> + #include <linux/types.h> + #include <linux/ioport.h> +@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) + + if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) + return -EINVAL; ++#ifdef CONFIG_GRKERNSEC_IO ++ if (turn_on && grsec_disable_privio) { ++ gr_handle_ioperm(); ++ return -EPERM; ++ } ++#endif + if (turn_on && !capable(CAP_SYS_RAWIO)) + return -EPERM; + +@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) + * because the ->io_bitmap_max value must match the bitmap + * contents: + */ +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + + if (turn_on) + bitmap_clear(t->io_bitmap_ptr, from, num); +@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs) + return -EINVAL; + /* Trying to gain more privileges? 
*/ + if (level > old) { ++#ifdef CONFIG_GRKERNSEC_IO ++ if (grsec_disable_privio) { ++ gr_handle_iopl(); ++ return -EPERM; ++ } ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + } +diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c +index 429e0c9..17b3ece 100644 +--- a/arch/x86/kernel/irq.c ++++ b/arch/x86/kernel/irq.c +@@ -18,7 +18,7 @@ + #include <asm/mce.h> + #include <asm/hw_irq.h> + +-atomic_t irq_err_count; ++atomic_unchecked_t irq_err_count; + + /* Function pointer for generic interrupt vector handling */ + void (*x86_platform_ipi_callback)(void) = NULL; +@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) + seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); + seq_printf(p, " Machine check polls\n"); + #endif +- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); ++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); + #if defined(CONFIG_X86_IO_APIC) +- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); ++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); + #endif + return 0; + } +@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu) + + u64 arch_irq_stat(void) + { +- u64 sum = atomic_read(&irq_err_count); ++ u64 sum = atomic_read_unchecked(&irq_err_count); + + #ifdef CONFIG_X86_IO_APIC +- sum += atomic_read(&irq_mis_count); ++ sum += atomic_read_unchecked(&irq_mis_count); + #endif + return sum; + } +diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c +index 7209070..cbcd71a 100644 +--- a/arch/x86/kernel/irq_32.c ++++ b/arch/x86/kernel/irq_32.c +@@ -36,7 +36,7 @@ static int check_stack_overflow(void) + __asm__ __volatile__("andl %%esp,%0" : + "=r" (sp) : "0" (THREAD_SIZE - 1)); + +- return sp < (sizeof(struct thread_info) + STACK_WARN); ++ return sp < STACK_WARN; + } + + static void print_stack_overflow(void) +@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { } + * per-CPU IRQ handling contexts (thread information and stack) + */ + union irq_ctx { +- struct thread_info tinfo; +- u32 stack[THREAD_SIZE/sizeof(u32)]; ++ unsigned long previous_esp; ++ u32 stack[THREAD_SIZE/sizeof(u32)]; + } __attribute__((aligned(THREAD_SIZE))); + + static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); +@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack) + static inline int + execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + { +- union irq_ctx *curctx, *irqctx; ++ union irq_ctx *irqctx; + u32 *isp, arg1, arg2; + +- curctx = (union irq_ctx *) current_thread_info(); + irqctx = __this_cpu_read(hardirq_ctx); + + /* +@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + * handler) we can't do that and just have to keep using the + * current stack (which is the irq stack already after all) + */ +- if (unlikely(curctx == irqctx)) ++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE)) + return 0; + + /* build the stack frame on the IRQ stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); +- irqctx->tinfo.task = curctx->tinfo.task; +- irqctx->tinfo.previous_esp = current_stack_pointer; ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); ++ irqctx->previous_esp = current_stack_pointer; + +- /* +- * Copy the softirq bits in preempt_count so that the +- * softirq checks work in the hardirq context. 
+- */ +- irqctx->tinfo.preempt_count = +- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | +- (curctx->tinfo.preempt_count & SOFTIRQ_MASK); ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(MAKE_MM_SEG(0)); ++#endif + + if (unlikely(overflow)) + call_on_stack(print_stack_overflow, isp); +@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + : "0" (irq), "1" (desc), "2" (isp), + "D" (desc->handle_irq) + : "memory", "cc", "ecx"); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + return 1; + } + +@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + */ + void __cpuinit irq_ctx_init(int cpu) + { +- union irq_ctx *irqctx; +- + if (per_cpu(hardirq_ctx, cpu)) + return; + +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), +- THREAD_FLAGS, +- THREAD_ORDER)); +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); +- irqctx->tinfo.cpu = cpu; +- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); +- +- per_cpu(hardirq_ctx, cpu) = irqctx; +- +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), +- THREAD_FLAGS, +- THREAD_ORDER)); +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); +- irqctx->tinfo.cpu = cpu; +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); +- +- per_cpu(softirq_ctx, cpu) = irqctx; ++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); ++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); + + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); +@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu) + asmlinkage void do_softirq(void) + { + unsigned long flags; +- struct thread_info *curctx; + union irq_ctx *irqctx; + u32 *isp; + +@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void) + local_irq_save(flags); + + if (local_softirq_pending()) { +- curctx = current_thread_info(); + irqctx = __this_cpu_read(softirq_ctx); +- irqctx->tinfo.task = curctx->task; +- irqctx->tinfo.previous_esp = current_stack_pointer; ++ irqctx->previous_esp = current_stack_pointer; + + /* build the stack frame on the softirq stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(MAKE_MM_SEG(0)); ++#endif + + call_on_stack(__do_softirq, isp); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + /* + * Shouldn't happen, we returned above if in_interrupt(): + */ +diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c +index 69bca46..0bac999 100644 +--- a/arch/x86/kernel/irq_64.c ++++ b/arch/x86/kernel/irq_64.c +@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) + #ifdef CONFIG_DEBUG_STACKOVERFLOW + u64 curbase = (u64)task_stack_page(current); + +- if (user_mode_vm(regs)) ++ if (user_mode(regs)) + return; + + WARN_ONCE(regs->sp >= curbase && +diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c +index faba577..93b9e71 100644 +--- a/arch/x86/kernel/kgdb.c ++++ b/arch/x86/kernel/kgdb.c +@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) + #ifdef CONFIG_X86_32 + switch (regno) { + case GDB_SS: +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + *(unsigned long *)mem = __KERNEL_DS; + break; + case GDB_SP: +- if (!user_mode_vm(regs)) 
++ if (!user_mode(regs)) + *(unsigned long *)mem = kernel_stack_pointer(regs); + break; + case GDB_GS: +@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + case 'k': + /* clear the trace bit */ + linux_regs->flags &= ~X86_EFLAGS_TF; +- atomic_set(&kgdb_cpu_doing_single_step, -1); ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); + + /* set the trace bit if we're stepping */ + if (remcomInBuffer[0] == 's') { + linux_regs->flags |= X86_EFLAGS_TF; +- atomic_set(&kgdb_cpu_doing_single_step, ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); + } + +@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) + + switch (cmd) { + case DIE_DEBUG: +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { + if (user_mode(regs)) + return single_step_cont(regs, args); + break; +diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c +index 7da647d..56fe348 100644 +--- a/arch/x86/kernel/kprobes.c ++++ b/arch/x86/kernel/kprobes.c +@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) + } __attribute__((packed)) *insn; + + insn = (struct __arch_relative_insn *)from; ++ ++ pax_open_kernel(); + insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); + insn->op = op; ++ pax_close_kernel(); + } + + /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ +@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes) + kprobe_opcode_t opcode; + kprobe_opcode_t *orig_opcodes = opcodes; + +- if (search_exception_tables((unsigned long)opcodes)) ++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes))) + return 0; /* Page fault may occur on this address. */ + + retry: +@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) + } + } + insn_get_length(&insn); ++ pax_open_kernel(); + memcpy(dest, insn.kaddr, insn.length); ++ pax_close_kernel(); + + #ifdef CONFIG_X86_64 + if (insn_rip_relative(&insn)) { +@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) + (u8 *) dest; + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ + disp = (u8 *) dest + insn_offset_displacement(&insn); ++ pax_open_kernel(); + *(s32 *) disp = (s32) newdisp; ++ pax_close_kernel(); + } + #endif + return insn.length; +@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) + */ + __copy_instruction(p->ainsn.insn, p->addr, 0); + +- if (can_boost(p->addr)) ++ if (can_boost(ktla_ktva(p->addr))) + p->ainsn.boostable = 0; + else + p->ainsn.boostable = -1; + +- p->opcode = *p->addr; ++ p->opcode = *(ktla_ktva(p->addr)); + } + + int __kprobes arch_prepare_kprobe(struct kprobe *p) +@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, + * nor set current_kprobe, because it doesn't use single + * stepping. 
+ */ +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + preempt_enable_no_resched(); + return; + } +@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, + if (p->opcode == BREAKPOINT_INSTRUCTION) + regs->ip = (unsigned long)p->addr; + else +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + } + + /* +@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) + setup_singlestep(p, regs, kcb, 0); + return 1; + } +- } else if (*addr != BREAKPOINT_INSTRUCTION) { ++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed +@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void) + " movq %rax, 152(%rsp)\n" + RESTORE_REGS_STRING + " popfq\n" ++#ifdef KERNEXEC_PLUGIN ++ " btsq $63,(%rsp)\n" ++#endif + #else + " pushf\n" + SAVE_REGS_STRING +@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p, + struct pt_regs *regs, struct kprobe_ctlblk *kcb) + { + unsigned long *tos = stack_addr(regs); +- unsigned long copy_ip = (unsigned long)p->ainsn.insn; ++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); + unsigned long orig_ip = (unsigned long)p->addr; + kprobe_opcode_t *insn = p->ainsn.insn; + +@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + struct die_args *args = data; + int ret = NOTIFY_DONE; + +- if (args->regs && user_mode_vm(args->regs)) ++ if (args->regs && user_mode(args->regs)) + return ret; + + switch (val) { +@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) + * Verify if the address gap is in 2GB range, because this uses + * a relative jump. 
+ */ +- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; ++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE; + if (abs(rel) > 0x7fffffff) + return -ERANGE; + +@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) + synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); + + /* Set probe function call */ +- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); ++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback)); + + /* Set returning jmp instruction at the tail of out-of-line buffer */ + synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, +- (u8 *)op->kp.addr + op->optinsn.size); ++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size); + + flush_icache_range((unsigned long) buf, + (unsigned long) buf + TMPL_END_IDX + +@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, + ((long)op->kp.addr + RELATIVEJUMP_SIZE)); + + /* Backup instructions which will be replaced by jump address */ +- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, ++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE, + RELATIVE_ADDR_SIZE); + + insn_buf[0] = RELATIVEJUMP_OPCODE; +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index a9c2116..a52d4fc 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void) + pv_mmu_ops.set_pud = kvm_set_pud; + #if PAGETABLE_LEVELS == 4 + pv_mmu_ops.set_pgd = kvm_set_pgd; ++ pv_mmu_ops.set_pgd_batched = kvm_set_pgd; + #endif + #endif + pv_mmu_ops.flush_tlb_user = kvm_flush_tlb; +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index ea69726..8b497c9 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) + if (reload) { + #ifdef CONFIG_SMP + preempt_disable(); +- load_LDT(pc); ++ load_LDT_nolock(pc); + if (!cpumask_equal(mm_cpumask(current->mm), + cpumask_of(smp_processor_id()))) + smp_call_function(flush_ldt, current->mm, 1); + preempt_enable(); + #else +- load_LDT(pc); ++ load_LDT_nolock(pc); + #endif + } + if (oldsize) { +@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) + return err; + + for (i = 0; i < old->size; i++) +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); ++ write_ldt_entry(new->ldt, i, old->ldt + i); + return 0; + } + +@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) + retval = copy_ldt(&mm->context, &old_mm->context); + mutex_unlock(&old_mm->context.lock); + } ++ ++ if (tsk == current) { ++ mm->context.vdso = 0; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ mm->context.user_cs_base = 0UL; ++ mm->context.user_cs_limit = ~0UL; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpus_clear(mm->context.cpu_user_cs_mask); ++#endif ++ ++#endif ++#endif ++ ++ } ++ + return retval; + } + +@@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm) + } + } + ++static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2); + static int read_ldt(void __user *ptr, unsigned long bytecount) + { + int err; +@@ -230,6 +249,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) + } + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { ++ 
error = -EINVAL; ++ goto out_unlock; ++ } ++#endif ++ + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; +diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c +index a3fa43b..8966f4c 100644 +--- a/arch/x86/kernel/machine_kexec_32.c ++++ b/arch/x86/kernel/machine_kexec_32.c +@@ -27,7 +27,7 @@ + #include <asm/cacheflush.h> + #include <asm/debugreg.h> + +-static void set_idt(void *newidt, __u16 limit) ++static void set_idt(struct desc_struct *newidt, __u16 limit) + { + struct desc_ptr curidt; + +@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit) + } + + +-static void set_gdt(void *newgdt, __u16 limit) ++static void set_gdt(struct desc_struct *newgdt, __u16 limit) + { + struct desc_ptr curgdt; + +@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image) + } + + control_page = page_address(image->control_code_page); +- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); ++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); + + relocate_kernel_ptr = control_page; + page_list[PA_CONTROL_PAGE] = __pa(control_page); +diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c +index 3ca42d0..79d24cd 100644 +--- a/arch/x86/kernel/microcode_intel.c ++++ b/arch/x86/kernel/microcode_intel.c +@@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) + return ret; + } + ++static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3); + static int get_ucode_user(void *to, const void *from, size_t n) + { +- return copy_from_user(to, from, n); ++ return copy_from_user(to, (const void __force_user *)from, n); + } + + static enum ucode_state + request_microcode_user(int cpu, const void __user *buf, size_t size) + { +- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); ++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user); + } + + static void microcode_fini_cpu(int cpu) +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c +index 925179f..267ac7a 100644 +--- a/arch/x86/kernel/module.c ++++ b/arch/x86/kernel/module.c +@@ -36,15 +36,60 @@ + #define DEBUGP(fmt...) + #endif + +-void *module_alloc(unsigned long size) ++static inline void *__module_alloc(unsigned long size, pgprot_t prot) + { +- if (PAGE_ALIGN(size) > MODULES_LEN) ++ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN) + return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, +- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot, + -1, __builtin_return_address(0)); + } + ++void *module_alloc(unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return __module_alloc(size, PAGE_KERNEL); ++#else ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++#endif ++ ++} ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++void *module_alloc_exec(unsigned long size) ++{ ++ struct vm_struct *area; ++ ++ if (size == 0) ++ return NULL; ++ ++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); ++ return area ? 
area->addr : NULL; ++} ++EXPORT_SYMBOL(module_alloc_exec); ++ ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ vunmap(module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++#else ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++ ++void *module_alloc_exec(unsigned long size) ++{ ++ return __module_alloc(size, PAGE_KERNEL_RX); ++} ++EXPORT_SYMBOL(module_alloc_exec); ++#endif ++#endif ++ + #ifdef CONFIG_X86_32 + int apply_relocate(Elf32_Shdr *sechdrs, + const char *strtab, +@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, + unsigned int i; + Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; +- uint32_t *location; ++ uint32_t *plocation, location; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ +- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr +- + rel[i].r_offset; ++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; ++ location = (uint32_t)plocation; ++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) ++ plocation = ktla_ktva((void *)plocation); + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr +@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_386_32: + /* We add the value into the location given */ +- *location += sym->st_value; ++ pax_open_kernel(); ++ *plocation += sym->st_value; ++ pax_close_kernel(); + break; + case R_386_PC32: + /* Add the value, subtract its postition */ +- *location += sym->st_value - (uint32_t)location; ++ pax_open_kernel(); ++ *plocation += sym->st_value - location; ++ pax_close_kernel(); + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %u\n", +@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + case R_X86_64_NONE: + break; + case R_X86_64_64: ++ pax_open_kernel(); + *(u64 *)loc = val; ++ pax_close_kernel(); + break; + case R_X86_64_32: ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); + if (val != *(u32 *)loc) + goto overflow; + break; + case R_X86_64_32S: ++ pax_open_kernel(); + *(s32 *)loc = val; ++ pax_close_kernel(); + if ((s64)val != *(s32 *)loc) + goto overflow; + break; + case R_X86_64_PC32: + val -= (u64)loc; ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); ++ + #if 0 + if ((s64)val != *(s32 *)loc) + goto overflow; +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index e88f37b..1353db6 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs) + dotraplinkage notrace __kprobes void + do_nmi(struct pt_regs *regs, long error_code) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (!user_mode(regs)) { ++ unsigned long cs = regs->cs & 0xFFFF; ++ unsigned long ip = ktva_ktla(regs->ip); ++ ++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext) ++ regs->ip = ip; ++ } ++#endif ++ + nmi_enter(); + + inc_irq_stat(__nmi_count); +diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c +index 676b8c7..870ba04 100644 +--- a/arch/x86/kernel/paravirt-spinlocks.c ++++ b/arch/x86/kernel/paravirt-spinlocks.c 
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) + arch_spin_lock(lock); + } + +-struct pv_lock_ops pv_lock_ops = { ++struct pv_lock_ops pv_lock_ops __read_only = { + #ifdef CONFIG_SMP + .spin_is_locked = __ticket_spin_is_locked, + .spin_is_contended = __ticket_spin_is_contended, +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index d90272e..6bb013b 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x) + { + return x; + } ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) ++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64); ++#endif + + void __init default_banner(void) + { +@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, + if (opfunc == NULL) + /* If there's no function, patch it with a ud2a (BUG) */ + ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); +- else if (opfunc == _paravirt_nop) ++ else if (opfunc == (void *)_paravirt_nop) + /* If the operation is a nop, then nop the callsite */ + ret = paravirt_patch_nop(); + + /* identity functions just return their single argument */ +- else if (opfunc == _paravirt_ident_32) ++ else if (opfunc == (void *)_paravirt_ident_32) + ret = paravirt_patch_ident_32(insnbuf, len); +- else if (opfunc == _paravirt_ident_64) ++ else if (opfunc == (void *)_paravirt_ident_64) + ret = paravirt_patch_ident_64(insnbuf, len); ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) ++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64) ++ ret = paravirt_patch_ident_64(insnbuf, len); ++#endif + + else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || + type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || +@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, + if (insn_len > len || start == NULL) + insn_len = len; + else +- memcpy(insnbuf, start, insn_len); ++ memcpy(insnbuf, ktla_ktva(start), insn_len); + + return insn_len; + } +@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void) + preempt_enable(); + } + +-struct pv_info pv_info = { ++struct pv_info pv_info __read_only = { + .name = "bare hardware", + .paravirt_enabled = 0, + .kernel_rpl = 0, +@@ -313,16 +320,16 @@ struct pv_info pv_info = { + #endif + }; + +-struct pv_init_ops pv_init_ops = { ++struct pv_init_ops pv_init_ops __read_only = { + .patch = native_patch, + }; + +-struct pv_time_ops pv_time_ops = { ++struct pv_time_ops pv_time_ops __read_only = { + .sched_clock = native_sched_clock, + .steal_clock = native_steal_clock, + }; + +-struct pv_irq_ops pv_irq_ops = { ++struct pv_irq_ops pv_irq_ops __read_only = { + .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), + .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), + .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), +@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = { + #endif + }; + +-struct pv_cpu_ops pv_cpu_ops = { ++struct pv_cpu_ops pv_cpu_ops __read_only = { + .cpuid = native_cpuid, + .get_debugreg = native_get_debugreg, + .set_debugreg = native_set_debugreg, +@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = { + .end_context_switch = paravirt_nop, + }; + +-struct pv_apic_ops pv_apic_ops = { ++struct pv_apic_ops pv_apic_ops __read_only = { + #ifdef CONFIG_X86_LOCAL_APIC + .startup_ipi_hook = paravirt_nop, + #endif + }; + +-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) ++#ifdef CONFIG_X86_32 ++#ifdef CONFIG_X86_PAE ++/* 64-bit pagetable entries */ ++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64) 
++#else + /* 32-bit pagetable entries */ + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) ++#endif + #else + /* 64-bit pagetable entries */ + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) + #endif + +-struct pv_mmu_ops pv_mmu_ops = { ++struct pv_mmu_ops pv_mmu_ops __read_only = { + + .read_cr2 = native_read_cr2, + .write_cr2 = native_write_cr2, +@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = { + .make_pud = PTE_IDENT, + + .set_pgd = native_set_pgd, ++ .set_pgd_batched = native_set_pgd_batched, + #endif + #endif /* PAGETABLE_LEVELS >= 3 */ + +@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = { + }, + + .set_fixmap = native_set_fixmap, ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .pax_open_kernel = native_pax_open_kernel, ++ .pax_close_kernel = native_pax_close_kernel, ++#endif ++ + }; + + EXPORT_SYMBOL_GPL(pv_time_ops); +diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c +index 35ccf75..7a15747 100644 +--- a/arch/x86/kernel/pci-iommu_table.c ++++ b/arch/x86/kernel/pci-iommu_table.c +@@ -2,7 +2,7 @@ + #include <asm/iommu_table.h> + #include <linux/string.h> + #include <linux/kallsyms.h> +- ++#include <linux/sched.h> + + #define DEBUG 1 + +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index ee5d4fb..426649b 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk) + + void free_thread_info(struct thread_info *ti) + { +- free_thread_xstate(ti->task); + free_pages((unsigned long)ti, THREAD_ORDER); + } + ++static struct kmem_cache *task_struct_cachep; ++ + void arch_task_cache_init(void) + { +- task_xstate_cachep = +- kmem_cache_create("task_xstate", xstate_size, ++ /* create a slab on which task_structs can be allocated */ ++ task_struct_cachep = ++ kmem_cache_create("task_struct", sizeof(struct task_struct), ++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); ++ ++ task_xstate_cachep = ++ kmem_cache_create("task_xstate", xstate_size, + __alignof__(union thread_xstate), +- SLAB_PANIC | SLAB_NOTRACK, NULL); ++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL); ++} ++ ++struct task_struct *alloc_task_struct_node(int node) ++{ ++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); ++} ++ ++void free_task_struct(struct task_struct *task) ++{ ++ free_thread_xstate(task); ++ kmem_cache_free(task_struct_cachep, task); + } + + /* +@@ -70,7 +87,7 @@ void exit_thread(void) + unsigned long *bp = t->io_bitmap_ptr; + + if (bp) { +- struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); ++ struct tss_struct *tss = init_tss + get_cpu(); + + t->io_bitmap_ptr = NULL; + clear_thread_flag(TIF_IO_BITMAP); +@@ -106,7 +123,7 @@ void show_regs_common(void) + + printk(KERN_CONT "\n"); + printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", +- current->pid, current->comm, print_tainted(), ++ task_pid_nr(current), current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); +@@ -120,6 +137,9 @@ void flush_thread(void) + { + struct task_struct *tsk = current; + ++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF) ++ loadsegment(gs, 0); ++#endif + flush_ptrace_hw_breakpoint(tsk); + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + /* +@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) + regs.di = (unsigned long) arg; + + #ifdef CONFIG_X86_32 +- regs.ds = __USER_DS; +- 
regs.es = __USER_DS; ++ regs.ds = __KERNEL_DS; ++ regs.es = __KERNEL_DS; + regs.fs = __KERNEL_PERCPU; +- regs.gs = __KERNEL_STACK_CANARY; ++ savesegment(gs, regs.gs); + #else + regs.ss = __KERNEL_DS; + #endif +@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void) + + return ret; + } +-void stop_this_cpu(void *dummy) ++__noreturn void stop_this_cpu(void *dummy) + { + local_irq_disable(); + /* +@@ -653,16 +673,37 @@ static int __init idle_setup(char *str) + } + early_param("idle", idle_setup); + +-unsigned long arch_align_stack(unsigned long sp) ++#ifdef CONFIG_PAX_RANDKSTACK ++void pax_randomize_kstack(struct pt_regs *regs) + { +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} ++ struct thread_struct *thread = ¤t->thread; ++ unsigned long time; + +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +-} ++ if (!randomize_va_space) ++ return; ++ ++ if (v8086_mode(regs)) ++ return; + ++ rdtscl(time); ++ ++ /* P4 seems to return a 0 LSB, ignore it */ ++#ifdef CONFIG_MPENTIUM4 ++ time &= 0x3EUL; ++ time <<= 2; ++#elif defined(CONFIG_X86_64) ++ time &= 0xFUL; ++ time <<= 4; ++#else ++ time &= 0x1FUL; ++ time <<= 3; ++#endif ++ ++ thread->sp0 ^= time; ++ load_sp0(init_tss + smp_processor_id(), thread); ++ ++#ifdef CONFIG_X86_64 ++ percpu_write(kernel_stack, thread->sp0); ++#endif ++} ++#endif +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c +index 8598296..bfadef0 100644 +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c +@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + unsigned long thread_saved_pc(struct task_struct *tsk) + { + return ((unsigned long *)tsk->thread.sp)[3]; ++//XXX return tsk->thread.eip; + } + + #ifndef CONFIG_SMP +@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all) + unsigned long sp; + unsigned short ss, gs; + +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; +- gs = get_user_gs(regs); + } else { + sp = kernel_stack_pointer(regs); + savesegment(ss, ss); +- savesegment(gs, gs); + } ++ gs = get_user_gs(regs); + + show_regs_common(); + +@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + struct task_struct *tsk; + int err; + +- childregs = task_pt_regs(p); ++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; + *childregs = *regs; + childregs->ax = 0; + childregs->sp = sp; + + p->thread.sp = (unsigned long) childregs; + p->thread.sp0 = (unsigned long) (childregs+1); ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); + + p->thread.ip = (unsigned long) ret_from_fork; + +@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + struct thread_struct *prev = &prev_p->thread, + *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + fpu_switch_t fpu; + + /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ +@@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + */ + lazy_save_gs(prev->gs); + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(task_thread_info(next_p)->addr_limit); ++#endif ++ + /* + * Load the per-thread Thread-Local Storage descriptor. 
+ */ +@@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + */ + arch_end_context_switch(next_p); + ++ percpu_write(current_task, next_p); ++ percpu_write(current_tinfo, &next_p->tinfo); ++ + /* + * Restore %gs if needed (which is common) + */ +@@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + + switch_fpu_finish(next_p, fpu); + +- percpu_write(current_task, next_p); +- + return prev_p; + } + +@@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p) + } while (count++ < 16); + return 0; + } +- +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index 6a364a6..b147d11 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -89,7 +89,7 @@ static void __exit_idle(void) + void exit_idle(void) + { + /* idle loop has pid 0 */ +- if (current->pid) ++ if (task_pid_nr(current)) + return; + __exit_idle(); + } +@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + struct pt_regs *childregs; + struct task_struct *me = current; + +- childregs = ((struct pt_regs *) +- (THREAD_SIZE + task_stack_page(p))) - 1; ++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16; + *childregs = *regs; + + childregs->ax = 0; +@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + p->thread.sp = (unsigned long) childregs; + p->thread.sp0 = (unsigned long) (childregs+1); + p->thread.usersp = me->thread.usersp; ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); + + set_tsk_thread_flag(p, TIF_FORK); + +@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + struct thread_struct *prev = &prev_p->thread; + struct thread_struct *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + unsigned fsindex, gsindex; + fpu_switch_t fpu; + +@@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + prev->usersp = percpu_read(old_rsp); + percpu_write(old_rsp, next->usersp); + percpu_write(current_task, next_p); ++ percpu_write(current_tinfo, &next_p->tinfo); + +- percpu_write(kernel_stack, +- (unsigned long)task_stack_page(next_p) + +- THREAD_SIZE - KERNEL_STACK_OFFSET); ++ percpu_write(kernel_stack, next->sp0); + + /* + * Now maybe reload the debug registers and handle I/O bitmaps +@@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p) + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + stack = (unsigned long)task_stack_page(p); +- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) ++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64)) + return 0; + fp = *(u64 *)(p->thread.sp); + do { +- if (fp < (unsigned long)stack || +- fp >= (unsigned long)stack+THREAD_SIZE) ++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64)) + return 0; + ip = *(u64 *)(fp+8); + if (!in_sched_functions(ip)) +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c +index 8252879..f367ec9 100644 +--- a/arch/x86/kernel/ptrace.c ++++ b/arch/x86/kernel/ptrace.c +@@ -791,6 +791,10 @@ static int ioperm_active(struct task_struct *target, + static int ioperm_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, ++ void *kbuf, void __user *ubuf) __size_overflow(3,4); ++static int ioperm_get(struct task_struct *target, ++ const struct user_regset *regset, 
++ unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + if (!target->thread.io_bitmap_ptr) +@@ -822,7 +826,7 @@ long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) + { + int ret; +- unsigned long __user *datap = (unsigned long __user *)data; ++ unsigned long __user *datap = (__force unsigned long __user *)data; + + switch (request) { + /* read the word at location addr in the USER area. */ +@@ -907,14 +911,14 @@ long arch_ptrace(struct task_struct *child, long request, + if ((int) addr < 0) + return -EIO; + ret = do_get_thread_area(child, addr, +- (struct user_desc __user *)data); ++ (__force struct user_desc __user *) data); + break; + + case PTRACE_SET_THREAD_AREA: + if ((int) addr < 0) + return -EIO; + ret = do_set_thread_area(child, addr, +- (struct user_desc __user *)data, 0); ++ (__force struct user_desc __user *) data, 0); + break; + #endif + +@@ -1331,7 +1335,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, + memset(info, 0, sizeof(*info)); + info->si_signo = SIGTRAP; + info->si_code = si_code; +- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; ++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL; + } + + void user_single_step_siginfo(struct task_struct *tsk, +diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c +index 42eb330..139955c 100644 +--- a/arch/x86/kernel/pvclock.c ++++ b/arch/x86/kernel/pvclock.c +@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) + return pv_tsc_khz; + } + +-static atomic64_t last_value = ATOMIC64_INIT(0); ++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0); + + void pvclock_resume(void) + { +- atomic64_set(&last_value, 0); ++ atomic64_set_unchecked(&last_value, 0); + } + + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) +@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) + * updating at the same time, and one of them could be slightly behind, + * making the assumption that last_value always go forward fail to hold. 
+ */ +- last = atomic64_read(&last_value); ++ last = atomic64_read_unchecked(&last_value); + do { + if (ret < last) + return last; +- last = atomic64_cmpxchg(&last_value, last, ret); ++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret); + } while (unlikely(last != ret)); + + return ret; +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index 37a458b..e63d183 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -35,7 +35,7 @@ void (*pm_power_off)(void); + EXPORT_SYMBOL(pm_power_off); + + static const struct desc_ptr no_idt = {}; +-static int reboot_mode; ++static unsigned short reboot_mode; + enum reboot_type reboot_type = BOOT_ACPI; + int reboot_force; + +@@ -324,13 +324,17 @@ core_initcall(reboot_init); + extern const unsigned char machine_real_restart_asm[]; + extern const u64 machine_real_restart_gdt[3]; + +-void machine_real_restart(unsigned int type) ++__noreturn void machine_real_restart(unsigned int type) + { + void *restart_va; + unsigned long restart_pa; +- void (*restart_lowmem)(unsigned int); ++ void (* __noreturn restart_lowmem)(unsigned int); + u64 *lowmem_gdt; + ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ struct desc_struct *gdt; ++#endif ++ + local_irq_disable(); + + /* Write zero to CMOS register number 0x0f, which the BIOS POST +@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type) + boot)". This seems like a fairly standard thing that gets set by + REBOOT.COM programs, and the previous reset routine did this + too. */ +- *((unsigned short *)0x472) = reboot_mode; ++ *(unsigned short *)(__va(0x472)) = reboot_mode; + + /* Patch the GDT in the low memory trampoline */ + lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); + + restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); + restart_pa = virt_to_phys(restart_va); +- restart_lowmem = (void (*)(unsigned int))restart_pa; ++ restart_lowmem = (void *)restart_pa; + + /* GDT[0]: GDT self-pointer */ + lowmem_gdt[0] = +@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type) + GDT_ENTRY(0x009b, restart_pa, 0xffff); + + /* Jump to the identity-mapped low memory code */ ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ gdt = get_cpu_gdt_table(smp_processor_id()); ++ pax_open_kernel(); ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3; ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); ++#endif ++#ifdef CONFIG_PAX_KERNEXEC ++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff; ++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf; ++ gdt[GDT_ENTRY_KERNEL_CS].g = 1; ++#endif ++ pax_close_kernel(); ++#endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type)); ++ unreachable(); ++#else + restart_lowmem(type); ++#endif ++ + } + #ifdef CONFIG_APM_MODULE + EXPORT_SYMBOL(machine_real_restart); +@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void) + * try to force a triple fault and then cycle between hitting the keyboard + * controller and doing that + */ +-static void native_machine_emergency_restart(void) ++__noreturn static void native_machine_emergency_restart(void) + { + int i; + int attempt = 0; +@@ -664,13 +694,13 @@ void 
native_machine_shutdown(void) + #endif + } + +-static void __machine_emergency_restart(int emergency) ++static __noreturn void __machine_emergency_restart(int emergency) + { + reboot_emergency = emergency; + machine_ops.emergency_restart(); + } + +-static void native_machine_restart(char *__unused) ++static __noreturn void native_machine_restart(char *__unused) + { + printk("machine restart\n"); + +@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused) + __machine_emergency_restart(0); + } + +-static void native_machine_halt(void) ++static __noreturn void native_machine_halt(void) + { + /* stop other cpus and apics */ + machine_shutdown(); +@@ -690,7 +720,7 @@ static void native_machine_halt(void) + stop_this_cpu(NULL); + } + +-static void native_machine_power_off(void) ++__noreturn static void native_machine_power_off(void) + { + if (pm_power_off) { + if (!reboot_force) +@@ -699,6 +729,7 @@ static void native_machine_power_off(void) + } + /* a fallback in case there is no PM info available */ + tboot_shutdown(TB_SHUTDOWN_HALT); ++ unreachable(); + } + + struct machine_ops machine_ops = { +diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S +index 7a6f3b3..bed145d7 100644 +--- a/arch/x86/kernel/relocate_kernel_64.S ++++ b/arch/x86/kernel/relocate_kernel_64.S +@@ -11,6 +11,7 @@ + #include <asm/kexec.h> + #include <asm/processor-flags.h> + #include <asm/pgtable_types.h> ++#include <asm/alternative-asm.h> + + /* + * Must be relocatable PIC code callable as a C function +@@ -160,13 +161,14 @@ identity_mapped: + xorq %rbp, %rbp + xorq %r8, %r8 + xorq %r9, %r9 +- xorq %r10, %r9 ++ xorq %r10, %r10 + xorq %r11, %r11 + xorq %r12, %r12 + xorq %r13, %r13 + xorq %r14, %r14 + xorq %r15, %r15 + ++ pax_force_retaddr 0, 1 + ret + + 1: +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index cf0ef98..e3f780b 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -447,7 +447,7 @@ static void __init parse_setup_data(void) + + switch (data->type) { + case SETUP_E820_EXT: +- parse_e820_ext(data); ++ parse_e820_ext((struct setup_data __force_kernel *)data); + break; + case SETUP_DTB: + add_dtb(pa_data); +@@ -650,7 +650,7 @@ static void __init trim_bios_range(void) + * area (640->1Mb) as ram even though it is not. + * take them out. 
+ */ +- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); ++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + } + +@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p) + + if (!boot_params.hdr.root_flags) + root_mountflags &= ~MS_RDONLY; +- init_mm.start_code = (unsigned long) _text; +- init_mm.end_code = (unsigned long) _etext; ++ init_mm.start_code = ktla_ktva((unsigned long) _text); ++ init_mm.end_code = ktla_ktva((unsigned long) _etext); + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = _brk_end; + +- code_resource.start = virt_to_phys(_text); +- code_resource.end = virt_to_phys(_etext)-1; +- data_resource.start = virt_to_phys(_etext); ++ code_resource.start = virt_to_phys(ktla_ktva(_text)); ++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1; ++ data_resource.start = virt_to_phys(_sdata); + data_resource.end = virt_to_phys(_edata)-1; + bss_resource.start = virt_to_phys(&__bss_start); + bss_resource.end = virt_to_phys(&__bss_stop)-1; +diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c +index 71f4727..217419b 100644 +--- a/arch/x86/kernel/setup_percpu.c ++++ b/arch/x86/kernel/setup_percpu.c +@@ -21,19 +21,17 @@ + #include <asm/cpu.h> + #include <asm/stackprotector.h> + +-DEFINE_PER_CPU(int, cpu_number); ++#ifdef CONFIG_SMP ++DEFINE_PER_CPU(unsigned int, cpu_number); + EXPORT_PER_CPU_SYMBOL(cpu_number); ++#endif + +-#ifdef CONFIG_X86_64 + #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) +-#else +-#define BOOT_PERCPU_OFFSET 0 +-#endif + + DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; + EXPORT_PER_CPU_SYMBOL(this_cpu_off); + +-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { ++unsigned long __per_cpu_offset[NR_CPUS] __read_only = { + [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, + }; + EXPORT_SYMBOL(__per_cpu_offset); +@@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void) + * Pointer to the allocated area on success, NULL on failure. 
+ */ + static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, ++ unsigned long align) __size_overflow(2); ++static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, + unsigned long align) + { + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +@@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, + /* + * Helpers for first chunk memory allocation + */ ++static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2); ++ + static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) + { + return pcpu_alloc_bootmem(cpu, size, align); +@@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu) + { + #ifdef CONFIG_X86_32 + struct desc_struct gdt; ++ unsigned long base = per_cpu_offset(cpu); + +- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, +- 0x2 | DESCTYPE_S, 0x8); +- gdt.s = 1; ++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, ++ 0x83 | DESCTYPE_S, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), + GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); + #endif +@@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void) + /* alrighty, percpu areas up and running */ + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) { ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_X86_32 ++ unsigned long canary = per_cpu(stack_canary.canary, cpu); ++#endif ++#endif + per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); + per_cpu(cpu_number, cpu) = cpu; +@@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void) + */ + set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); + #endif ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_X86_32 ++ if (!cpu) ++ per_cpu(stack_canary.canary, cpu) = canary; ++#endif ++#endif + /* + * Up to this point, the boot CPU has been using .init.data + * area. Reload any changed state for the boot CPU. +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c +index 54ddaeb2..22c3bdc 100644 +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp) + * Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. + */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + #else /* !CONFIG_X86_32 */ + sp = round_down(sp, 16) - 8; + #endif +@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, + * Return an always-bogus address instead so we will die with SIGSEGV. 
+ */ + if (onsigstack && !likely(on_sig_stack(sp))) +- return (void __user *)-1L; ++ return (__force void __user *)-1L; + + /* save i387 state */ + if (used_math() && save_i387_xstate(*fpstate) < 0) +- return (void __user *)-1L; ++ return (__force void __user *)-1L; + + return (void __user *)sp; + } +@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, + } + + if (current->mm->context.vdso) +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); + else +- restorer = &frame->retcode; ++ restorer = (void __user *)&frame->retcode; + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + +@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); ++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); + + if (err) + return -EFAULT; +@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. */ +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); ++ if (current->mm->context.vdso) ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); ++ else ++ restorer = (void __user *)&frame->retcode; + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + put_user_ex(restorer, &frame->pretcode); +@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); ++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs) + * X86_32: vm86 regs switched out by assembly code before reaching + * here, so testing against kernel CS suffices. 
+ */ +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + return; + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 9f548cb..caf76f7 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) + set_idle_for_cpu(cpu, c_idle.idle); + do_rest: + per_cpu(current_task, cpu) = c_idle.idle; ++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo; + #ifdef CONFIG_X86_32 + /* Stack for startup_32 can be just as for start_secondary onwards */ + irq_ctx_init(cpu); + #else + clear_tsk_thread_flag(c_idle.idle, TIF_FORK); + initial_gs = per_cpu_offset(cpu); +- per_cpu(kernel_stack, cpu) = +- (unsigned long)task_stack_page(c_idle.idle) - +- KERNEL_STACK_OFFSET + THREAD_SIZE; ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE; + #endif ++ ++ pax_open_kernel(); + early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); ++ pax_close_kernel(); ++ + initial_code = (unsigned long)start_secondary; + stack_start = c_idle.idle->thread.sp; + +@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) + + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + err = do_boot_cpu(apicid, cpu); + if (err) { + pr_debug("do_boot_cpu failed %d\n", err); +diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c +index c346d11..d43b163 100644 +--- a/arch/x86/kernel/step.c ++++ b/arch/x86/kernel/step.c +@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re + struct desc_struct *desc; + unsigned long base; + +- seg &= ~7UL; ++ seg >>= 3; + + mutex_lock(&child->mm->context.lock); +- if (unlikely((seg >> 3) >= child->mm->context.size)) ++ if (unlikely(seg >= child->mm->context.size)) + addr = -1L; /* bogus selector, access would fault */ + else { + desc = child->mm->context.ldt + seg; +@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re + addr += base; + } + mutex_unlock(&child->mm->context.lock); +- } ++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS) ++ addr = ktla_ktva(addr); + + return addr; + } +@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) + unsigned char opcode[15]; + unsigned long addr = convert_ip_to_linear(child, regs); + ++ if (addr == -EINVAL) ++ return 0; ++ + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); + for (i = 0; i < copied; i++) { + switch (opcode[i]) { +diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c +index 0b0cb5f..db6b9ed 100644 +--- a/arch/x86/kernel/sys_i386_32.c ++++ b/arch/x86/kernel/sys_i386_32.c +@@ -24,17 +24,224 @@ + + #include <asm/syscalls.h> + +-/* +- * Do a system call from kernel instead of calling sys_execve so we +- * end up with proper pt_regs. 
+- */ +-int kernel_execve(const char *filename, +- const char *const argv[], +- const char *const envp[]) ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) + { +- long __res; +- asm volatile ("int $0x80" +- : "=a" (__res) +- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory"); +- return __res; ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (len > pax_task_size || addr > pax_task_size - len) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++unsigned long ++arch_get_unmapped_area(struct file *filp, unsigned long addr, ++ unsigned long len, unsigned long pgoff, unsigned long flags) ++{ ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ unsigned long start_addr, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } ++ } ++ if (len > mm->cached_hole_size) { ++ start_addr = addr = mm->free_area_cache; ++ } else { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ } ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { ++ start_addr = 0x00110000UL; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ start_addr += mm->delta_mmap & 0x03FFF000UL; ++#endif ++ ++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) ++ start_addr = addr = mm->mmap_base; ++ else ++ addr = start_addr; ++ } ++#endif ++ ++full_search: ++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { ++ /* At this point: (!vma || addr < vma->vm_end). */ ++ if (pax_task_size - len < addr) { ++ /* ++ * Start a new search - just in case we missed ++ * some holes. 
++ */ ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ goto full_search; ++ } ++ return -ENOMEM; ++ } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; ++ if (addr + mm->cached_hole_size < vma->vm_start) ++ mm->cached_hole_size = vma->vm_start - addr; ++ addr = vma->vm_end; ++ if (mm->start_brk <= addr && addr < mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ goto full_search; ++ } ++ } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; ++} ++ ++unsigned long ++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ++ const unsigned long len, const unsigned long pgoff, ++ const unsigned long flags) ++{ ++ struct vm_area_struct *vma; ++ struct mm_struct *mm = current->mm; ++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ /* requested length too big for entire address space */ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) ++ goto bottomup; ++#endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ /* requesting a specific address */ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } ++ } ++ ++ /* check if free_area_cache is useful for us */ ++ if (len <= mm->cached_hole_size) { ++ mm->cached_hole_size = 0; ++ mm->free_area_cache = mm->mmap_base; ++ } ++ ++ /* either no address requested or can't fit in requested address hole */ ++ addr = mm->free_area_cache; ++ ++ /* make sure it can fit in the remaining address space */ ++ if (addr > len) { ++ vma = find_vma(mm, addr-len); ++ if (check_heap_stack_gap(vma, addr - len, len)) ++ /* remember the address as a hint for next time */ ++ return (mm->free_area_cache = addr-len); ++ } ++ ++ if (mm->mmap_base < len) ++ goto bottomup; ++ ++ addr = mm->mmap_base-len; ++ ++ do { ++ /* ++ * Lookup failure means no vma is above this address, ++ * else if new region fits below vma->vm_start, ++ * return with success: ++ */ ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ /* remember the address as a hint for next time */ ++ return (mm->free_area_cache = addr); ++ ++ /* remember the largest hole we saw so far */ ++ if (addr + mm->cached_hole_size < vma->vm_start) ++ mm->cached_hole_size = vma->vm_start - addr; ++ ++ /* try just below the current vma->vm_start */ ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); ++ ++bottomup: ++ /* ++ * A failed mmap() very likely causes application failure, ++ * so fall back to the bottom-up function here. This scenario ++ * can happen with large stack limits and large mmap() ++ * allocations. 
++ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; ++ mm->cached_hole_size = ~0UL; ++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); ++ /* ++ * Restore the topdown base: ++ */ ++ mm->mmap_base = base; ++ mm->free_area_cache = base; ++ mm->cached_hole_size = ~0UL; ++ ++ return addr; + } +diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c +index 0514890..3dbebce 100644 +--- a/arch/x86/kernel/sys_x86_64.c ++++ b/arch/x86/kernel/sys_x86_64.c +@@ -95,8 +95,8 @@ out: + return error; + } + +-static void find_start_end(unsigned long flags, unsigned long *begin, +- unsigned long *end) ++static void find_start_end(struct mm_struct *mm, unsigned long flags, ++ unsigned long *begin, unsigned long *end) + { + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { + unsigned long new_begin; +@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, + *begin = new_begin; + } + } else { +- *begin = TASK_UNMAPPED_BASE; ++ *begin = mm->mmap_base; + *end = TASK_SIZE; + } + } +@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (flags & MAP_FIXED) + return addr; + +- find_start_end(flags, &begin, &end); ++ find_start_end(mm, flags, &begin, &end); + + if (len > end) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) +@@ -172,7 +175,7 @@ full_search: + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; +- unsigned long addr = addr0; ++ unsigned long base = mm->mmap_base, addr = addr0; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) + goto bottomup; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ +@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + ALIGN_TOPDOWN); + + vma = find_vma(mm, tmp_addr); +- if (!vma || tmp_addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, tmp_addr, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = tmp_addr; + } +@@ -251,7 +259,7 @@ 
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + +@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start-len; +- } while (len < vma->vm_start); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +@@ -270,13 +278,21 @@ bottomup: + * can happen with large stack limits and large mmap() + * allocations. + */ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; +- mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ +- mm->free_area_cache = mm->mmap_base; ++ mm->mmap_base = base; ++ mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S +index 9a0e312..e6f66f2 100644 +--- a/arch/x86/kernel/syscall_table_32.S ++++ b/arch/x86/kernel/syscall_table_32.S +@@ -1,3 +1,4 @@ ++.section .rodata,"a",@progbits + ENTRY(sys_call_table) + .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ + .long sys_exit +diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c +index e2410e2..4fe3fbc 100644 +--- a/arch/x86/kernel/tboot.c ++++ b/arch/x86/kernel/tboot.c +@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void) + + void tboot_shutdown(u32 shutdown_type) + { +- void (*shutdown)(void); ++ void (* __noreturn shutdown)(void); + + if (!tboot_enabled()) + return; +@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type) + + switch_to_tboot_pt(); + +- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; ++ shutdown = (void *)tboot->shutdown_entry; + shutdown(); + + /* should not reach here */ +@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) + tboot_shutdown(acpi_shutdown_map[sleep_state]); + } + +-static atomic_t ap_wfs_count; ++static atomic_unchecked_t ap_wfs_count; + + static int tboot_wait_for_aps(int num_aps) + { +@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, + { + switch (action) { + case CPU_DYING: +- atomic_inc(&ap_wfs_count); ++ atomic_inc_unchecked(&ap_wfs_count); + if (num_online_cpus() == 1) +- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) ++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) + return NOTIFY_BAD; + break; + } +@@ -343,7 +343,7 @@ static __init int tboot_late_init(void) + + tboot_create_trampoline(); + +- atomic_set(&ap_wfs_count, 0); ++ atomic_set_unchecked(&ap_wfs_count, 0); + register_hotcpu_notifier(&tboot_cpu_notifier); + return 0; + } +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index dd5fbf4..b7f2232 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); + +- if (!user_mode_vm(regs) && in_lock_functions(pc)) { ++ if (!user_mode(regs) && 
in_lock_functions(pc)) { + #ifdef CONFIG_FRAME_POINTER +- return *(unsigned long *)(regs->bp + sizeof(long)); ++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); + #else + unsigned long *sp = + (unsigned long *)kernel_stack_pointer(regs); +@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs) + * or above a saved flags. Eflags has bits 22-31 zero, + * kernel addresses don't. + */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return ktla_ktva(sp[0]); ++#else + if (sp[0] >> 22) + return sp[0]; + if (sp[1] >> 22) + return sp[1]; + #endif ++ ++#endif + } + return pc; + } +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c +index 6bb7b85..dd853e1 100644 +--- a/arch/x86/kernel/tls.c ++++ b/arch/x86/kernel/tls.c +@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) ++ return -EINVAL; ++#endif ++ + set_tls_desc(p, idx, &info, 1); + + return 0; +diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h +index 2f083a2..7d3fecc 100644 +--- a/arch/x86/kernel/tls.h ++++ b/arch/x86/kernel/tls.h +@@ -16,6 +16,6 @@ + + extern user_regset_active_fn regset_tls_active; + extern user_regset_get_fn regset_tls_get; +-extern user_regset_set_fn regset_tls_set; ++extern user_regset_set_fn regset_tls_set __size_overflow(4); + + #endif /* _ARCH_X86_KERNEL_TLS_H */ +diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S +index 451c0a7..e57f551 100644 +--- a/arch/x86/kernel/trampoline_32.S ++++ b/arch/x86/kernel/trampoline_32.S +@@ -32,6 +32,12 @@ + #include <asm/segment.h> + #include <asm/page_types.h> + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) ((X) - __PAGE_OFFSET) ++#endif ++ + #ifdef CONFIG_SMP + + .section ".x86_trampoline","a" +@@ -62,7 +68,7 @@ r_base = . + inc %ax # protected mode (PE) bit + lmsw %ax # into protected mode + # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S +- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET) ++ ljmpl $__BOOT_CS, $ta(startup_32_smp) + + # These need to be in the same 64K segment as the above; + # hence we don't use the boot_gdt_descr defined in head.S +diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S +index 09ff517..df19fbff 100644 +--- a/arch/x86/kernel/trampoline_64.S ++++ b/arch/x86/kernel/trampoline_64.S +@@ -90,7 +90,7 @@ startup_32: + movl $__KERNEL_DS, %eax # Initialize the %ds segment register + movl %eax, %ds + +- movl $X86_CR4_PAE, %eax ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax + movl %eax, %cr4 # Enable PAE mode + + # Setup trampoline 4 level pagetables +@@ -138,7 +138,7 @@ tidt: + # so the kernel can live anywhere + .balign 4 + tgdt: +- .short tgdt_end - tgdt # gdt limit ++ .short tgdt_end - tgdt - 1 # gdt limit + .long tgdt - r_base + .short 0 + .quad 0x00cf9b000000ffff # __KERNEL32_CS +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 31d9d0f..e244dd9 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -70,12 +70,6 @@ asmlinkage int system_call(void); + + /* Do we ignore FPU interrupts ? */ + char ignore_fpu_irq; +- +-/* +- * The IDT has to be page-aligned to simplify the Pentium +- * F0 0F bug workaround. 
+- */ +-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; + #endif + + DECLARE_BITMAP(used_vectors, NR_VECTORS); +@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) + } + + static void __kprobes +-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, ++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, + long error_code, siginfo_t *info) + { + struct task_struct *tsk = current; + + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + /* + * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. + * On nmi (interrupt 2), do_trap should not be called. +@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, + } + #endif + +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + goto kernel_trap; + + #ifdef CONFIG_X86_32 +@@ -148,7 +142,7 @@ trap_signal: + printk_ratelimit()) { + printk(KERN_INFO + "%s[%d] trap %s ip:%lx sp:%lx error:%lx", +- tsk->comm, tsk->pid, str, ++ tsk->comm, task_pid_nr(tsk), str, + regs->ip, regs->sp, error_code); + print_vma_addr(" in ", regs->ip); + printk("\n"); +@@ -165,8 +159,20 @@ kernel_trap: + if (!fixup_exception(regs)) { + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) ++ str = "PAX: suspicious stack segment fault"; ++#endif ++ + die(str, regs, error_code); + } ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (trapnr == 4) ++ pax_report_refcount_overflow(regs); ++#endif ++ + return; + + #ifdef CONFIG_X86_32 +@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code) + conditional_sti(regs); + + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) ++ if (v8086_mode(regs)) + goto gp_in_vm86; + #endif + + tsk = current; +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + goto gp_in_kernel; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { ++ struct mm_struct *mm = tsk->mm; ++ unsigned long limit; ++ ++ down_write(&mm->mmap_sem); ++ limit = mm->context.user_cs_limit; ++ if (limit < TASK_SIZE) { ++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); ++ up_write(&mm->mmap_sem); ++ return; ++ } ++ up_write(&mm->mmap_sem); ++ } ++#endif ++ + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 13; + +@@ -295,6 +317,13 @@ gp_in_kernel: + if (notify_die(DIE_GPF, "general protection fault", regs, + error_code, 13, SIGSEGV) == NOTIFY_STOP) + return; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) ++ die("PAX: suspicious general protection fault", regs, error_code); ++ else ++#endif ++ + die("general protection fault", regs, error_code); + } + +@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + /* It's safe to allow irq's after DR6 has been saved */ + preempt_conditional_sti(regs); + +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, 1); + preempt_conditional_cli(regs); +@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + * We already checked v86 mode above, so we can check for kernel mode + * by just checking the CPL of CS. 
+ */ +- if ((dr6 & DR_STEP) && !user_mode(regs)) { ++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { + tsk->thread.debugreg6 &= ~DR_STEP; + set_tsk_thread_flag(tsk, TIF_SINGLESTEP); + regs->flags &= ~X86_EFLAGS_TF; +@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) + return; + conditional_sti(regs); + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + { + if (!fixup_exception(regs)) { + task->thread.error_code = error_code; +@@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) + void __math_state_restore(struct task_struct *tsk) + { + /* We need a safe address that is cheap to find and that is already +- in L1. We've just brought in "tsk->thread.has_fpu", so use that */ +-#define safe_address (tsk->thread.has_fpu) ++ in L1. */ ++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0) + + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception + is pending. Clear the x87 state here by setting it to fixed +diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S +index b9242ba..50c5edd 100644 +--- a/arch/x86/kernel/verify_cpu.S ++++ b/arch/x86/kernel/verify_cpu.S +@@ -20,6 +20,7 @@ + * arch/x86/boot/compressed/head_64.S: Boot cpu verification + * arch/x86/kernel/trampoline_64.S: secondary processor verification + * arch/x86/kernel/head_32.S: processor startup ++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume + * + * verify_cpu, returns the status of longmode and SSE in register %eax. + * 0: Success 1: Failure +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c +index 863f875..4307295 100644 +--- a/arch/x86/kernel/vm86_32.c ++++ b/arch/x86/kernel/vm86_32.c +@@ -41,6 +41,7 @@ + #include <linux/ptrace.h> + #include <linux/audit.h> + #include <linux/stddef.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + #include <asm/io.h> +@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) + do_exit(SIGSEGV); + } + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + current->thread.sp0 = current->thread.saved_sp0; + current->thread.sysenter_cs = __KERNEL_CS; + load_sp0(tss, ¤t->thread); +@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs) + struct task_struct *tsk; + int tmp, ret = -EPERM; + ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ goto out; ++ } ++#endif ++ + tsk = current; + if (tsk->thread.saved_sp0) + goto out; +@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs) + int tmp, ret; + struct vm86plus_struct __user *v86; + ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ ret = -EPERM; ++ goto out; ++ } ++#endif ++ + tsk = current; + switch (cmd) { + case VM86_REQUEST_IRQ: +@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk + tsk->thread.saved_fs = info->regs32->fs; + tsk->thread.saved_gs = get_user_gs(info->regs32); + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; + if (cpu_has_sep) + tsk->thread.sysenter_cs = 0; +@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, + goto cannot_handle; + if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) + goto cannot_handle; +- intr_ptr = (unsigned long __user *) (i << 2); ++ intr_ptr = (__force unsigned long __user *) (i << 
2); + if (get_user(segoffs, intr_ptr)) + goto cannot_handle; + if ((segoffs >> 16) == BIOSSEG) +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index 0f703f1..9e15f64 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -26,6 +26,13 @@ + #include <asm/page_types.h> + #include <asm/cache.h> + #include <asm/boot.h> ++#include <asm/segment.h> ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) ++#else ++#define __KERNEL_TEXT_OFFSET 0 ++#endif + + #undef i386 /* in case the preprocessor is a 32bit one */ + +@@ -69,30 +76,43 @@ jiffies_64 = jiffies; + + PHDRS { + text PT_LOAD FLAGS(5); /* R_E */ ++#ifdef CONFIG_X86_32 ++ module PT_LOAD FLAGS(5); /* R_E */ ++#endif ++#ifdef CONFIG_XEN ++ rodata PT_LOAD FLAGS(5); /* R_E */ ++#else ++ rodata PT_LOAD FLAGS(4); /* R__ */ ++#endif + data PT_LOAD FLAGS(6); /* RW_ */ +-#ifdef CONFIG_X86_64 ++ init.begin PT_LOAD FLAGS(6); /* RW_ */ + #ifdef CONFIG_SMP + percpu PT_LOAD FLAGS(6); /* RW_ */ + #endif ++ text.init PT_LOAD FLAGS(5); /* R_E */ ++ text.exit PT_LOAD FLAGS(5); /* R_E */ + init PT_LOAD FLAGS(7); /* RWE */ +-#endif + note PT_NOTE FLAGS(0); /* ___ */ + } + + SECTIONS + { + #ifdef CONFIG_X86_32 +- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; +- phys_startup_32 = startup_32 - LOAD_OFFSET; ++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; + #else +- . = __START_KERNEL; +- phys_startup_64 = startup_64 - LOAD_OFFSET; ++ . = __START_KERNEL; + #endif + + /* Text and read-only data */ +- .text : AT(ADDR(.text) - LOAD_OFFSET) { +- _text = .; ++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + /* bootstrapping code */ ++#ifdef CONFIG_X86_32 ++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#else ++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#endif ++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++ _text = .; + HEAD_TEXT + #ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); +@@ -108,13 +128,47 @@ SECTIONS + IRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) +- /* End of text section */ +- _etext = .; + } :text = 0x9090 + +- NOTES :text :note ++ . += __KERNEL_TEXT_OFFSET; + +- EXCEPTION_TABLE(16) :text = 0x9090 ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { ++ ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES) ++ MODULES_EXEC_VADDR = .; ++ BYTE(0) ++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); ++ . = ALIGN(HPAGE_SIZE); ++ MODULES_EXEC_END = . - 1; ++#endif ++ ++ } :module ++#endif ++ ++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { ++ /* End of text section */ ++ _etext = . - __KERNEL_TEXT_OFFSET; ++ } ++ ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { ++ *(.idt) ++ . = ALIGN(PAGE_SIZE); ++ *(.empty_zero_page) ++ *(.initial_pg_fixmap) ++ *(.initial_pg_pmd) ++ *(.initial_page_table) ++ *(.swapper_pg_dir) ++ } :rodata ++#endif ++ ++ . = ALIGN(PAGE_SIZE); ++ NOTES :rodata :note ++ ++ EXCEPTION_TABLE(16) :rodata + + #if defined(CONFIG_DEBUG_RODATA) + /* .text should occupy whole number of pages */ +@@ -126,16 +180,20 @@ SECTIONS + + /* Data */ + .data : AT(ADDR(.data) - LOAD_OFFSET) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . 
= ALIGN(PAGE_SIZE); ++#endif ++ + /* Start of data section */ + _sdata = .; + + /* init_task */ + INIT_TASK_DATA(THREAD_SIZE) + +-#ifdef CONFIG_X86_32 +- /* 32 bit has nosave before _edata */ + NOSAVE_DATA +-#endif + + PAGE_ALIGNED_DATA(PAGE_SIZE) + +@@ -176,12 +234,19 @@ SECTIONS + #endif /* CONFIG_X86_64 */ + + /* Init code and data - will be freed after init */ +- . = ALIGN(PAGE_SIZE); + .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { ++ BYTE(0) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . = ALIGN(PAGE_SIZE); ++#endif ++ + __init_begin = .; /* paired with __init_end */ +- } ++ } :init.begin + +-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) ++#ifdef CONFIG_SMP + /* + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the + * output PHDR, so the next output section - .init.text - should +@@ -190,12 +255,27 @@ SECTIONS + PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) + #endif + +- INIT_TEXT_SECTION(PAGE_SIZE) +-#ifdef CONFIG_X86_64 +- :init +-#endif ++ . = ALIGN(PAGE_SIZE); ++ init_begin = .; ++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { ++ VMLINUX_SYMBOL(_sinittext) = .; ++ INIT_TEXT ++ VMLINUX_SYMBOL(_einittext) = .; ++ . = ALIGN(PAGE_SIZE); ++ } :text.init + +- INIT_DATA_SECTION(16) ++ /* ++ * .exit.text is discard at runtime, not link time, to deal with ++ * references from .altinstructions and .eh_frame ++ */ ++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { ++ EXIT_TEXT ++ . = ALIGN(16); ++ } :text.exit ++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); ++ ++ . = ALIGN(PAGE_SIZE); ++ INIT_DATA_SECTION(16) :init + + /* + * Code and data for a variety of lowlevel trampolines, to be +@@ -269,19 +349,12 @@ SECTIONS + } + + . = ALIGN(8); +- /* +- * .exit.text is discard at runtime, not link time, to deal with +- * references from .altinstructions and .eh_frame +- */ +- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { +- EXIT_TEXT +- } + + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } + +-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) ++#ifndef CONFIG_SMP + PERCPU_SECTION(INTERNODE_CACHE_BYTES) + #endif + +@@ -300,16 +373,10 @@ SECTIONS + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + __smp_locks = .; + *(.smp_locks) +- . = ALIGN(PAGE_SIZE); + __smp_locks_end = .; ++ . = ALIGN(PAGE_SIZE); + } + +-#ifdef CONFIG_X86_64 +- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { +- NOSAVE_DATA +- } +-#endif +- + /* BSS */ + . = ALIGN(PAGE_SIZE); + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { +@@ -325,6 +392,7 @@ SECTIONS + __brk_base = .; + . += 64 * 1024; /* 64k alignment slop space */ + *(.brk_reservation) /* areas brk users have reserved */ ++ . = ALIGN(HPAGE_SIZE); + __brk_limit = .; + } + +@@ -351,13 +419,12 @@ SECTIONS + * for the boot processor. + */ + #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load +-INIT_PER_CPU(gdt_page); + INIT_PER_CPU(irq_stack_union); + + /* + * Build-time check on the image size: + */ +-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), ++. 
= ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); + + #ifdef CONFIG_SMP +diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c +index e4d4a22..47ee71f 100644 +--- a/arch/x86/kernel/vsyscall_64.c ++++ b/arch/x86/kernel/vsyscall_64.c +@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = + .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), + }; + +-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; ++static enum { EMULATE, NONE } vsyscall_mode = EMULATE; + + static int __init vsyscall_setup(char *str) + { + if (str) { + if (!strcmp("emulate", str)) + vsyscall_mode = EMULATE; +- else if (!strcmp("native", str)) +- vsyscall_mode = NATIVE; + else if (!strcmp("none", str)) + vsyscall_mode = NONE; + else +@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) + + tsk = current; + if (seccomp_mode(&tsk->seccomp)) +- do_exit(SIGKILL); ++ do_group_exit(SIGKILL); + + switch (vsyscall_nr) { + case 0: +@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) + return true; + + sigsegv: +- force_sig(SIGSEGV, current); +- return true; ++ do_group_exit(SIGKILL); + } + + /* +@@ -274,10 +271,7 @@ void __init map_vsyscall(void) + extern char __vvar_page; + unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); + +- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, +- vsyscall_mode == NATIVE +- ? PAGE_KERNEL_VSYSCALL +- : PAGE_KERNEL_VVAR); ++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR); + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) != + (unsigned long)VSYSCALL_START); + +diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c +index 9796c2f..f686fbf 100644 +--- a/arch/x86/kernel/x8664_ksyms_64.c ++++ b/arch/x86/kernel/x8664_ksyms_64.c +@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8); + EXPORT_SYMBOL(copy_user_generic_string); + EXPORT_SYMBOL(copy_user_generic_unrolled); + EXPORT_SYMBOL(__copy_user_nocache); +-EXPORT_SYMBOL(_copy_from_user); +-EXPORT_SYMBOL(_copy_to_user); + + EXPORT_SYMBOL(copy_page); + EXPORT_SYMBOL(clear_page); +diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c +index 7110911..e8cdee5 100644 +--- a/arch/x86/kernel/xsave.c ++++ b/arch/x86/kernel/xsave.c +@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf, + fx_sw_user->xstate_size > fx_sw_user->extended_size) + return -EINVAL; + +- err = __get_user(magic2, (__u32 *) (((void *)fpstate) + ++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) + + fx_sw_user->extended_size - + FP_XSTATE_MAGIC2_SIZE)); + if (err) +@@ -266,7 +266,7 @@ fx_only: + * the other extended state. 
+ */ + xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); +- return fxrstor_checking((__force struct i387_fxsave_struct *)buf); ++ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf); + } + + /* +@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf) + if (use_xsave()) + err = restore_user_xstate(buf); + else +- err = fxrstor_checking((__force struct i387_fxsave_struct *) ++ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *) + buf); + if (unlikely(err)) { + /* +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index f1e3be18..588efc8 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -249,6 +249,7 @@ struct gprefix { + + #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \ + do { \ ++ unsigned long _tmp; \ + __asm__ __volatile__ ( \ + _PRE_EFLAGS("0", "4", "2") \ + _op _suffix " %"_x"3,%1; " \ +@@ -263,8 +264,6 @@ struct gprefix { + /* Raw emulation: instruction has two explicit operands. */ + #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \ + do { \ +- unsigned long _tmp; \ +- \ + switch ((ctxt)->dst.bytes) { \ + case 2: \ + ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \ +@@ -280,7 +279,6 @@ struct gprefix { + + #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ + do { \ +- unsigned long _tmp; \ + switch ((ctxt)->dst.bytes) { \ + case 1: \ + ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \ +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 54abb40..a192606 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -53,7 +53,7 @@ + #define APIC_BUS_CYCLE_NS 1 + + /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ +-#define apic_debug(fmt, arg...) ++#define apic_debug(fmt, arg...) do {} while (0) + + #define APIC_LVT_NUM 6 + /* 14 is the version for Xeon and Pentium 8.4.8*/ +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index f1b36cf..af8a124 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + + pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); + +- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); ++ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter); + + /* + * Assume that the pte write on a page table of the same type +@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + } + + spin_lock(&vcpu->kvm->mmu_lock); +- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) ++ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) + gentry = 0; + kvm_mmu_free_some_pages(vcpu); + ++vcpu->kvm->stat.mmu_pte_write; +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h +index 9299410..ade2f9b 100644 +--- a/arch/x86/kvm/paging_tmpl.h ++++ b/arch/x86/kvm/paging_tmpl.h +@@ -197,7 +197,7 @@ retry_walk: + if (unlikely(kvm_is_error_hva(host_addr))) + goto error; + +- ptep_user = (pt_element_t __user *)((void *)host_addr + offset); ++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset); + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) + goto error; + +@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) + if (need_flush) + kvm_flush_remote_tlbs(vcpu->kvm); + +- atomic_inc(&vcpu->kvm->arch.invlpg_counter); ++ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter); + + spin_unlock(&vcpu->kvm->mmu_lock); + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 
94a4672..1700ed1 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -3037,6 +3037,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) + return 0; + } + ++static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3); + static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) + { + struct vcpu_svm *svm = to_svm(vcpu); +@@ -3405,7 +3406,11 @@ static void reload_tss(struct kvm_vcpu *vcpu) + int cpu = raw_smp_processor_id(); + + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); ++ ++ pax_open_kernel(); + sd->tss_desc->type = 9; /* available 32/64-bit TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -3783,6 +3788,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + #endif + #endif + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + reload_tss(vcpu); + + local_irq_disable(); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 4ea7678..c715f2f 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1305,7 +1305,11 @@ static void reload_tss(void) + struct desc_struct *descs; + + descs = (void *)gdt->address; ++ ++ pax_open_kernel(); + descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -2163,6 +2167,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) + * Returns 0 on success, non-0 otherwise. + * Assumes vcpu_load() was already called. + */ ++static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3); + static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); +@@ -2633,8 +2638,11 @@ static __init int hardware_setup(void) + if (!cpu_has_vmx_flexpriority()) + flexpriority_enabled = 0; + +- if (!cpu_has_vmx_tpr_shadow()) +- kvm_x86_ops->update_cr8_intercept = NULL; ++ if (!cpu_has_vmx_tpr_shadow()) { ++ pax_open_kernel(); ++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; ++ pax_close_kernel(); ++ } + + if (enable_ept && !cpu_has_vmx_ept_2m_page()) + kvm_disable_largepages(); +@@ -3648,7 +3656,7 @@ static void vmx_set_constant_host_state(void) + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ + + asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl)); +- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */ ++ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */ + + rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); + vmcs_write32(HOST_IA32_SYSENTER_CS, low32); +@@ -6169,6 +6177,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + "jmp .Lkvm_vmx_return \n\t" + ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" + ".Lkvm_vmx_return: " ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t" ++ ".Lkvm_vmx_return2: " ++#endif ++ + /* Save guest registers, load host registers, keep flags */ + "mov %0, %c[wordsize](%%"R"sp) \n\t" + "pop %0 \n\t" +@@ -6217,6 +6231,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + #endif + [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), + [wordsize]"i"(sizeof(ulong)) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ,[cs]"i"(__KERNEL_CS) ++#endif ++ + : "cc", "memory" + , R"ax", R"bx", R"di", R"si" + #ifdef CONFIG_X86_64 +@@ -6245,7 +6264,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + } + } + +- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS)); ++ ++#if defined(CONFIG_X86_32) 
&& defined(CONFIG_PAX_KERNEXEC) ++ loadsegment(fs, __KERNEL_PERCPU); ++#endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + vmx->loaded_vmcs->launched = 1; + + vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 4c938da..6cd8090 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -907,6 +907,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) + return kvm_set_msr(vcpu, index, *data); + } + ++static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2); + static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) + { + int version; +@@ -1345,8 +1346,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) + { + struct kvm *kvm = vcpu->kvm; + int lm = is_long_mode(vcpu); +- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 +- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; ++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64 ++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32; + u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 + : kvm->arch.xen_hvm_config.blob_size_32; + u32 page_num = data & ~PAGE_MASK; +@@ -2165,6 +2166,8 @@ long kvm_arch_dev_ioctl(struct file *filp, + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; ++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) ++ goto out; + if (copy_to_user(user_msr_list->indices, &msrs_to_save, + num_msrs_to_save * sizeof(u32))) + goto out; +@@ -2340,15 +2343,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) + { +- int r; ++ int r, i; + + r = -E2BIG; + if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) + goto out; + r = -EFAULT; +- if (copy_from_user(&vcpu->arch.cpuid_entries, entries, +- cpuid->nent * sizeof(struct kvm_cpuid_entry2))) ++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) + goto out; ++ for (i = 0; i < cpuid->nent; ++i) { ++ struct kvm_cpuid_entry2 cpuid_entry; ++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry))) ++ goto out; ++ vcpu->arch.cpuid_entries[i] = cpuid_entry; ++ } + vcpu->arch.cpuid_nent = cpuid->nent; + kvm_apic_set_version(vcpu); + kvm_x86_ops->cpuid_update(vcpu); +@@ -2363,15 +2371,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) + { +- int r; ++ int r, i; + + r = -E2BIG; + if (cpuid->nent < vcpu->arch.cpuid_nent) + goto out; + r = -EFAULT; +- if (copy_to_user(entries, &vcpu->arch.cpuid_entries, +- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) ++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + goto out; ++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { ++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i]; ++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry))) ++ goto out; ++ } + return 0; + + out: +@@ -2746,7 +2758,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, + static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq) + { +- if (irq->irq < 0 || irq->irq >= 256) ++ if (irq->irq >= 256) + return -EINVAL; + if (irqchip_in_kernel(vcpu->kvm)) + return -ENXIO; +@@ -3949,6 +3961,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, + + static int 
kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 access, ++ struct x86_exception *exception) __size_overflow(1,3); ++static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, ++ struct kvm_vcpu *vcpu, u32 access, + struct x86_exception *exception) + { + void *data = val; +@@ -3980,6 +3995,9 @@ out: + /* used for instruction fetching */ + static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, ++ struct x86_exception *exception) __size_overflow(2,4); ++static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, ++ gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception) + { + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +@@ -4004,6 +4022,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt); + + static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, ++ struct x86_exception *exception) __size_overflow(2,4); ++static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, ++ gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception) + { + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +@@ -4117,12 +4138,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) + } + + static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, ++ void *val, int bytes) __size_overflow(2); ++static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, + void *val, int bytes) + { + return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); + } + + static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, ++ void *val, int bytes) __size_overflow(2); ++static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, + void *val, int bytes) + { + return emulator_write_phys(vcpu, gpa, val, bytes); +@@ -4273,6 +4298,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, + const void *old, + const void *new, + unsigned int bytes, ++ struct x86_exception *exception) __size_overflow(5); ++static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, ++ unsigned long addr, ++ const void *old, ++ const void *new, ++ unsigned int bytes, + struct x86_exception *exception) + { + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); +@@ -5162,7 +5193,7 @@ static void kvm_set_mmio_spte_mask(void) + kvm_mmu_set_mmio_spte_mask(mask); + } + +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + int r; + struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; +diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h +index d36fe23..a4b189f 100644 +--- a/arch/x86/kvm/x86.h ++++ b/arch/x86/kvm/x86.h +@@ -119,10 +119,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data); + + int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, +- struct x86_exception *exception); ++ struct x86_exception *exception) __size_overflow(2,4); + + int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, +- struct x86_exception *exception); ++ struct x86_exception *exception) __size_overflow(2,4); + + #endif +diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c +index cf4603b..7cdde38 100644 +--- a/arch/x86/lguest/boot.c ++++ b/arch/x86/lguest/boot.c +@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) + * Rebooting also tells the Host we're finished, but the RESTART flag tells the + * Launcher to reboot us. 
+ */ +-static void lguest_restart(char *reason) ++static __noreturn void lguest_restart(char *reason) + { + hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); ++ BUG(); + } + + /*G:050 +diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c +index 042f682..c92afb6 100644 +--- a/arch/x86/lib/atomic64_32.c ++++ b/arch/x86/lib/atomic64_32.c +@@ -8,18 +8,30 @@ + + long long atomic64_read_cx8(long long, const atomic64_t *v); + EXPORT_SYMBOL(atomic64_read_cx8); ++long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_read_unchecked_cx8); + long long atomic64_set_cx8(long long, const atomic64_t *v); + EXPORT_SYMBOL(atomic64_set_cx8); ++long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_set_unchecked_cx8); + long long atomic64_xchg_cx8(long long, unsigned high); + EXPORT_SYMBOL(atomic64_xchg_cx8); + long long atomic64_add_return_cx8(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_add_return_cx8); ++long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8); + long long atomic64_sub_return_cx8(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_sub_return_cx8); ++long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8); + long long atomic64_inc_return_cx8(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_inc_return_cx8); ++long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8); + long long atomic64_dec_return_cx8(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_dec_return_cx8); ++long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8); + long long atomic64_dec_if_positive_cx8(atomic64_t *v); + EXPORT_SYMBOL(atomic64_dec_if_positive_cx8); + int atomic64_inc_not_zero_cx8(atomic64_t *v); +@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8); + #ifndef CONFIG_X86_CMPXCHG64 + long long atomic64_read_386(long long, const atomic64_t *v); + EXPORT_SYMBOL(atomic64_read_386); ++long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_read_unchecked_386); + long long atomic64_set_386(long long, const atomic64_t *v); + EXPORT_SYMBOL(atomic64_set_386); ++long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_set_unchecked_386); + long long atomic64_xchg_386(long long, unsigned high); + EXPORT_SYMBOL(atomic64_xchg_386); + long long atomic64_add_return_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_add_return_386); ++long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_add_return_unchecked_386); + long long atomic64_sub_return_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_sub_return_386); ++long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_sub_return_unchecked_386); + long long atomic64_inc_return_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_inc_return_386); ++long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_inc_return_unchecked_386); + long long atomic64_dec_return_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_dec_return_386); ++long long 
atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_dec_return_unchecked_386); + long long atomic64_add_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_add_386); ++long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_add_unchecked_386); + long long atomic64_sub_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_sub_386); ++long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_sub_unchecked_386); + long long atomic64_inc_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_inc_386); ++long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_inc_unchecked_386); + long long atomic64_dec_386(long long a, atomic64_t *v); + EXPORT_SYMBOL(atomic64_dec_386); ++long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v); ++EXPORT_SYMBOL(atomic64_dec_unchecked_386); + long long atomic64_dec_if_positive_386(atomic64_t *v); + EXPORT_SYMBOL(atomic64_dec_if_positive_386); + int atomic64_inc_not_zero_386(atomic64_t *v); +diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S +index e8e7e0d..56fd1b0 100644 +--- a/arch/x86/lib/atomic64_386_32.S ++++ b/arch/x86/lib/atomic64_386_32.S +@@ -48,6 +48,10 @@ BEGIN(read) + movl (v), %eax + movl 4(v), %edx + RET_ENDP ++BEGIN(read_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++RET_ENDP + #undef v + + #define v %esi +@@ -55,6 +59,10 @@ BEGIN(set) + movl %ebx, (v) + movl %ecx, 4(v) + RET_ENDP ++BEGIN(set_unchecked) ++ movl %ebx, (v) ++ movl %ecx, 4(v) ++RET_ENDP + #undef v + + #define v %esi +@@ -70,6 +78,20 @@ RET_ENDP + BEGIN(add) + addl %eax, (v) + adcl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ subl %eax, (v) ++ sbbl %edx, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(add_unchecked) ++ addl %eax, (v) ++ adcl %edx, 4(v) + RET_ENDP + #undef v + +@@ -77,6 +99,24 @@ RET_ENDP + BEGIN(add_return) + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(add_return_unchecked) ++ addl (v), %eax ++ adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -86,6 +126,20 @@ RET_ENDP + BEGIN(sub) + subl %eax, (v) + sbbl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ addl %eax, (v) ++ adcl %edx, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(sub_unchecked) ++ subl %eax, (v) ++ sbbl %edx, 4(v) + RET_ENDP + #undef v + +@@ -96,6 +150,27 @@ BEGIN(sub_return) + sbbl $0, %edx + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(sub_return_unchecked) ++ negl %edx ++ negl %eax ++ sbbl $0, %edx ++ addl (v), %eax ++ adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -105,6 +180,20 @@ RET_ENDP + BEGIN(inc) + addl $1, (v) + adcl $0, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ subl $1, (v) ++ sbbl $0, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(inc_unchecked) ++ addl $1, (v) ++ adcl $0, 4(v) + RET_ENDP + #undef v + +@@ -114,6 +203,26 @@ BEGIN(inc_return) + movl 4(v), %edx + addl $1, %eax + adcl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 
2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(inc_return_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++ addl $1, %eax ++ adcl $0, %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -123,6 +232,20 @@ RET_ENDP + BEGIN(dec) + subl $1, (v) + sbbl $0, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ addl $1, (v) ++ adcl $0, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(dec_unchecked) ++ subl $1, (v) ++ sbbl $0, 4(v) + RET_ENDP + #undef v + +@@ -132,6 +255,26 @@ BEGIN(dec_return) + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(dec_return_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++ subl $1, %eax ++ sbbl $0, %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -143,6 +286,13 @@ BEGIN(add_unless) + adcl %edx, %edi + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + cmpl %eax, %esi + je 3f + 1: +@@ -168,6 +318,13 @@ BEGIN(inc_not_zero) + 1: + addl $1, %eax + adcl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + movl %eax, (v) + movl %edx, 4(v) + movl $1, %eax +@@ -186,6 +343,13 @@ BEGIN(dec_if_positive) + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 1f) ++#endif ++ + js 1f + movl %eax, (v) + movl %edx, 4(v) +diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S +index 391a083..d658e9f 100644 +--- a/arch/x86/lib/atomic64_cx8_32.S ++++ b/arch/x86/lib/atomic64_cx8_32.S +@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8) + CFI_STARTPROC + + read64 %ecx ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_read_cx8) + ++ENTRY(atomic64_read_unchecked_cx8) ++ CFI_STARTPROC ++ ++ read64 %ecx ++ pax_force_retaddr ++ ret ++ CFI_ENDPROC ++ENDPROC(atomic64_read_unchecked_cx8) ++ + ENTRY(atomic64_set_cx8) + CFI_STARTPROC + +@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8) + cmpxchg8b (%esi) + jne 1b + ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_set_cx8) + ++ENTRY(atomic64_set_unchecked_cx8) ++ CFI_STARTPROC ++ ++1: ++/* we don't need LOCK_PREFIX since aligned 64-bit writes ++ * are atomic on 586 and newer */ ++ cmpxchg8b (%esi) ++ jne 1b ++ ++ pax_force_retaddr ++ ret ++ CFI_ENDPROC ++ENDPROC(atomic64_set_unchecked_cx8) ++ + ENTRY(atomic64_xchg_cx8) + CFI_STARTPROC + +@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8) + cmpxchg8b (%esi) + jne 1b + ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_xchg_cx8) + +-.macro addsub_return func ins insc +-ENTRY(atomic64_\func()_return_cx8) ++.macro addsub_return func ins insc unchecked="" ++ENTRY(atomic64_\func()_return\unchecked()_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx +@@ -84,27 +110,44 @@ ENTRY(atomic64_\func()_return_cx8) + movl %edx, %ecx + \ins()l %esi, %ebx + \insc()l %edi, %ecx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++2: ++ _ASM_EXTABLE(2b, 3f) ++#endif ++.endif ++ + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b +- +-10: + movl %ebx, %eax + movl %ecx, %edx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++3: ++#endif ++.endif ++ + RESTORE edi + RESTORE esi + RESTORE ebx + RESTORE ebp ++ pax_force_retaddr + ret + CFI_ENDPROC +-ENDPROC(atomic64_\func()_return_cx8) 
++ENDPROC(atomic64_\func()_return\unchecked()_cx8) + .endm + + addsub_return add add adc + addsub_return sub sub sbb ++addsub_return add add adc _unchecked ++addsub_return sub sub sbb _unchecked + +-.macro incdec_return func ins insc +-ENTRY(atomic64_\func()_return_cx8) ++.macro incdec_return func ins insc unchecked ++ENTRY(atomic64_\func()_return\unchecked()_cx8) + CFI_STARTPROC + SAVE ebx + +@@ -114,21 +157,39 @@ ENTRY(atomic64_\func()_return_cx8) + movl %edx, %ecx + \ins()l $1, %ebx + \insc()l $0, %ecx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++2: ++ _ASM_EXTABLE(2b, 3f) ++#endif ++.endif ++ + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +-10: + movl %ebx, %eax + movl %ecx, %edx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++3: ++#endif ++.endif ++ + RESTORE ebx ++ pax_force_retaddr + ret + CFI_ENDPROC +-ENDPROC(atomic64_\func()_return_cx8) ++ENDPROC(atomic64_\func()_return\unchecked()_cx8) + .endm + + incdec_return inc add adc + incdec_return dec sub sbb ++incdec_return inc add adc _unchecked ++incdec_return dec sub sbb _unchecked + + ENTRY(atomic64_dec_if_positive_cx8) + CFI_STARTPROC +@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8) + movl %edx, %ecx + subl $1, %ebx + sbb $0, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + js 2f + LOCK_PREFIX + cmpxchg8b (%esi) +@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8) + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_dec_if_positive_cx8) +@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8) + movl %edx, %ecx + addl %esi, %ebx + adcl %edi, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 3f) ++#endif ++ + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b +@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8) + CFI_ADJUST_CFA_OFFSET -8 + RESTORE ebx + RESTORE ebp ++ pax_force_retaddr + ret + 4: + cmpl %edx, 4(%esp) +@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8) + movl %edx, %ecx + addl $1, %ebx + adcl $0, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 3f) ++#endif ++ + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b +@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8) + movl $1, %eax + 3: + RESTORE ebx ++ pax_force_retaddr + ret + 4: + testl %edx, %edx +diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S +index 78d16a5..fbcf666 100644 +--- a/arch/x86/lib/checksum_32.S ++++ b/arch/x86/lib/checksum_32.S +@@ -28,7 +28,8 @@ + #include <linux/linkage.h> + #include <asm/dwarf2.h> + #include <asm/errno.h> +- ++#include <asm/segment.h> ++ + /* + * computes a partial checksum, e.g. 
for TCP/UDP fragments + */ +@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, + + #define ARGBASE 16 + #define FP 12 +- +-ENTRY(csum_partial_copy_generic) ++ ++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %es ++ jmp csum_partial_copy_generic ++#endif ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %ds ++#endif ++ ++ENTRY(csum_partial_copy_generic) + subl $4,%esp + CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi +@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic) + jmp 4f + SRC(1: movw (%esi), %bx ) + addl $2, %esi +-DST( movw %bx, (%edi) ) ++DST( movw %bx, %es:(%edi) ) + addl $2, %edi + addw %bx, %ax + adcl $0, %eax +@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) ) + SRC(1: movl (%esi), %ebx ) + SRC( movl 4(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 4(%edi) ) ++DST( movl %edx, %es:4(%edi) ) + + SRC( movl 8(%esi), %ebx ) + SRC( movl 12(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 8(%edi) ) ++DST( movl %ebx, %es:8(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 12(%edi) ) ++DST( movl %edx, %es:12(%edi) ) + + SRC( movl 16(%esi), %ebx ) + SRC( movl 20(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 16(%edi) ) ++DST( movl %ebx, %es:16(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 20(%edi) ) ++DST( movl %edx, %es:20(%edi) ) + + SRC( movl 24(%esi), %ebx ) + SRC( movl 28(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 24(%edi) ) ++DST( movl %ebx, %es:24(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 28(%edi) ) ++DST( movl %edx, %es:28(%edi) ) + + lea 32(%esi), %esi + lea 32(%edi), %edi +@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) ) + shrl $2, %edx # This clears CF + SRC(3: movl (%esi), %ebx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + lea 4(%esi), %esi + lea 4(%edi), %edi + dec %edx +@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) ) + jb 5f + SRC( movw (%esi), %cx ) + leal 2(%esi), %esi +-DST( movw %cx, (%edi) ) ++DST( movw %cx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%ecx + SRC(5: movb (%esi), %cl ) +-DST( movb %cl, (%edi) ) ++DST( movb %cl, %es:(%edi) ) + 6: addl %ecx, %eax + adcl $0, %eax + 7: +@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) ) + + 6001: + movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + + # zero the complete destination - computing the rest + # is too much work +@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) ) + + 6002: + movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT,(%ebx) ++ movl $-EFAULT,%ss:(%ebx) + jmp 5000b + + .previous + ++ pushl_cfi %ss ++ popl_cfi %ds ++ pushl_cfi %ss ++ popl_cfi %es + popl_cfi %ebx + CFI_RESTORE ebx + popl_cfi %esi +@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) ) + popl_cfi %ecx # equivalent to addl $4,%esp + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #else + + /* Version for PentiumII/PPro */ + + #define ROUND1(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + addl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ROUND(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + adcl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ARGBASE 12 +- +-ENTRY(csum_partial_copy_generic) ++ ++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ ++#ifdef 
CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %es ++ jmp csum_partial_copy_generic ++#endif ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %ds ++#endif ++ ++ENTRY(csum_partial_copy_generic) + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 + pushl_cfi %edi +@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic) + subl %ebx, %edi + lea -1(%esi),%edx + andl $-32,%edx +- lea 3f(%ebx,%ebx), %ebx ++ lea 3f(%ebx,%ebx,2), %ebx + testl %esi, %esi + jmp *%ebx + 1: addl $64,%esi +@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic) + jb 5f + SRC( movw (%esi), %dx ) + leal 2(%esi), %esi +-DST( movw %dx, (%edi) ) ++DST( movw %dx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%edx + 5: + SRC( movb (%esi), %dl ) +-DST( movb %dl, (%edi) ) ++DST( movb %dl, %es:(%edi) ) + 6: addl %edx, %eax + adcl $0, %eax + 7: + .section .fixup, "ax" + 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + # zero the complete destination (computing the rest is too much work) + movl ARGBASE+8(%esp),%edi # dst + movl ARGBASE+12(%esp),%ecx # len +@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) ) + rep; stosb + jmp 7b + 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + jmp 7b + .previous + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %ss ++ popl_cfi %ds ++ pushl_cfi %ss ++ popl_cfi %es ++#endif ++ + popl_cfi %esi + CFI_RESTORE esi + popl_cfi %edi +@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) ) + CFI_RESTORE ebx + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #undef ROUND + #undef ROUND1 +diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S +index f2145cf..cea889d 100644 +--- a/arch/x86/lib/clear_page_64.S ++++ b/arch/x86/lib/clear_page_64.S +@@ -11,6 +11,7 @@ ENTRY(clear_page_c) + movl $4096/8,%ecx + xorl %eax,%eax + rep stosq ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(clear_page_c) +@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e) + movl $4096,%ecx + xorl %eax,%eax + rep stosb ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(clear_page_c_e) +@@ -43,6 +45,7 @@ ENTRY(clear_page) + leaq 64(%rdi),%rdi + jnz .Lloop + nop ++ pax_force_retaddr + ret + CFI_ENDPROC + .Lclear_page_end: +@@ -58,7 +61,7 @@ ENDPROC(clear_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ + 2: .byte 0xeb /* jmp <disp8> */ +diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S +index 1e572c5..2a162cd 100644 +--- a/arch/x86/lib/cmpxchg16b_emu.S ++++ b/arch/x86/lib/cmpxchg16b_emu.S +@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu: + + popf + mov $1, %al ++ pax_force_retaddr + ret + + not_same: + popf + xor %al,%al ++ pax_force_retaddr + ret + + CFI_ENDPROC +diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S +index 01c805b..dccb07f 100644 +--- a/arch/x86/lib/copy_page_64.S ++++ b/arch/x86/lib/copy_page_64.S +@@ -9,6 +9,7 @@ copy_page_c: + CFI_STARTPROC + movl $4096/8,%ecx + rep movsq ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(copy_page_c) +@@ -39,7 +40,7 @@ ENTRY(copy_page) + movq 16 (%rsi), %rdx + movq 24 (%rsi), %r8 + movq 32 (%rsi), %r9 +- movq 40 (%rsi), %r10 ++ movq 40 (%rsi), %r13 + movq 48 (%rsi), %r11 + movq 56 (%rsi), %r12 + +@@ -50,7 +51,7 @@ ENTRY(copy_page) + movq %rdx, 16 (%rdi) + movq %r8, 24 
(%rdi) + movq %r9, 32 (%rdi) +- movq %r10, 40 (%rdi) ++ movq %r13, 40 (%rdi) + movq %r11, 48 (%rdi) + movq %r12, 56 (%rdi) + +@@ -69,7 +70,7 @@ ENTRY(copy_page) + movq 16 (%rsi), %rdx + movq 24 (%rsi), %r8 + movq 32 (%rsi), %r9 +- movq 40 (%rsi), %r10 ++ movq 40 (%rsi), %r13 + movq 48 (%rsi), %r11 + movq 56 (%rsi), %r12 + +@@ -78,7 +79,7 @@ ENTRY(copy_page) + movq %rdx, 16 (%rdi) + movq %r8, 24 (%rdi) + movq %r9, 32 (%rdi) +- movq %r10, 40 (%rdi) ++ movq %r13, 40 (%rdi) + movq %r11, 48 (%rdi) + movq %r12, 56 (%rdi) + +@@ -95,6 +96,7 @@ ENTRY(copy_page) + CFI_RESTORE r13 + addq $3*8,%rsp + CFI_ADJUST_CFA_OFFSET -3*8 ++ pax_force_retaddr + ret + .Lcopy_page_end: + CFI_ENDPROC +@@ -105,7 +107,7 @@ ENDPROC(copy_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */ + 2: +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S +index 0248402..821c786 100644 +--- a/arch/x86/lib/copy_user_64.S ++++ b/arch/x86/lib/copy_user_64.S +@@ -16,6 +16,7 @@ + #include <asm/thread_info.h> + #include <asm/cpufeature.h> + #include <asm/alternative-asm.h> ++#include <asm/pgtable.h> + + /* + * By placing feature2 after feature1 in altinstructions section, we logically +@@ -29,7 +30,7 @@ + .byte 0xe9 /* 32bit jump */ + .long \orig-1f /* by default jump to orig */ + 1: +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 2: .byte 0xe9 /* near jump with 32bit immediate */ + .long \alt1-1b /* offset */ /* or alternatively to alt1 */ + 3: .byte 0xe9 /* near jump with 32bit immediate */ +@@ -71,47 +72,20 @@ + #endif + .endm + +-/* Standard copy_to_user with segment limit checking */ +-ENTRY(_copy_to_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rdi,%rcx +- addq %rdx,%rcx +- jc bad_to_user +- cmpq TI_addr_limit(%rax),%rcx +- ja bad_to_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ +- copy_user_generic_unrolled,copy_user_generic_string, \ +- copy_user_enhanced_fast_string +- CFI_ENDPROC +-ENDPROC(_copy_to_user) +- +-/* Standard copy_from_user with segment limit checking */ +-ENTRY(_copy_from_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rsi,%rcx +- addq %rdx,%rcx +- jc bad_from_user +- cmpq TI_addr_limit(%rax),%rcx +- ja bad_from_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ +- copy_user_generic_unrolled,copy_user_generic_string, \ +- copy_user_enhanced_fast_string +- CFI_ENDPROC +-ENDPROC(_copy_from_user) +- + .section .fixup,"ax" + /* must zero dest */ + ENTRY(bad_from_user) + bad_from_user: + CFI_STARTPROC ++ testl %edx,%edx ++ js bad_to_user + movl %edx,%ecx + xorl %eax,%eax + rep + stosb + bad_to_user: + movl %edx,%eax ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(bad_from_user) +@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled) + jz 17f + 1: movq (%rsi),%r8 + 2: movq 1*8(%rsi),%r9 +-3: movq 2*8(%rsi),%r10 ++3: movq 2*8(%rsi),%rax + 4: movq 3*8(%rsi),%r11 + 5: movq %r8,(%rdi) + 6: movq %r9,1*8(%rdi) +-7: movq %r10,2*8(%rdi) ++7: movq %rax,2*8(%rdi) + 8: movq %r11,3*8(%rdi) + 9: movq 4*8(%rsi),%r8 + 10: movq 5*8(%rsi),%r9 +-11: movq 6*8(%rsi),%r10 ++11: movq 6*8(%rsi),%rax + 12: movq 7*8(%rsi),%r11 + 13: movq %r8,4*8(%rdi) + 14: movq %r9,5*8(%rdi) +-15: movq %r10,6*8(%rdi) ++15: movq %rax,6*8(%rdi) + 16: movq %r11,7*8(%rdi) + leaq 64(%rsi),%rsi + leaq 64(%rdi),%rdi +@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled) + decl %ecx + jnz 21b + 23: xor 
%eax,%eax ++ pax_force_retaddr + ret + + .section .fixup,"ax" +@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string) + 3: rep + movsb + 4: xorl %eax,%eax ++ pax_force_retaddr + ret + + .section .fixup,"ax" +@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string) + 1: rep + movsb + 2: xorl %eax,%eax ++ pax_force_retaddr + ret + + .section .fixup,"ax" +diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S +index cb0c112..e3a6895 100644 +--- a/arch/x86/lib/copy_user_nocache_64.S ++++ b/arch/x86/lib/copy_user_nocache_64.S +@@ -8,12 +8,14 @@ + + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + #define FIX_ALIGNMENT 1 + + #include <asm/current.h> + #include <asm/asm-offsets.h> + #include <asm/thread_info.h> ++#include <asm/pgtable.h> + + .macro ALIGN_DESTINATION + #ifdef FIX_ALIGNMENT +@@ -50,6 +52,15 @@ + */ + ENTRY(__copy_user_nocache) + CFI_STARTPROC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%rcx ++ cmp %rcx,%rsi ++ jae 1f ++ add %rcx,%rsi ++1: ++#endif ++ + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ + ALIGN_DESTINATION +@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache) + jz 17f + 1: movq (%rsi),%r8 + 2: movq 1*8(%rsi),%r9 +-3: movq 2*8(%rsi),%r10 ++3: movq 2*8(%rsi),%rax + 4: movq 3*8(%rsi),%r11 + 5: movnti %r8,(%rdi) + 6: movnti %r9,1*8(%rdi) +-7: movnti %r10,2*8(%rdi) ++7: movnti %rax,2*8(%rdi) + 8: movnti %r11,3*8(%rdi) + 9: movq 4*8(%rsi),%r8 + 10: movq 5*8(%rsi),%r9 +-11: movq 6*8(%rsi),%r10 ++11: movq 6*8(%rsi),%rax + 12: movq 7*8(%rsi),%r11 + 13: movnti %r8,4*8(%rdi) + 14: movnti %r9,5*8(%rdi) +-15: movnti %r10,6*8(%rdi) ++15: movnti %rax,6*8(%rdi) + 16: movnti %r11,7*8(%rdi) + leaq 64(%rsi),%rsi + leaq 64(%rdi),%rdi +@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache) + jnz 21b + 23: xorl %eax,%eax + sfence ++ pax_force_retaddr + ret + + .section .fixup,"ax" +diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S +index fb903b7..c92b7f7 100644 +--- a/arch/x86/lib/csum-copy_64.S ++++ b/arch/x86/lib/csum-copy_64.S +@@ -8,6 +8,7 @@ + #include <linux/linkage.h> + #include <asm/dwarf2.h> + #include <asm/errno.h> ++#include <asm/alternative-asm.h> + + /* + * Checksum copy with exception handling. 
+@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic) + CFI_RESTORE rbp + addq $7*8, %rsp + CFI_ADJUST_CFA_OFFSET -7*8 ++ pax_force_retaddr 0, 1 + ret + CFI_RESTORE_STATE + +diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c +index 459b58a..9570bc7 100644 +--- a/arch/x86/lib/csum-wrappers_64.c ++++ b/arch/x86/lib/csum-wrappers_64.c +@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst, + len -= 2; + } + } +- isum = csum_partial_copy_generic((__force const void *)src, ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++#endif ++ ++ isum = csum_partial_copy_generic((const void __force_kernel *)src, + dst, len, isum, errp, NULL); + if (unlikely(*errp)) + goto out_err; +@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst, + } + + *errp = 0; +- return csum_partial_copy_generic(src, (void __force *)dst, ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return csum_partial_copy_generic(src, (void __force_kernel *)dst, + len, isum, NULL, errp); + } + EXPORT_SYMBOL(csum_partial_copy_to_user); +diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S +index 51f1504..ddac4c1 100644 +--- a/arch/x86/lib/getuser.S ++++ b/arch/x86/lib/getuser.S +@@ -33,15 +33,38 @@ + #include <asm/asm-offsets.h> + #include <asm/thread_info.h> + #include <asm/asm.h> ++#include <asm/segment.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg gs; ++#else ++#define __copyuser_seg ++#endif + + .text + ENTRY(__get_user_1) + CFI_STARTPROC ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user +-1: movzb (%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++1: __copyuser_seg movzb (%_ASM_AX),%edx + xor %eax,%eax ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_1) +@@ -49,12 +72,26 @@ ENDPROC(__get_user_1) + ENTRY(__get_user_2) + CFI_STARTPROC + add $1,%_ASM_AX ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user +-2: movzwl -1(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx + xor %eax,%eax ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_2) +@@ -62,12 +99,26 @@ ENDPROC(__get_user_2) + ENTRY(__get_user_4) + CFI_STARTPROC + add $3,%_ASM_AX ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user +-3: mov -3(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++3: __copyuser_seg mov -3(%_ASM_AX),%edx + xor %eax,%eax ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_4) +@@ -80,8 
+131,18 @@ ENTRY(__get_user_8) + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ + 4: movq -7(%_ASM_AX),%_ASM_DX + xor %eax,%eax ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_8) +@@ -91,6 +152,7 @@ bad_get_user: + CFI_STARTPROC + xor %edx,%edx + mov $(-EFAULT),%_ASM_AX ++ pax_force_retaddr + ret + CFI_ENDPROC + END(bad_get_user) +diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c +index 374562e..a75830b 100644 +--- a/arch/x86/lib/insn.c ++++ b/arch/x86/lib/insn.c +@@ -21,6 +21,11 @@ + #include <linux/string.h> + #include <asm/inat.h> + #include <asm/insn.h> ++#ifdef __KERNEL__ ++#include <asm/pgtable_types.h> ++#else ++#define ktla_ktva(addr) addr ++#endif + + /* Verify next sizeof(t) bytes can be on the same instruction */ + #define validate_next(t, insn, n) \ +@@ -49,8 +54,8 @@ + void insn_init(struct insn *insn, const void *kaddr, int x86_64) + { + memset(insn, 0, sizeof(*insn)); +- insn->kaddr = kaddr; +- insn->next_byte = kaddr; ++ insn->kaddr = ktla_ktva(kaddr); ++ insn->next_byte = ktla_ktva(kaddr); + insn->x86_64 = x86_64 ? 1 : 0; + insn->opnd_bytes = 4; + if (x86_64) +diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S +index 05a95e7..326f2fa 100644 +--- a/arch/x86/lib/iomap_copy_64.S ++++ b/arch/x86/lib/iomap_copy_64.S +@@ -17,6 +17,7 @@ + + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + /* + * override generic version in lib/iomap_copy.c +@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy) + CFI_STARTPROC + movl %edx,%ecx + rep movsd ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__iowrite32_copy) +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S +index efbf2a0..8893637 100644 +--- a/arch/x86/lib/memcpy_64.S ++++ b/arch/x86/lib/memcpy_64.S +@@ -34,6 +34,7 @@ + rep movsq + movl %edx, %ecx + rep movsb ++ pax_force_retaddr + ret + .Lmemcpy_e: + .previous +@@ -51,6 +52,7 @@ + + movl %edx, %ecx + rep movsb ++ pax_force_retaddr + ret + .Lmemcpy_e_e: + .previous +@@ -81,13 +83,13 @@ ENTRY(memcpy) + */ + movq 0*8(%rsi), %r8 + movq 1*8(%rsi), %r9 +- movq 2*8(%rsi), %r10 ++ movq 2*8(%rsi), %rcx + movq 3*8(%rsi), %r11 + leaq 4*8(%rsi), %rsi + + movq %r8, 0*8(%rdi) + movq %r9, 1*8(%rdi) +- movq %r10, 2*8(%rdi) ++ movq %rcx, 2*8(%rdi) + movq %r11, 3*8(%rdi) + leaq 4*8(%rdi), %rdi + jae .Lcopy_forward_loop +@@ -110,12 +112,12 @@ ENTRY(memcpy) + subq $0x20, %rdx + movq -1*8(%rsi), %r8 + movq -2*8(%rsi), %r9 +- movq -3*8(%rsi), %r10 ++ movq -3*8(%rsi), %rcx + movq -4*8(%rsi), %r11 + leaq -4*8(%rsi), %rsi + movq %r8, -1*8(%rdi) + movq %r9, -2*8(%rdi) +- movq %r10, -3*8(%rdi) ++ movq %rcx, -3*8(%rdi) + movq %r11, -4*8(%rdi) + leaq -4*8(%rdi), %rdi + jae .Lcopy_backward_loop +@@ -135,12 +137,13 @@ ENTRY(memcpy) + */ + movq 0*8(%rsi), %r8 + movq 1*8(%rsi), %r9 +- movq -2*8(%rsi, %rdx), %r10 ++ movq -2*8(%rsi, %rdx), %rcx + movq -1*8(%rsi, %rdx), %r11 + movq %r8, 0*8(%rdi) + movq %r9, 1*8(%rdi) +- movq %r10, -2*8(%rdi, %rdx) ++ movq %rcx, -2*8(%rdi, %rdx) + movq %r11, -1*8(%rdi, %rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_16bytes: +@@ -153,6 +156,7 @@ ENTRY(memcpy) + movq -1*8(%rsi, %rdx), %r9 + movq %r8, 0*8(%rdi) + movq %r9, -1*8(%rdi, %rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_8bytes: +@@ -166,6 +170,7 @@ ENTRY(memcpy) + movl -4(%rsi, %rdx), %r8d + movl %ecx, (%rdi) + movl %r8d, -4(%rdi, 
%rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_3bytes: +@@ -183,6 +188,7 @@ ENTRY(memcpy) + jnz .Lloop_1 + + .Lend: ++ pax_force_retaddr + retq + CFI_ENDPROC + ENDPROC(memcpy) +diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S +index ee16461..c39c199 100644 +--- a/arch/x86/lib/memmove_64.S ++++ b/arch/x86/lib/memmove_64.S +@@ -61,13 +61,13 @@ ENTRY(memmove) + 5: + sub $0x20, %rdx + movq 0*8(%rsi), %r11 +- movq 1*8(%rsi), %r10 ++ movq 1*8(%rsi), %rcx + movq 2*8(%rsi), %r9 + movq 3*8(%rsi), %r8 + leaq 4*8(%rsi), %rsi + + movq %r11, 0*8(%rdi) +- movq %r10, 1*8(%rdi) ++ movq %rcx, 1*8(%rdi) + movq %r9, 2*8(%rdi) + movq %r8, 3*8(%rdi) + leaq 4*8(%rdi), %rdi +@@ -81,10 +81,10 @@ ENTRY(memmove) + 4: + movq %rdx, %rcx + movq -8(%rsi, %rdx), %r11 +- lea -8(%rdi, %rdx), %r10 ++ lea -8(%rdi, %rdx), %r9 + shrq $3, %rcx + rep movsq +- movq %r11, (%r10) ++ movq %r11, (%r9) + jmp 13f + .Lmemmove_end_forward: + +@@ -95,14 +95,14 @@ ENTRY(memmove) + 7: + movq %rdx, %rcx + movq (%rsi), %r11 +- movq %rdi, %r10 ++ movq %rdi, %r9 + leaq -8(%rsi, %rdx), %rsi + leaq -8(%rdi, %rdx), %rdi + shrq $3, %rcx + std + rep movsq + cld +- movq %r11, (%r10) ++ movq %r11, (%r9) + jmp 13f + + /* +@@ -127,13 +127,13 @@ ENTRY(memmove) + 8: + subq $0x20, %rdx + movq -1*8(%rsi), %r11 +- movq -2*8(%rsi), %r10 ++ movq -2*8(%rsi), %rcx + movq -3*8(%rsi), %r9 + movq -4*8(%rsi), %r8 + leaq -4*8(%rsi), %rsi + + movq %r11, -1*8(%rdi) +- movq %r10, -2*8(%rdi) ++ movq %rcx, -2*8(%rdi) + movq %r9, -3*8(%rdi) + movq %r8, -4*8(%rdi) + leaq -4*8(%rdi), %rdi +@@ -151,11 +151,11 @@ ENTRY(memmove) + * Move data from 16 bytes to 31 bytes. + */ + movq 0*8(%rsi), %r11 +- movq 1*8(%rsi), %r10 ++ movq 1*8(%rsi), %rcx + movq -2*8(%rsi, %rdx), %r9 + movq -1*8(%rsi, %rdx), %r8 + movq %r11, 0*8(%rdi) +- movq %r10, 1*8(%rdi) ++ movq %rcx, 1*8(%rdi) + movq %r9, -2*8(%rdi, %rdx) + movq %r8, -1*8(%rdi, %rdx) + jmp 13f +@@ -167,9 +167,9 @@ ENTRY(memmove) + * Move data from 8 bytes to 15 bytes. + */ + movq 0*8(%rsi), %r11 +- movq -1*8(%rsi, %rdx), %r10 ++ movq -1*8(%rsi, %rdx), %r9 + movq %r11, 0*8(%rdi) +- movq %r10, -1*8(%rdi, %rdx) ++ movq %r9, -1*8(%rdi, %rdx) + jmp 13f + 10: + cmpq $4, %rdx +@@ -178,9 +178,9 @@ ENTRY(memmove) + * Move data from 4 bytes to 7 bytes. + */ + movl (%rsi), %r11d +- movl -4(%rsi, %rdx), %r10d ++ movl -4(%rsi, %rdx), %r9d + movl %r11d, (%rdi) +- movl %r10d, -4(%rdi, %rdx) ++ movl %r9d, -4(%rdi, %rdx) + jmp 13f + 11: + cmp $2, %rdx +@@ -189,9 +189,9 @@ ENTRY(memmove) + * Move data from 2 bytes to 3 bytes. + */ + movw (%rsi), %r11w +- movw -2(%rsi, %rdx), %r10w ++ movw -2(%rsi, %rdx), %r9w + movw %r11w, (%rdi) +- movw %r10w, -2(%rdi, %rdx) ++ movw %r9w, -2(%rdi, %rdx) + jmp 13f + 12: + cmp $1, %rdx +@@ -202,6 +202,7 @@ ENTRY(memmove) + movb (%rsi), %r11b + movb %r11b, (%rdi) + 13: ++ pax_force_retaddr + retq + CFI_ENDPROC + +@@ -210,6 +211,7 @@ ENTRY(memmove) + /* Forward moving data. 
*/ + movq %rdx, %rcx + rep movsb ++ pax_force_retaddr + retq + .Lmemmove_end_forward_efs: + .previous +diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S +index 79bd454..dff325a 100644 +--- a/arch/x86/lib/memset_64.S ++++ b/arch/x86/lib/memset_64.S +@@ -31,6 +31,7 @@ + movl %r8d,%ecx + rep stosb + movq %r9,%rax ++ pax_force_retaddr + ret + .Lmemset_e: + .previous +@@ -53,6 +54,7 @@ + movl %edx,%ecx + rep stosb + movq %r9,%rax ++ pax_force_retaddr + ret + .Lmemset_e_e: + .previous +@@ -60,13 +62,13 @@ + ENTRY(memset) + ENTRY(__memset) + CFI_STARTPROC +- movq %rdi,%r10 + movq %rdx,%r11 + + /* expand byte value */ + movzbl %sil,%ecx + movabs $0x0101010101010101,%rax + mul %rcx /* with rax, clobbers rdx */ ++ movq %rdi,%rdx + + /* align dst */ + movl %edi,%r9d +@@ -120,7 +122,8 @@ ENTRY(__memset) + jnz .Lloop_1 + + .Lende: +- movq %r10,%rax ++ movq %rdx,%rax ++ pax_force_retaddr + ret + + CFI_RESTORE_STATE +diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c +index c9f2d9b..e7fd2c0 100644 +--- a/arch/x86/lib/mmx_32.c ++++ b/arch/x86/lib/mmx_32.c +@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) + { + void *p; + int i; ++ unsigned long cr0; + + if (unlikely(in_interrupt())) + return __memcpy(to, from, len); +@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" /* This set is 28 bytes */ +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" /* This set is 28 bytes */ ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, "ax"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from)); ++ : "=&r" (cr0) : "r" (from) : "ax"); + + for ( ; i > 5; i--) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, "ax"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" 
(to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -158,6 +187,7 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + +@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from) + * but that is for later. -AV + */ + __asm__ __volatile__( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, "ax"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < (4096-320)/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movntq %%mm0, (%1)\n" +- " movq 8(%0), %%mm1\n" +- " movntq %%mm1, 8(%1)\n" +- " movq 16(%0), %%mm2\n" +- " movntq %%mm2, 16(%1)\n" +- " movq 24(%0), %%mm3\n" +- " movntq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm4\n" +- " movntq %%mm4, 32(%1)\n" +- " movq 40(%0), %%mm5\n" +- " movntq %%mm5, 40(%1)\n" +- " movq 48(%0), %%mm6\n" +- " movntq %%mm6, 48(%1)\n" +- " movq 56(%0), %%mm7\n" +- " movntq %%mm7, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movntq %%mm0, (%2)\n" ++ " movq 8(%1), %%mm1\n" ++ " movntq %%mm1, 8(%2)\n" ++ " movq 16(%1), %%mm2\n" ++ " movntq %%mm2, 16(%2)\n" ++ " movq 24(%1), %%mm3\n" ++ " movntq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm4\n" ++ " movntq %%mm4, 32(%2)\n" ++ " movq 40(%1), %%mm5\n" ++ " movntq %%mm5, 40(%2)\n" ++ " movq 48(%1), %%mm6\n" ++ " movntq %%mm6, 48(%2)\n" ++ " movq 56(%1), %%mm7\n" ++ " movntq %%mm7, 56(%2)\n" + ".section .fixup, "ax"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -280,47 +338,76 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, "ax"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- 
_ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < 4096/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, "ax"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" (to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S +index 69fa106..adda88b 100644 +--- a/arch/x86/lib/msr-reg.S ++++ b/arch/x86/lib/msr-reg.S +@@ -3,6 +3,7 @@ + #include <asm/dwarf2.h> + #include <asm/asm.h> + #include <asm/msr.h> ++#include <asm/alternative-asm.h> + + #ifdef CONFIG_X86_64 + /* +@@ -16,7 +17,7 @@ ENTRY(native_\op()_safe_regs) + CFI_STARTPROC + pushq_cfi %rbx + pushq_cfi %rbp +- movq %rdi, %r10 /* Save pointer */ ++ movq %rdi, %r9 /* Save pointer */ + xorl %r11d, %r11d /* Return value */ + movl (%rdi), %eax + movl 4(%rdi), %ecx +@@ -27,16 +28,17 @@ ENTRY(native_\op()_safe_regs) + movl 28(%rdi), %edi + CFI_REMEMBER_STATE + 1: \op +-2: movl %eax, (%r10) ++2: movl %eax, (%r9) + movl %r11d, %eax /* Return value */ +- movl %ecx, 4(%r10) +- movl %edx, 8(%r10) +- movl %ebx, 12(%r10) +- movl %ebp, 20(%r10) +- movl %esi, 24(%r10) +- movl %edi, 28(%r10) ++ movl %ecx, 4(%r9) ++ movl %edx, 8(%r9) ++ movl %ebx, 12(%r9) ++ movl %ebp, 20(%r9) ++ movl %esi, 24(%r9) ++ movl %edi, 28(%r9) + popq_cfi %rbp + popq_cfi %rbx ++ pax_force_retaddr + ret + 3: + CFI_RESTORE_STATE +diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S +index 36b0d15..d381858 100644 +--- a/arch/x86/lib/putuser.S ++++ b/arch/x86/lib/putuser.S +@@ -15,7 +15,9 @@ + #include <asm/thread_info.h> + #include <asm/errno.h> + #include <asm/asm.h> +- ++#include <asm/segment.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> + + /* + * __put_user_X +@@ -29,52 +31,119 @@ + * as they get called from within inline assembly. 
+ */ + +-#define ENTER CFI_STARTPROC ; \ +- GET_THREAD_INFO(%_ASM_BX) +-#define EXIT ret ; \ ++#define ENTER CFI_STARTPROC ++#define EXIT pax_force_retaddr; ret ; \ + CFI_ENDPROC + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define _DEST %_ASM_CX,%_ASM_BX ++#else ++#define _DEST %_ASM_CX ++#endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg gs; ++#else ++#define __copyuser_seg ++#endif ++ + .text + ENTRY(__put_user_1) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + cmp TI_addr_limit(%_ASM_BX),%_ASM_CX + jae bad_put_user +-1: movb %al,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++1: __copyuser_seg movb %al,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_1) + + ENTRY(__put_user_2) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $1,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user +-2: movw %ax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++2: __copyuser_seg movw %ax,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_2) + + ENTRY(__put_user_4) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $3,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user +-3: movl %eax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++3: __copyuser_seg movl %eax,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_4) + + ENTRY(__put_user_8) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $7,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user +-4: mov %_ASM_AX,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++4: __copyuser_seg mov %_ASM_AX,(_DEST) + #ifdef CONFIG_X86_32 +-5: movl %edx,4(%_ASM_CX) ++5: __copyuser_seg movl %edx,4(_DEST) + #endif + xor %eax,%eax + EXIT +diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S +index 1cad221..de671ee 100644 +--- a/arch/x86/lib/rwlock.S ++++ b/arch/x86/lib/rwlock.S +@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed) + FRAME + 0: LOCK_PREFIX + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + 1: rep; nop + cmpl $WRITE_LOCK_CMP, (%__lock_ptr) + jne 1b + LOCK_PREFIX + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + jnz 0b + ENDFRAME ++ pax_force_retaddr + ret + CFI_ENDPROC + END(__write_lock_failed) +@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed) + FRAME + 0: LOCK_PREFIX + READ_LOCK_SIZE(inc) 
(%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ READ_LOCK_SIZE(dec) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + 1: rep; nop + READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) + js 1b + LOCK_PREFIX + READ_LOCK_SIZE(dec) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ READ_LOCK_SIZE(inc) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + js 0b + ENDFRAME ++ pax_force_retaddr + ret + CFI_ENDPROC + END(__read_lock_failed) +diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S +index 5dff5f0..cadebf4 100644 +--- a/arch/x86/lib/rwsem.S ++++ b/arch/x86/lib/rwsem.S +@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed) + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_down_read_failed) +@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed) + movq %rax,%rdi + call rwsem_down_write_failed + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_down_write_failed) +@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake) + movq %rax,%rdi + call rwsem_wake + restore_common_regs +-1: ret ++1: pax_force_retaddr ++ ret + CFI_ENDPROC + ENDPROC(call_rwsem_wake) + +@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake) + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_downgrade_wake) +diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S +index a63efd6..ccecad8 100644 +--- a/arch/x86/lib/thunk_64.S ++++ b/arch/x86/lib/thunk_64.S +@@ -8,6 +8,7 @@ + #include <linux/linkage.h> + #include <asm/dwarf2.h> + #include <asm/calling.h> ++#include <asm/alternative-asm.h> + + /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ + .macro THUNK name, func, put_ret_addr_in_rdi=0 +@@ -41,5 +42,6 @@ + SAVE_ARGS + restore: + RESTORE_ARGS ++ pax_force_retaddr + ret + CFI_ENDPROC +diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c +index e218d5d..a99a1eb 100644 +--- a/arch/x86/lib/usercopy_32.c ++++ b/arch/x86/lib/usercopy_32.c +@@ -43,7 +43,7 @@ do { \ + __asm__ __volatile__( \ + " testl %1,%1\n" \ + " jz 2f\n" \ +- "0: lodsb\n" \ ++ "0: "__copyuser_seg"lodsb\n" \ + " stosb\n" \ + " testb %%al,%%al\n" \ + " jz 1f\n" \ +@@ -128,10 +128,12 @@ do { \ + int __d0; \ + might_fault(); \ + __asm__ __volatile__( \ ++ __COPYUSER_SET_ES \ + "0: rep; stosl\n" \ + " movl %2,%0\n" \ + "1: rep; stosb\n" \ + "2:\n" \ ++ __COPYUSER_RESTORE_ES \ + ".section .fixup,"ax"\n" \ + "3: lea 0(%2,%0,4),%0\n" \ + " jmp 2b\n" \ +@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n) + might_fault(); + + __asm__ __volatile__( ++ __COPYUSER_SET_ES + " testl %0, %0\n" + " jz 3f\n" + " andl %0,%%ecx\n" +@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n) + " subl %%ecx,%0\n" + " addl %0,%%eax\n" + "1:\n" ++ __COPYUSER_RESTORE_ES + ".section .fixup,"ax"\n" + "2: xorl %%eax,%%eax\n" + " jmp 1b\n" +@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user); + + #ifdef CONFIG_X86_INTEL_USERCOPY + static unsigned long +-__copy_user_intel(void __user *to, const void *from, unsigned long size) ++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) + { + int d0, d1; + __asm__ __volatile__( +@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) + " .align 2,0x90\n" + "3: movl 0(%4), %%eax\n" + "4: movl 4(%4), %%edx\n" +- "5: movl %%eax, 0(%3)\n" +- "6: movl %%edx, 4(%3)\n" ++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n" ++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n" + "7: movl 8(%4), %%eax\n" + "8: movl 12(%4),%%edx\n" +- "9: movl %%eax, 8(%3)\n" +- "10: movl %%edx, 12(%3)\n" ++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n" ++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n" + "11: movl 16(%4), %%eax\n" + "12: movl 20(%4), %%edx\n" +- "13: movl %%eax, 16(%3)\n" +- "14: movl %%edx, 20(%3)\n" ++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n" ++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n" + "15: movl 24(%4), %%eax\n" + "16: movl 28(%4), %%edx\n" +- "17: movl %%eax, 24(%3)\n" +- "18: movl %%edx, 28(%3)\n" ++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n" ++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n" + "19: movl 32(%4), %%eax\n" + "20: movl 36(%4), %%edx\n" +- "21: movl %%eax, 32(%3)\n" +- "22: movl %%edx, 36(%3)\n" ++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n" ++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n" + "23: movl 40(%4), %%eax\n" + "24: movl 44(%4), %%edx\n" +- "25: movl %%eax, 40(%3)\n" +- "26: movl %%edx, 44(%3)\n" ++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n" ++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n" + "27: movl 48(%4), %%eax\n" + "28: movl 52(%4), %%edx\n" +- "29: movl %%eax, 48(%3)\n" +- "30: movl %%edx, 52(%3)\n" ++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n" ++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n" + "31: movl 56(%4), %%eax\n" + "32: movl 60(%4), %%edx\n" +- "33: movl %%eax, 56(%3)\n" +- "34: movl %%edx, 60(%3)\n" ++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n" ++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" ++ __COPYUSER_SET_ES + "99: rep; 
movsl\n" + "36: movl %%eax, %0\n" + "37: rep; movsb\n" + "100:\n" ++ __COPYUSER_RESTORE_ES + ".section .fixup,"ax"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" +@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) + } + + static unsigned long ++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) ++{ ++ int d0, d1; ++ __asm__ __volatile__( ++ " .align 2,0x90\n" ++ "1: "__copyuser_seg" movl 32(%4), %%eax\n" ++ " cmpl $67, %0\n" ++ " jbe 3f\n" ++ "2: "__copyuser_seg" movl 64(%4), %%eax\n" ++ " .align 2,0x90\n" ++ "3: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "4: "__copyuser_seg" movl 4(%4), %%edx\n" ++ "5: movl %%eax, 0(%3)\n" ++ "6: movl %%edx, 4(%3)\n" ++ "7: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "8: "__copyuser_seg" movl 12(%4),%%edx\n" ++ "9: movl %%eax, 8(%3)\n" ++ "10: movl %%edx, 12(%3)\n" ++ "11: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "12: "__copyuser_seg" movl 20(%4), %%edx\n" ++ "13: movl %%eax, 16(%3)\n" ++ "14: movl %%edx, 20(%3)\n" ++ "15: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "16: "__copyuser_seg" movl 28(%4), %%edx\n" ++ "17: movl %%eax, 24(%3)\n" ++ "18: movl %%edx, 28(%3)\n" ++ "19: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "20: "__copyuser_seg" movl 36(%4), %%edx\n" ++ "21: movl %%eax, 32(%3)\n" ++ "22: movl %%edx, 36(%3)\n" ++ "23: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "24: "__copyuser_seg" movl 44(%4), %%edx\n" ++ "25: movl %%eax, 40(%3)\n" ++ "26: movl %%edx, 44(%3)\n" ++ "27: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "28: "__copyuser_seg" movl 52(%4), %%edx\n" ++ "29: movl %%eax, 48(%3)\n" ++ "30: movl %%edx, 52(%3)\n" ++ "31: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "32: "__copyuser_seg" movl 60(%4), %%edx\n" ++ "33: movl %%eax, 56(%3)\n" ++ "34: movl %%edx, 60(%3)\n" ++ " addl $-64, %0\n" ++ " addl $64, %4\n" ++ " addl $64, %3\n" ++ " cmpl $63, %0\n" ++ " ja 1b\n" ++ "35: movl %0, %%eax\n" ++ " shrl $2, %0\n" ++ " andl $3, %%eax\n" ++ " cld\n" ++ "99: rep; "__copyuser_seg" movsl\n" ++ "36: movl %%eax, %0\n" ++ "37: rep; "__copyuser_seg" movsb\n" ++ "100:\n" ++ ".section .fixup,"ax"\n" ++ "101: lea 0(%%eax,%0,4),%0\n" ++ " jmp 100b\n" ++ ".previous\n" ++ ".section __ex_table,"a"\n" ++ " .align 4\n" ++ " .long 1b,100b\n" ++ " .long 2b,100b\n" ++ " .long 3b,100b\n" ++ " .long 4b,100b\n" ++ " .long 5b,100b\n" ++ " .long 6b,100b\n" ++ " .long 7b,100b\n" ++ " .long 8b,100b\n" ++ " .long 9b,100b\n" ++ " .long 10b,100b\n" ++ " .long 11b,100b\n" ++ " .long 12b,100b\n" ++ " .long 13b,100b\n" ++ " .long 14b,100b\n" ++ " .long 15b,100b\n" ++ " .long 16b,100b\n" ++ " .long 17b,100b\n" ++ " .long 18b,100b\n" ++ " .long 19b,100b\n" ++ " .long 20b,100b\n" ++ " .long 21b,100b\n" ++ " .long 22b,100b\n" ++ " .long 23b,100b\n" ++ " .long 24b,100b\n" ++ " .long 25b,100b\n" ++ " .long 26b,100b\n" ++ " .long 27b,100b\n" ++ " .long 28b,100b\n" ++ " .long 29b,100b\n" ++ " .long 30b,100b\n" ++ " .long 31b,100b\n" ++ " .long 32b,100b\n" ++ " .long 33b,100b\n" ++ " .long 34b,100b\n" ++ " .long 35b,100b\n" ++ " .long 36b,100b\n" ++ " .long 37b,100b\n" ++ " .long 99b,101b\n" ++ ".previous" ++ : "=&c"(size), "=&D" (d0), "=&S" (d1) ++ : "1"(to), "2"(from), "0"(size) ++ : "eax", "edx", "memory"); ++ return size; ++} ++ ++static unsigned long ++__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3); ++static unsigned long + __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) + { + int d0, d1; + __asm__ __volatile__( + " .align 
2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movl %%eax, 0(%3)\n" + " movl %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movl %%eax, 8(%3)\n" + " movl %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n" + " movl %%eax, 16(%3)\n" + " movl %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movl %%eax, 24(%3)\n" + " movl %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movl %%eax, 32(%3)\n" + " movl %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movl %%eax, 40(%3)\n" + " movl %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movl %%eax, 48(%3)\n" + " movl %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movl %%eax, 56(%3)\n" + " movl %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,"ax"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) + */ + + static unsigned long __copy_user_zeroing_intel_nocache(void *to, ++ const void __user *from, unsigned long size) __size_overflow(3); ++static unsigned long __copy_user_zeroing_intel_nocache(void *to, + const void __user *from, unsigned long size) + { + int d0, d1; + + __asm__ __volatile__( + " .align 2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movnti %%eax, 0(%3)\n" + " movnti %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movnti %%eax, 8(%3)\n" + " movnti %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n" + " movnti %%eax, 16(%3)\n" + " movnti %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" 
movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movnti %%eax, 24(%3)\n" + " movnti %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movnti %%eax, 32(%3)\n" + " movnti %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movnti %%eax, 40(%3)\n" + " movnti %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movnti %%eax, 48(%3)\n" + " movnti %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movnti %%eax, 56(%3)\n" + " movnti %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,"ax"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, + } + + static unsigned long __copy_user_intel_nocache(void *to, ++ const void __user *from, unsigned long size) __size_overflow(3); ++static unsigned long __copy_user_intel_nocache(void *to, + const void __user *from, unsigned long size) + { + int d0, d1; + + __asm__ __volatile__( + " .align 2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movnti %%eax, 0(%3)\n" + " movnti %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movnti %%eax, 8(%3)\n" + " movnti %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n" + " movnti %%eax, 16(%3)\n" + " movnti %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movnti %%eax, 24(%3)\n" + " movnti %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movnti %%eax, 32(%3)\n" + " movnti %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movnti %%eax, 40(%3)\n" + " movnti %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movnti %%eax, 48(%3)\n" + " movnti %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movnti %%eax, 
56(%3)\n" + " movnti %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to, + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,"ax"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to, + */ + unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, + unsigned long size); +-unsigned long __copy_user_intel(void __user *to, const void *from, ++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, ++ unsigned long size); ++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, + unsigned long size); + unsigned long __copy_user_zeroing_intel_nocache(void *to, + const void __user *from, unsigned long size); + #endif /* CONFIG_X86_INTEL_USERCOPY */ + + /* Generic arbitrary sized copy. */ +-#define __copy_user(to, from, size) \ ++#define __copy_user(to, from, size, prefix, set, restore) \ + do { \ + int __d0, __d1, __d2; \ + __asm__ __volatile__( \ ++ set \ + " cmp $7,%0\n" \ + " jbe 1f\n" \ + " movl %1,%0\n" \ + " negl %0\n" \ + " andl $7,%0\n" \ + " subl %0,%3\n" \ +- "4: rep; movsb\n" \ ++ "4: rep; "prefix"movsb\n" \ + " movl %3,%0\n" \ + " shrl $2,%0\n" \ + " andl $3,%3\n" \ + " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ ++ "0: rep; "prefix"movsl\n" \ + " movl %3,%0\n" \ +- "1: rep; movsb\n" \ ++ "1: rep; "prefix"movsb\n" \ + "2:\n" \ ++ restore \ + ".section .fixup,"ax"\n" \ + "5: addl %3,%0\n" \ + " jmp 2b\n" \ +@@ -682,14 +805,14 @@ do { \ + " negl %0\n" \ + " andl $7,%0\n" \ + " subl %0,%3\n" \ +- "4: rep; movsb\n" \ ++ "4: rep; "__copyuser_seg"movsb\n" \ + " movl %3,%0\n" \ + " shrl $2,%0\n" \ + " andl $3,%3\n" \ + " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ ++ "0: rep; "__copyuser_seg"movsl\n" \ + " movl %3,%0\n" \ +- "1: rep; movsb\n" \ ++ "1: rep; "__copyuser_seg"movsb\n" \ + "2:\n" \ + ".section .fixup,"ax"\n" \ + "5: addl %3,%0\n" \ +@@ -775,9 +898,9 @@ survive: + } + #endif + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES); + else +- n = __copy_user_intel(to, from, n); ++ n = __generic_copy_to_user_intel(to, from, n); + return n; + } + EXPORT_SYMBOL(__copy_to_user_ll); +@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, + unsigned long n) + { + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + else +- n = __copy_user_intel((void __user *)to, +- (const void *)from, n); ++ n = __generic_copy_from_user_intel(to, from, n); + return n; + } + EXPORT_SYMBOL(__copy_from_user_ll_nozero); +@@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr + if (n > 64 && cpu_has_xmm2) + n = __copy_user_intel_nocache(to, from, n); + else +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + #else +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + #endif + return n; + } + EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); + +-/** +- * copy_to_user: - Copy a block of data into user space. +- * @to: Destination address, in user space. +- * @from: Source address, in kernel space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. 
This function may sleep. +- * +- * Copy data from kernel space to user space. +- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- */ +-unsigned long +-copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (access_ok(VERIFY_WRITE, to, n)) +- n = __copy_to_user(to, from, n); +- return n; +-} +-EXPORT_SYMBOL(copy_to_user); +- +-/** +- * copy_from_user: - Copy a block of data from user space. +- * @to: Destination address, in kernel space. +- * @from: Source address, in user space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. This function may sleep. +- * +- * Copy data from user space to kernel space. +- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- * +- * If some data could not be copied, this function will pad the copied +- * data to the requested size using zero bytes. +- */ +-unsigned long +-_copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (access_ok(VERIFY_READ, from, n)) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; +-} +-EXPORT_SYMBOL(_copy_from_user); +- + void copy_from_user_overflow(void) + { + WARN(1, "Buffer overflow detected!\n"); + } + EXPORT_SYMBOL(copy_from_user_overflow); ++ ++void copy_to_user_overflow(void) ++{ ++ WARN(1, "Buffer overflow detected!\n"); ++} ++EXPORT_SYMBOL(copy_to_user_overflow); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++void __set_fs(mm_segment_t x) ++{ ++ switch (x.seg) { ++ case 0: ++ loadsegment(gs, 0); ++ break; ++ case TASK_SIZE_MAX: ++ loadsegment(gs, __USER_DS); ++ break; ++ case -1UL: ++ loadsegment(gs, __KERNEL_DS); ++ break; ++ default: ++ BUG(); ++ } ++ return; ++} ++EXPORT_SYMBOL(__set_fs); ++ ++void set_fs(mm_segment_t x) ++{ ++ current_thread_info()->addr_limit = x; ++ __set_fs(x); ++} ++EXPORT_SYMBOL(set_fs); ++#endif +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c +index b7c2849..8633ad8 100644 +--- a/arch/x86/lib/usercopy_64.c ++++ b/arch/x86/lib/usercopy_64.c +@@ -42,6 +42,12 @@ long + __strncpy_from_user(char *dst, const char __user *src, long count) + { + long res; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++#endif ++ + __do_strncpy_from_user(dst, src, count, res); + return res; + } +@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size) + { + long __d0; + might_fault(); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)addr < PAX_USER_SHADOW_BASE) ++ addr += PAX_USER_SHADOW_BASE; ++#endif ++ + /* no memory constraint because it doesn't change any memory gcc knows + about */ + asm volatile( +@@ -149,12 +161,20 @@ long strlen_user(const char __user *s) + } + EXPORT_SYMBOL(strlen_user); + +-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) ++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len) + { +- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { +- return copy_user_generic((__force void *)to, (__force void *)from, len); +- } +- return len; ++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if ((unsigned long)to < PAX_USER_SHADOW_BASE) ++ to += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)from < PAX_USER_SHADOW_BASE) ++ from += PAX_USER_SHADOW_BASE; ++#endif ++ ++ return copy_user_generic((void __force_kernel *)to, (void __force_kernel 
*)from, len); ++ } ++ return len; + } + EXPORT_SYMBOL(copy_in_user); + +@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user); + * it is not necessary to optimize tail handling. + */ + unsigned long +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) ++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) + { + char c; + unsigned zero_len; +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c +index d0474ad..36e9257 100644 +--- a/arch/x86/mm/extable.c ++++ b/arch/x86/mm/extable.c +@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs) + const struct exception_table_entry *fixup; + + #ifdef CONFIG_PNPBIOS +- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { ++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; + extern u32 pnp_bios_is_utter_crap; + pnp_bios_is_utter_crap = 1; +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index 5db0490..2ddce45 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -13,11 +13,18 @@ + #include <linux/perf_event.h> /* perf_sw_event */ + #include <linux/hugetlb.h> /* hstate_index_to_shift */ + #include <linux/prefetch.h> /* prefetchw */ ++#include <linux/unistd.h> ++#include <linux/compiler.h> + + #include <asm/traps.h> /* dotraplinkage, ... */ + #include <asm/pgalloc.h> /* pgd_*(), ... */ + #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ + #include <asm/fixmap.h> /* VSYSCALL_START */ ++#include <asm/tlbflush.h> ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#include <asm/stacktrace.h> ++#endif + + /* + * Page fault error code bits: +@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs) + int ret = 0; + + /* kprobe_running() needs smp_processor_id() */ +- if (kprobes_built_in() && !user_mode_vm(regs)) { ++ if (kprobes_built_in() && !user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, 14)) + ret = 1; +@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, + return !instr_lo || (instr_lo>>1) == 1; + case 0x00: + /* Prefetch instruction is 0x0F0D or 0x0F18 */ +- if (probe_kernel_address(instr, opcode)) ++ if (user_mode(regs)) { ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) ++ return 0; ++ } else if (probe_kernel_address(instr, opcode)) + return 0; + + *prefetch = (instr_lo == 0xF) && +@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) + while (instr < max_instr) { + unsigned char opcode; + +- if (probe_kernel_address(instr, opcode)) ++ if (user_mode(regs)) { ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) ++ break; ++ } else if (probe_kernel_address(instr, opcode)) + break; + + instr++; +@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, + force_sig_info(si_signo, &info, tsk); + } + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault(struct pt_regs *regs); ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset(mm, address); ++ if (!pgd_present(*pgd)) ++ return NULL; ++ pud = 
pud_offset(pgd, address); ++ if (!pud_present(*pud)) ++ return NULL; ++ pmd = pmd_offset(pud, address); ++ if (!pmd_present(*pmd)) ++ return NULL; ++ return pmd; ++} ++#endif ++ + DEFINE_SPINLOCK(pgd_lock); + LIST_HEAD(pgd_list); + +@@ -231,10 +272,22 @@ void vmalloc_sync_all(void) + for (address = VMALLOC_START & PMD_MASK; + address >= TASK_SIZE && address < FIXADDR_TOP; + address += PMD_SIZE) { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + spin_lock(&pgd_lock); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu); ++ pmd_t *ret; ++#else + list_for_each_entry(page, &pgd_list, lru) { ++ pgd_t *pgd = page_address(page); + spinlock_t *pgt_lock; + pmd_t *ret; + +@@ -242,8 +295,13 @@ void vmalloc_sync_all(void) + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + + spin_lock(pgt_lock); +- ret = vmalloc_sync_one(page_address(page), address); ++#endif ++ ++ ret = vmalloc_sync_one(pgd, address); ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + spin_unlock(pgt_lock); ++#endif + + if (!ret) + break; +@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) + * an interrupt in the middle of a task switch.. + */ + pgd_paddr = read_cr3(); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK)); ++#endif ++ + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; +@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) + * happen within a race in page table update. In the later + * case just flush: + */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK)); ++ pgd = pgd_offset_cpu(smp_processor_id(), address); ++#else + pgd = pgd_offset(current->active_mm, address); ++#endif ++ + pgd_ref = pgd_offset_k(address); + if (pgd_none(*pgd_ref)) + return -1; +@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) + static int is_errata100(struct pt_regs *regs, unsigned long address) + { + #ifdef CONFIG_X86_64 +- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) + return 1; + #endif + return 0; +@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) + } + + static const char nx_warning[] = KERN_CRIT +-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; ++"kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d, task: %s, pid: %d)\n"; + + static void + show_fault_oops(struct pt_regs *regs, unsigned long error_code, +@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, + if (!oops_may_print()) + return; + +- if (error_code & PF_INSTR) { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { + unsigned int level; + + pte_t *pte = lookup_address(address, &level); + + if (pte && pte_present(*pte) && !pte_exec(*pte)) +- printk(nx_warning, current_uid()); ++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current)); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (init_mm.start_code <= address && address < init_mm.end_code) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ++ current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ } ++#endif ++ + printk(KERN_ALERT "BUG: unable to handle kernel "); + if (address < PAGE_SIZE) + printk(KERN_CONT "NULL pointer dereference"); +@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, + } + #endif + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (pax_is_fetch_fault(regs, error_code, address)) { ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ switch (pax_handle_fetch_fault(regs)) { ++ case 2: ++ return; ++ } ++#endif ++ ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + if (unlikely(show_unhandled_signals)) + show_signal_msg(regs, error_code, address, tsk); + +@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, + if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + printk(KERN_ERR + "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", +- tsk->comm, tsk->pid, address); ++ tsk->comm, task_pid_nr(tsk), address); + code = BUS_MCEERR_AR; + } + #endif +@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) + return 1; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) ++{ ++ pte_t *pte; ++ pmd_t *pmd; ++ spinlock_t *ptl; ++ unsigned char pte_mask; ++ ++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || ++ !(mm->pax_flags & MF_PAX_PAGEEXEC)) ++ return 0; ++ ++ /* PaX: it's our fault, let's handle it if we can */ ++ ++ /* PaX: take a look at read faults before acquiring any locks */ ++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { ++ /* instruction fetch attempt from a protected page in user mode */ ++ up_read(&mm->mmap_sem); ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ switch (pax_handle_fetch_fault(regs)) { ++ case 2: ++ return 1; ++ } ++#endif ++ ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ ++ pmd = pax_get_pmd(mm, address); ++ if (unlikely(!pmd)) ++ return 0; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { ++ /* write attempt to a protected
page in user mode */ ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++#ifdef CONFIG_SMP ++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) ++#else ++ if (likely(address > get_limit(regs->cs))) ++#endif ++ { ++ set_pte(pte, pte_mkread(*pte)); ++ __flush_tlb_one(address); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++ } ++ ++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); ++ ++ /* ++ * PaX: fill DTLB with user rights and retry ++ */ ++ __asm__ __volatile__ ( ++ "orb %2,(%1)\n" ++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) ++/* ++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's ++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* ++ * page fault when examined during a TLB load attempt. this is true not only ++ * for PTEs holding a non-present entry but also present entries that will ++ * raise a page fault (such as those set up by PaX, or the copy-on-write ++ * mechanism). in effect it means that we do *not* need to flush the TLBs ++ * for our target pages since their PTEs are simply not in the TLBs at all. ++ ++ * the best thing in omitting it is that we gain around 15-20% speed in the ++ * fast path of the page fault handler and can get rid of tracing since we ++ * can no longer flush unintended entries. ++ */ ++ "invlpg (%0)\n" ++#endif ++ __copyuser_seg"testb $0,(%0)\n" ++ "xorb %3,(%1)\n" ++ : ++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER) ++ : "memory", "cc"); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++} ++#endif ++ + /* + * Handle a spurious fault caused by a stale TLB entry. + * +@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1; + static inline int + access_error(unsigned long error_code, struct vm_area_struct *vma) + { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) ++ return 1; ++ + if (error_code & PF_WRITE) { + /* write, present and write, not present: */ + if (unlikely(!(vma->vm_flags & VM_WRITE))) +@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) + { + struct vm_area_struct *vma; + struct task_struct *tsk; +- unsigned long address; + struct mm_struct *mm; + int fault; + int write = error_code & PF_WRITE; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + (write ? 
FAULT_FLAG_WRITE : 0); + +- tsk = current; +- mm = tsk->mm; +- + /* Get the faulting address: */ +- address = read_cr2(); ++ unsigned long address = read_cr2(); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) { ++ if (!search_exception_tables(regs->ip)) { ++ bad_area_nosemaphore(regs, error_code, address); ++ return; ++ } ++ if (address < PAX_USER_SHADOW_BASE) { ++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); ++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip); ++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); ++ } else ++ address -= PAX_USER_SHADOW_BASE; ++ } ++#endif ++ ++ tsk = current; ++ mm = tsk->mm; + + /* + * Detect and handle instructions that would cause a page fault for +@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) + * User-mode registers count as a user access even for any + * potential system fault or CPU buglet: + */ +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + local_irq_enable(); + error_code |= PF_USER; + } else { +@@ -1122,6 +1328,11 @@ retry: + might_sleep(); + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (pax_handle_pageexec_fault(regs, mm, address, error_code)) ++ return; ++#endif ++ + vma = find_vma(mm, address); + if (unlikely(!vma)) { + bad_area(regs, error_code, address); +@@ -1133,18 +1344,24 @@ retry: + bad_area(regs, error_code, address); + return; + } +- if (error_code & PF_USER) { +- /* +- * Accessing the stack below %sp is always a bug. +- * The large cushion allows instructions like enter +- * and pusha to work. ("enter $65535, $31" pushes +- * 32 pointers and then decrements %sp by 65535.) +- */ +- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { +- bad_area(regs, error_code, address); +- return; +- } ++ /* ++ * Accessing the stack below %sp is always a bug. ++ * The large cushion allows instructions like enter ++ * and pusha to work. ("enter $65535, $31" pushes ++ * 32 pointers and then decrements %sp by 65535.) 
++ */ ++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { ++ bad_area(regs, error_code, address); ++ return; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { ++ bad_area(regs, error_code, address); ++ return; ++ } ++#endif ++ + if (unlikely(expand_stack(vma, address))) { + bad_area(regs, error_code, address); + return; +@@ -1199,3 +1416,292 @@ good_area: + + up_read(&mm->mmap_sem); + } ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) ++{ ++ struct mm_struct *mm = current->mm; ++ unsigned long ip = regs->ip; ++ ++ if (v8086_mode(regs)) ++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) ++ return true; ++ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address) ++ return true; ++ return false; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) ++ return true; ++ return false; ++ } ++#endif ++ ++ return false; ++} ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault_32(struct pt_regs *regs) ++{ ++ int err; ++ ++ do { /* PaX: libffi trampoline emulation */ ++ unsigned char mov, jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 9) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ ++ if (err) ++ break; ++ ++ if (mov == 0xB8 && jmp == 0xE9) { ++ regs->ax = addr1; ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10); ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ unsigned char mov1, mov2; ++ unsigned short jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 11) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov1, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { ++ regs->cx = addr1; ++ regs->ax = addr2; ++ regs->ip = addr2; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned char mov, jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 9) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ ++ if (err) ++ break; ++ ++ if (mov == 0xB9 && jmp == 0xE9) { ++ regs->cx = addr1; ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10); ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++ ++#ifdef CONFIG_X86_64 ++static int pax_handle_fetch_fault_64(struct pt_regs *regs) 
++{ ++ int err; ++ ++ do { /* PaX: libffi trampoline emulation */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char stcclc, jmp2; ++ unsigned long addr1, addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); ++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ if (stcclc == 0xF8) ++ regs->flags &= ~X86_EFLAGS_CF; ++ else ++ regs->flags |= X86_EFLAGS_CF; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned int addr1; ++ unsigned long addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned long addr1, addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->ip = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when gcc trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ if (v8086_mode(regs)) ++ return 1; ++ ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++ ++#ifdef CONFIG_X86_32 ++ return pax_handle_fetch_fault_32(regs); ++#else ++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) ++ return pax_handle_fetch_fault_32(regs); ++ else ++ return pax_handle_fetch_fault_64(regs); ++#endif ++} ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char __force_user *)pc+i)) ++ printk(KERN_CONT "?? 
"); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); ++ for (i = -1; i < 80 / (long)sizeof(long); i++) { ++ unsigned long c; ++ if (get_user(c, (unsigned long __force_user *)sp+i)) { ++#ifdef CONFIG_X86_32 ++ printk(KERN_CONT "???????? "); ++#else ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) ++ printk(KERN_CONT "???????? ???????? "); ++ else ++ printk(KERN_CONT "???????????????? "); ++#endif ++ } else { ++#ifdef CONFIG_X86_64 ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) { ++ printk(KERN_CONT "%08x ", (unsigned int)c); ++ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32)); ++ } else ++#endif ++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); ++ } ++ } ++ printk("\n"); ++} ++#endif ++ ++/** ++ * probe_kernel_write(): safely attempt to write to a location ++ * @dst: address to write to ++ * @src: pointer to the data that shall be written ++ * @size: size of the data chunk ++ * ++ * Safely write to address @dst from the buffer at @src. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++long notrace probe_kernel_write(void *dst, const void *src, size_t size) ++{ ++ long ret; ++ mm_segment_t old_fs = get_fs(); ++ ++ set_fs(KERNEL_DS); ++ pagefault_disable(); ++ pax_open_kernel(); ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); ++ pax_close_kernel(); ++ pagefault_enable(); ++ set_fs(old_fs); ++ ++ return ret ? -EFAULT : 0; ++} +diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c +index dd74e46..7d26398 100644 +--- a/arch/x86/mm/gup.c ++++ b/arch/x86/mm/gup.c +@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; +- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, ++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ, + (void __user *)start, len))) + return 0; + +diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c +index f4f29b1..5cac4fb 100644 +--- a/arch/x86/mm/highmem_32.c ++++ b/arch/x86/mm/highmem_32.c +@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); ++ ++ pax_open_kernel(); + set_pte(kmap_pte-idx, mk_pte(page, prot)); ++ pax_close_kernel(); ++ + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index f581a18..29efd37 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; +- unsigned long start_addr; ++ unsigned long start_addr, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; + + if (len > mm->cached_hole_size) { +- start_addr = mm->free_area_cache; ++ start_addr = mm->free_area_cache; + } else { +- start_addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -280,26 +287,27 @@ full_search: + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
*/ +- if (TASK_SIZE - len < addr) { ++ if (pax_task_size - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = ALIGN(vma->vm_end, huge_page_size(h)); + } ++ ++ mm->free_area_cache = addr + len; ++ return addr; + } + + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, +@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + { + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma, *prev_vma; +- unsigned long base = mm->mmap_base, addr = addr0; ++ struct vm_area_struct *vma; ++ unsigned long base = mm->mmap_base, addr; + unsigned long largest_hole = mm->cached_hole_size; +- int first_time = 1; + + /* don't allow allocations above current base */ + if (mm->free_area_cache > base) +@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + largest_hole = 0; + mm->free_area_cache = base; + } +-try_again: ++ + /* make sure it can fit in the remaining address space */ + if (mm->free_area_cache < len) + goto fail; + + /* either no address requested or can't fit in requested address hole */ +- addr = (mm->free_area_cache - len) & huge_page_mask(h); ++ addr = (mm->free_area_cache - len); + do { ++ addr &= huge_page_mask(h); ++ vma = find_vma(mm, addr); + /* + * Lookup failure means no vma is above this address, + * i.e. return with success: +- */ +- if (!(vma = find_vma_prev(mm, addr, &prev_vma))) +- return addr; +- +- /* + * new region fits between prev_vma->vm_end and + * vma->vm_start, use it: + */ +- if (addr + len <= vma->vm_start && +- (!prev_vma || (addr >= prev_vma->vm_end))) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ +- mm->cached_hole_size = largest_hole; +- return (mm->free_area_cache = addr); +- } else { +- /* pull free_area_cache down to the first hole */ +- if (mm->free_area_cache == vma->vm_end) { +- mm->free_area_cache = vma->vm_start; +- mm->cached_hole_size = largest_hole; +- } ++ mm->cached_hole_size = largest_hole; ++ return (mm->free_area_cache = addr); ++ } ++ /* pull free_area_cache down to the first hole */ ++ if (mm->free_area_cache == vma->vm_end) { ++ mm->free_area_cache = vma->vm_start; ++ mm->cached_hole_size = largest_hole; + } + + /* remember the largest hole we saw so far */ + if (addr + largest_hole < vma->vm_start) +- largest_hole = vma->vm_start - addr; ++ largest_hole = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = (vma->vm_start - len) & huge_page_mask(h); +- } while (len <= vma->vm_start); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + fail: + /* +- * if hint left us with no space for the requested +- * mapping then try again: +- */ +- if (first_time) { +- mm->free_area_cache = base; +- largest_hole = 0; +- first_time = 0; +- goto try_again; +- } +- /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. 
This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ +- mm->free_area_cache = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + addr = hugetlb_get_unmapped_area_bottomup(file, addr0, + len, pgoff, flags); +@@ -386,6 +392,7 @@ fail: + /* + * Restore the topdown base: + */ ++ mm->mmap_base = base; + mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + +@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; ++ unsigned long pax_task_size = TASK_SIZE; + + if (len & ~huge_page_mask(h)) + return -EINVAL; +- if (len > TASK_SIZE) ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) { +@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 87488b9..399f416 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -15,6 +15,7 @@ + #include <asm/tlbflush.h> + #include <asm/tlb.h> + #include <asm/proto.h> ++#include <asm/desc.h> + + unsigned long __initdata pgt_buf_start; + unsigned long __meminitdata pgt_buf_end; +@@ -31,7 +32,7 @@ int direct_gbpages + static void __init find_early_table_space(unsigned long end, int use_pse, + int use_gbpages) + { +- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; ++ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end; + phys_addr_t base; + + puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; +@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + */ + int devmem_is_allowed(unsigned long pagenr) + { ++#ifdef CONFIG_GRKERNSEC_KMEM ++ /* allow BDA */ ++ if (!pagenr) ++ return 1; ++ /* allow EBDA */ ++ if ((0x9f000 >> PAGE_SHIFT) == pagenr) ++ return 1; ++#else ++ if (!pagenr) ++ return 1; ++#ifdef CONFIG_VM86 ++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT)) ++ return 1; ++#endif ++#endif ++ ++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) ++ return 1; ++#ifdef CONFIG_GRKERNSEC_KMEM ++ /* throw out everything else below 1MB */ + if (pagenr <= 256) +- return 1; ++ return 0; ++#endif + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pagenr)) +@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) + + void free_initmem(void) + { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++ /* PaX: limit KERNEL_CS to actual size */ ++ unsigned long addr, limit; ++ struct desc_struct d; ++ int cpu; ++ ++ limit = paravirt_enabled() ? 
ktva_ktla(0xffffffff) : (unsigned long)&_etext; ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ ++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); ++ } ++ ++ /* PaX: make KERNEL_CS read-only */ ++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); ++ if (!paravirt_enabled()) ++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); ++/* ++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ } ++*/ ++#ifdef CONFIG_X86_PAE ++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); ++/* ++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++*/ ++#endif ++ ++#ifdef CONFIG_MODULES ++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); ++#endif ++ ++#else ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ unsigned long addr, end; ++ ++ /* PaX: make kernel code/rodata read-only, rest non-executable */ ++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ else ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++ ++ addr = (unsigned long)__va(__pa(__START_KERNEL_map)); ++ end = addr + KERNEL_IMAGE_SIZE; ++ for (; addr < end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ } ++#endif ++ ++ flush_tlb_all(); ++#endif ++ + free_init_pages("unused kernel memory", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); +diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c +index 29f7c6d..b46b35b 100644 +--- a/arch/x86/mm/init_32.c ++++ b/arch/x86/mm/init_32.c +@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void) + } + + /* +- * Creates a middle page table and puts a pointer to it in the +- * given global directory entry. This only returns the gd entry +- * in non-PAE compilation mode, since the middle layer is folded. 
+- */ +-static pmd_t * __init one_md_table_init(pgd_t *pgd) +-{ +- pud_t *pud; +- pmd_t *pmd_table; +- +-#ifdef CONFIG_X86_PAE +- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { +- if (after_bootmem) +- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); +- else +- pmd_table = (pmd_t *)alloc_low_page(); +- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); +- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); +- pud = pud_offset(pgd, 0); +- BUG_ON(pmd_table != pmd_offset(pud, 0)); +- +- return pmd_table; +- } +-#endif +- pud = pud_offset(pgd, 0); +- pmd_table = pmd_offset(pud, 0); +- +- return pmd_table; +-} +- +-/* + * Create a page table and place a pointer to it in a middle page + * directory entry: + */ +@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) + page_table = (pte_t *)alloc_low_page(); + + paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); ++#else + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); ++#endif + BUG_ON(page_table != pte_offset_kernel(pmd, 0)); + } + + return pte_offset_kernel(pmd, 0); + } + ++static pmd_t * __init one_md_table_init(pgd_t *pgd) ++{ ++ pud_t *pud; ++ pmd_t *pmd_table; ++ ++ pud = pud_offset(pgd, 0); ++ pmd_table = pmd_offset(pud, 0); ++ ++ return pmd_table; ++} ++ + pmd_t * __init populate_extra_pmd(unsigned long vaddr) + { + int pgd_idx = pgd_index(vaddr); +@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + int pgd_idx, pmd_idx; + unsigned long vaddr; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte = NULL; + +@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + pgd = pgd_base + pgd_idx; + + for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); +- pmd = pmd + pmd_index(vaddr); ++ pud = pud_offset(pgd, vaddr); ++ pmd = pmd_offset(pud, vaddr); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif ++ + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); + pmd++, pmd_idx++) { + pte = page_table_kmap_check(one_page_table_init(pmd), +@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + } + } + +-static inline int is_kernel_text(unsigned long addr) ++static inline int is_kernel_text(unsigned long start, unsigned long end) + { +- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) +- return 1; +- return 0; ++ if ((start > ktla_ktva((unsigned long)_etext) || ++ end <= ktla_ktva((unsigned long)_stext)) && ++ (start > ktla_ktva((unsigned long)_einittext) || ++ end <= ktla_ktva((unsigned long)_sinittext)) && ++ ++#ifdef CONFIG_ACPI_SLEEP ++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && ++#endif ++ ++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) ++ return 0; ++ return 1; + } + + /* +@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start, + unsigned long last_map_addr = end; + unsigned long start_pfn, end_pfn; + pgd_t *pgd_base = swapper_pg_dir; +- int pgd_idx, pmd_idx, pte_ofs; ++ unsigned int pgd_idx, pmd_idx, pte_ofs; + unsigned long pfn; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned pages_2m, pages_4k; +@@ -281,8 +282,13 @@ repeat: + pfn = start_pfn; + pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + 
PAGE_OFFSET); + pgd = pgd_base + pgd_idx; +- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); ++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) { ++ pud = pud_offset(pgd, 0); ++ pmd = pmd_offset(pud, 0); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif + + if (pfn >= end_pfn) + continue; +@@ -294,14 +300,13 @@ repeat: + #endif + for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; + pmd++, pmd_idx++) { +- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; ++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; + + /* + * Map with big pages if possible, otherwise + * create normal page tables: + */ + if (use_pse) { +- unsigned int addr2; + pgprot_t prot = PAGE_KERNEL_LARGE; + /* + * first pass will use the same initial +@@ -311,11 +316,7 @@ repeat: + __pgprot(PTE_IDENT_ATTR | + _PAGE_PSE); + +- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + +- PAGE_OFFSET + PAGE_SIZE-1; +- +- if (is_kernel_text(addr) || +- is_kernel_text(addr2)) ++ if (is_kernel_text(address, address + PMD_SIZE)) + prot = PAGE_KERNEL_LARGE_EXEC; + + pages_2m++; +@@ -332,7 +333,7 @@ repeat: + pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); + pte += pte_ofs; + for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; +- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { ++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) { + pgprot_t prot = PAGE_KERNEL; + /* + * first pass will use the same initial +@@ -340,7 +341,7 @@ repeat: + */ + pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); + +- if (is_kernel_text(addr)) ++ if (is_kernel_text(address, address + PAGE_SIZE)) + prot = PAGE_KERNEL_EXEC; + + pages_4k++; +@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base) + + pud = pud_offset(pgd, va); + pmd = pmd_offset(pud, va); +- if (!pmd_present(*pmd)) ++ if (!pmd_present(*pmd) || pmd_huge(*pmd)) + break; + + pte = pte_offset_kernel(pmd, va); +@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void) + + static void __init pagetable_init(void) + { +- pgd_t *pgd_base = swapper_pg_dir; +- +- permanent_kmaps_init(pgd_base); ++ permanent_kmaps_init(swapper_pg_dir); + } + +-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + /* user-defined highmem size */ +@@ -757,6 +756,12 @@ void __init mem_init(void) + + pci_iommu_alloc(); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + #ifdef CONFIG_FLATMEM + BUG_ON(!mem_map); + #endif +@@ -774,7 +779,7 @@ void __init mem_init(void) + set_highmem_pages_init(); + + codesize = (unsigned long) &_etext - (unsigned long) &_text; +- datasize = (unsigned long) &_edata - (unsigned long) &_etext; ++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + + printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " +@@ -815,10 +820,10 @@ void __init mem_init(void) + ((unsigned long)&__init_end - + (unsigned long)&__init_begin) >> 10, + +- (unsigned long)&_etext, (unsigned long)&_edata, +- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, ++ (unsigned long)&_sdata, (unsigned long)&_edata, ++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, + +- (unsigned long)&_text, (unsigned long)&_etext, ++ 
ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), + ((unsigned long)&_etext - (unsigned long)&_text) >> 10); + + /* +@@ -896,6 +901,7 @@ void set_kernel_text_rw(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read write\n", + start, start+size); + +@@ -910,6 +916,7 @@ void set_kernel_text_ro(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read only\n", + start, start+size); + +@@ -938,6 +945,7 @@ void mark_rodata_ro(void) + unsigned long start = PFN_ALIGN(_text); + unsigned long size = PFN_ALIGN(_etext) - start; + ++ start = ktla_ktva(start); + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + printk(KERN_INFO "Write protecting the kernel text: %luk\n", + size >> 10); +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index bbaaa00..796fa65 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on); + * around without checking the pgd every time. + */ + +-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + int force_personality32; +@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end) + + for (address = start; address <= end; address += PGDIR_SIZE) { + const pgd_t *pgd_ref = pgd_offset_k(address); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + if (pgd_none(*pgd_ref)) + continue; + + spin_lock(&pgd_lock); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = pgd_offset_cpu(cpu, address); ++#else + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + spinlock_t *pgt_lock; +@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) + /* the pgt_lock only for Xen */ + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + spin_lock(pgt_lock); ++#endif + + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); +@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end) + BUG_ON(pgd_page_vaddr(*pgd) + != pgd_page_vaddr(*pgd_ref)); + ++#ifndef CONFIG_PAX_PER_CPU_PGD + spin_unlock(pgt_lock); ++#endif ++ + } + spin_unlock(&pgd_lock); + } +@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) + pmd = fill_pmd(pud, vaddr); + pte = fill_pte(pmd, vaddr); + ++ pax_open_kernel(); + set_pte(pte, new_pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. 
+@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, + pgd = pgd_offset_k((unsigned long)__va(phys)); + if (pgd_none(*pgd)) { + pud = (pud_t *) spp_getpage(); +- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); + } + pud = pud_offset(pgd, (unsigned long)__va(phys)); + if (pud_none(*pud)) { + pmd = (pmd_t *) spp_getpage(); +- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); + } + pmd = pmd_offset(pud, phys); + BUG_ON(!pmd_none(*pmd)); +@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys) + if (pfn >= pgt_buf_top) + panic("alloc_low_page: ran out of memory"); + +- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); ++ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); + clear_page(adr); + *phys = pfn * PAGE_SIZE; + return adr; +@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt) + + phys = __pa(virt); + left = phys & (PAGE_SIZE - 1); +- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); ++ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE); + adr = (void *)(((unsigned long)adr) | left); + + return adr; +@@ -693,6 +707,12 @@ void __init mem_init(void) + + pci_iommu_alloc(); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + /* clear_bss() already clear the empty_zero_page */ + + reservedpages = 0; +@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr) + static struct vm_area_struct gate_vma = { + .vm_start = VSYSCALL_START, + .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), +- .vm_page_prot = PAGE_READONLY_EXEC, +- .vm_flags = VM_READ | VM_EXEC ++ .vm_page_prot = PAGE_READONLY, ++ .vm_flags = VM_READ + }; + + struct vm_area_struct *get_gate_vma(struct mm_struct *mm) +@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr) + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; + if (vma == &gate_vma) + return "[vsyscall]"; +diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c +index 7b179b4..6bd1777 100644 +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ ++ pax_open_kernel(); + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ pax_close_kernel(); ++ + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c +index be1ef57..55f0160 100644 +--- a/arch/x86/mm/ioremap.c ++++ b/arch/x86/mm/ioremap.c +@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, + for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { + int is_ram = page_is_ram(pfn); + +- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) ++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn)))) + return NULL; + WARN_ON_ONCE(is_ram); + } +@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys) + + /* If page is RAM, we can use __va. Otherwise ioremap and unmap. 
*/ + if (page_is_ram(start >> PAGE_SHIFT)) ++#ifdef CONFIG_HIGHMEM ++ if ((start >> PAGE_SHIFT) < max_low_pfn) ++#endif + return __va(phys); + + addr = (void __force *)ioremap_cache(start, PAGE_SIZE); +@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str) + early_param("early_ioremap_debug", early_ioremap_debug_setup); + + static __initdata int after_paging_init; +-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; ++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); + + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) + { +@@ -381,8 +384,7 @@ void __init early_ioremap_init(void) + slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); +- memset(bm_pte, 0, sizeof(bm_pte)); +- pmd_populate_kernel(&init_mm, pmd, bm_pte); ++ pmd_populate_user(&init_mm, pmd, bm_pte); + + /* + * The boot-ioremap range spans multiple pmds, for which +diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c +index d87dd6d..bf3fa66 100644 +--- a/arch/x86/mm/kmemcheck/kmemcheck.c ++++ b/arch/x86/mm/kmemcheck/kmemcheck.c +@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ +- if (regs->flags & X86_VM_MASK) ++ if (v8086_mode(regs)) + return false; +- if (regs->cs != __KERNEL_CS) ++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) + return false; + + pte = kmemcheck_pte_lookup(address); +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 845df68..1d8d29f 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void) + * Leave an at least ~128 MB hole with possible stack randomization. 
+ */ + #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) +-#define MAX_GAP (TASK_SIZE/6*5) ++#define MAX_GAP (pax_task_size/6*5) + + static int mmap_is_legacy(void) + { +@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void) + return rnd << PAGE_SHIFT; + } + +-static unsigned long mmap_base(void) ++static unsigned long mmap_base(struct mm_struct *mm) + { + unsigned long gap = rlimit(RLIMIT_STACK); ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); ++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); + } + + /* + * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 + * does, but not when emulating X86_32 + */ +-static unsigned long mmap_legacy_base(void) ++static unsigned long mmap_legacy_base(struct mm_struct *mm) + { +- if (mmap_is_ia32()) ++ if (mmap_is_ia32()) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ return SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ + return TASK_UNMAPPED_BASE; +- else ++ } else + return TASK_UNMAPPED_BASE + mmap_rnd(); + } + +@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void) + void arch_pick_mmap_layout(struct mm_struct *mm) + { + if (mmap_is_legacy()) { +- mm->mmap_base = mmap_legacy_base(); ++ mm->mmap_base = mmap_legacy_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { +- mm->mmap_base = mmap_base(); ++ mm->mmap_base = mmap_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c +index de54b9b..799051e 100644 +--- a/arch/x86/mm/mmio-mod.c ++++ b/arch/x86/mm/mmio-mod.c +@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs, + break; + default: + { +- unsigned char *ip = (unsigned char *)instptr; ++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr); + my_trace->opcode = MMIO_UNKNOWN_OP; + my_trace->width = 0; + my_trace->value = (*ip) << 16 | *(ip + 1) << 8 | +@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition, + static void ioremap_trace_core(resource_size_t offset, unsigned long size, + void __iomem *addr) + { +- static atomic_t next_id; ++ static atomic_unchecked_t next_id; + struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); + /* These are page-unaligned. 
*/ + struct mmiotrace_map map = { +@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, + .private = trace + }, + .phys = offset, +- .id = atomic_inc_return(&next_id) ++ .id = atomic_inc_return_unchecked(&next_id) + }; + map.map_id = trace->id; + +diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c +index b008656..773eac2 100644 +--- a/arch/x86/mm/pageattr-test.c ++++ b/arch/x86/mm/pageattr-test.c +@@ -36,7 +36,7 @@ enum { + + static int pte_testbit(pte_t pte) + { +- return pte_flags(pte) & _PAGE_UNUSED1; ++ return pte_flags(pte) & _PAGE_CPA_TEST; + } + + struct split_state { +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index f9e5267..77b1a40 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + */ + #ifdef CONFIG_PCI_BIOS + if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + #endif + + /* +@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + * Does not cover __inittext since that is gone later on. On + * 64bit we do not enforce !NX on the low mapping + */ +- if (within(address, (unsigned long)_text, (unsigned long)_etext)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + ++#ifdef CONFIG_DEBUG_RODATA + /* + * The .rodata section needs to be read-only. Using the pfn + * catches all aliases. +@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, + __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) + pgprot_val(forbidden) |= _PAGE_RW; ++#endif + + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) + /* +@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + } + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) { ++ pgprot_val(forbidden) |= _PAGE_RW; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; ++ } ++#endif ++ + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); + + return prot; +@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address); + static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) + { + /* change init_mm */ ++ pax_open_kernel(); + set_pte_atomic(kpte, pte); ++ + #ifdef CONFIG_X86_32 + if (!SHARED_KERNEL_PMD) { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu); ++#else + list_for_each_entry(page, &pgd_list, lru) { +- pgd_t *pgd; ++ pgd_t *pgd = (pgd_t *)page_address(page); ++#endif ++ + pud_t *pud; + pmd_t *pmd; + +- pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd += pgd_index(address); + pud = pud_offset(pgd, address); + pmd = pmd_offset(pud, address); + set_pte_atomic((pte_t *)pmd, pte); + } + } + #endif ++ pax_close_kernel(); + } + + static int +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index f6ff57b..481690f 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -361,7 +361,7 @@ int free_memtype(u64 start, 
u64 end) + + if (!entry) { + printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", +- current->comm, current->pid, start, end); ++ current->comm, task_pid_nr(current), start, end); + return -EINVAL; + } + +@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { + printk(KERN_INFO +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", +- current->comm, from, to); ++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", ++ current->comm, from, to, cursor); + return 0; + } + cursor += PAGE_SIZE; +@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) + printk(KERN_INFO + "%s:%d ioremap_change_attr failed %s " + "for %Lx-%Lx\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(flags), + base, (unsigned long long)(base + size)); + return -EINVAL; +@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, + if (want_flags != flags) { + printk(KERN_WARNING + "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), +@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, + free_memtype(paddr, paddr + size); + printk(KERN_ERR "%s:%d map pfn expected mapping type %s" + " for %Lx-%Lx, got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), +diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c +index 9f0614d..92ae64a 100644 +--- a/arch/x86/mm/pf_in.c ++++ b/arch/x86/mm/pf_in.c +@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr) + int i; + enum reason_type rv = OTHERS; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + for (i = 0; i < ARRAY_SIZE(reg_rop); i++) +@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + for (i = 0; i < ARRAY_SIZE(imm_wop); i++) +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 8573b83..c3b1a30 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd) + list_del(&page->lru); + } + +-#define UNSHARED_PTRS_PER_PGD \ +- (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; + ++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) ++{ ++ while (count--) ++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER); ++} ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count) ++{ ++ while (count--) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask); ++#else ++ *dst++ = *src++; ++#endif ++ ++} ++#endif ++ ++#ifdef CONFIG_X86_64 ++#define pxd_t pud_t ++#define pyd_t pgd_t ++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) ++#define pxd_free(mm, pud) pud_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm, address) pgd_offset((mm), (address)) ++#define PYD_SIZE PGDIR_SIZE ++#else ++#define pxd_t pmd_t ++#define pyd_t pud_t ++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) ++#define pxd_free(mm, pud) pmd_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm, address) pud_offset((mm), (address)) ++#define PYD_SIZE PUD_SIZE ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {} ++static inline void pgd_dtor(pgd_t *pgd) {} ++#else + static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) + { + BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); +@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd) + pgd_list_del(pgd); + spin_unlock(&pgd_lock); + } ++#endif + + /* + * List of all pgd's needed for non-PAE so it can invalidate entries +@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd) + * -- wli + */ + +-#ifdef CONFIG_X86_PAE ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) + /* + * In PAE mode, we need to do a cr3 reload (=tlb flush) when + * updating the top-level pagetable entries to guarantee the +@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd) + * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate + * and initialize the kernel pmds here. + */ +-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD ++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) + + void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + { +@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + */ + flush_tlb_mm(mm); + } ++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) ++#define PREALLOCATED_PXDS USER_PGD_PTRS + #else /* !CONFIG_X86_PAE */ + + /* No need to prepopulate any pagetable entries in non-PAE modes. 
*/ +-#define PREALLOCATED_PMDS 0 ++#define PREALLOCATED_PXDS 0 + + #endif /* CONFIG_X86_PAE */ + +-static void free_pmds(pmd_t *pmds[]) ++static void free_pxds(pxd_t *pxds[]) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) +- if (pmds[i]) +- free_page((unsigned long)pmds[i]); ++ for(i = 0; i < PREALLOCATED_PXDS; i++) ++ if (pxds[i]) ++ free_page((unsigned long)pxds[i]); + } + +-static int preallocate_pmds(pmd_t *pmds[]) ++static int preallocate_pxds(pxd_t *pxds[]) + { + int i; + bool failed = false; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { +- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); +- if (pmd == NULL) ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { ++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); ++ if (pxd == NULL) + failed = true; +- pmds[i] = pmd; ++ pxds[i] = pxd; + } + + if (failed) { +- free_pmds(pmds); ++ free_pxds(pxds); + return -ENOMEM; + } + +@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[]) + * preallocate which never got a corresponding vma will need to be + * freed manually. + */ +-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) ++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { + pgd_t pgd = pgdp[i]; + + if (pgd_val(pgd) != 0) { +- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); ++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); + +- pgdp[i] = native_make_pgd(0); ++ set_pgd(pgdp + i, native_make_pgd(0)); + +- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); +- pmd_free(mm, pmd); ++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); ++ pxd_free(mm, pxd); + } + } + } + +-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) ++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) + { +- pud_t *pud; ++ pyd_t *pyd; + unsigned long addr; + int i; + +- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ ++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ + return; + +- pud = pud_offset(pgd, 0); ++#ifdef CONFIG_X86_64 ++ pyd = pyd_offset(mm, 0L); ++#else ++ pyd = pyd_offset(pgd, 0L); ++#endif + +- for (addr = i = 0; i < PREALLOCATED_PMDS; +- i++, pud++, addr += PUD_SIZE) { +- pmd_t *pmd = pmds[i]; ++ for (addr = i = 0; i < PREALLOCATED_PXDS; ++ i++, pyd++, addr += PYD_SIZE) { ++ pxd_t *pxd = pxds[i]; + + if (i >= KERNEL_PGD_BOUNDARY) +- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), +- sizeof(pmd_t) * PTRS_PER_PMD); ++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), ++ sizeof(pxd_t) * PTRS_PER_PMD); + +- pud_populate(mm, pud, pmd); ++ pyd_populate(mm, pyd, pxd); + } + } + + pgd_t *pgd_alloc(struct mm_struct *mm) + { + pgd_t *pgd; +- pmd_t *pmds[PREALLOCATED_PMDS]; ++ pxd_t *pxds[PREALLOCATED_PXDS]; + + pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); + +@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + + mm->pgd = pgd; + +- if (preallocate_pmds(pmds) != 0) ++ if (preallocate_pxds(pxds) != 0) + goto out_free_pgd; + + if (paravirt_pgd_alloc(mm) != 0) +- goto out_free_pmds; ++ goto out_free_pxds; + + /* + * Make sure that pre-populating the pmds is atomic with +@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + spin_lock(&pgd_lock); + + pgd_ctor(mm, pgd); +- pgd_prepopulate_pmd(mm, pgd, pmds); ++ pgd_prepopulate_pxd(mm, pgd, pxds); + + spin_unlock(&pgd_lock); + + return pgd; + +-out_free_pmds: +- free_pmds(pmds); ++out_free_pxds: ++ free_pxds(pxds); + out_free_pgd: + free_page((unsigned long)pgd); + out: +@@ -295,7 
+344,7 @@ out: + + void pgd_free(struct mm_struct *mm, pgd_t *pgd) + { +- pgd_mop_up_pmds(mm, pgd); ++ pgd_mop_up_pxds(mm, pgd); + pgd_dtor(pgd); + paravirt_pgd_free(mm, pgd); + free_page((unsigned long)pgd); +diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c +index cac7184..09a39fa 100644 +--- a/arch/x86/mm/pgtable_32.c ++++ b/arch/x86/mm/pgtable_32.c +@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) + return; + } + pte = pte_offset_kernel(pmd, vaddr); ++ ++ pax_open_kernel(); + if (pte_val(pteval)) + set_pte_at(&init_mm, vaddr, pte, pteval); + else + pte_clear(&init_mm, vaddr, pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. +diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c +index 410531d..0f16030 100644 +--- a/arch/x86/mm/setup_nx.c ++++ b/arch/x86/mm/setup_nx.c +@@ -5,8 +5,10 @@ + #include <asm/pgtable.h> + #include <asm/proto.h> + ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + static int disable_nx __cpuinitdata; + ++#ifndef CONFIG_PAX_PAGEEXEC + /* + * noexec = on|off + * +@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str) + return 0; + } + early_param("noexec", noexec_setup); ++#endif ++ ++#endif + + void __cpuinit x86_configure_nx(void) + { ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if (cpu_has_nx && !disable_nx) + __supported_pte_mask |= _PAGE_NX; + else ++#endif + __supported_pte_mask &= ~_PAGE_NX; + } + +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index d6c0418..06a0ad5 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -65,7 +65,11 @@ void leave_mm(int cpu) + BUG(); + cpumask_clear_cpu(cpu, + mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(swapper_pg_dir); ++#endif ++ + } + EXPORT_SYMBOL_GPL(leave_mm); + +diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S +index 6687022..ceabcfa 100644 +--- a/arch/x86/net/bpf_jit.S ++++ b/arch/x86/net/bpf_jit.S +@@ -9,6 +9,7 @@ + */ + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + /* + * Calling convention : +@@ -35,6 +36,7 @@ sk_load_word: + jle bpf_slow_path_word + mov (SKBDATA,%rsi),%eax + bswap %eax /* ntohl() */ ++ pax_force_retaddr + ret + + +@@ -53,6 +55,7 @@ sk_load_half: + jle bpf_slow_path_half + movzwl (SKBDATA,%rsi),%eax + rol $8,%ax # ntohs() ++ pax_force_retaddr + ret + + sk_load_byte_ind: +@@ -66,6 +69,7 @@ sk_load_byte: + cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ + jle bpf_slow_path_byte + movzbl (SKBDATA,%rsi),%eax ++ pax_force_retaddr + ret + + /** +@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh) + movzbl (SKBDATA,%rsi),%ebx + and $15,%bl + shl $2,%bl ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(sk_load_byte_msh) +@@ -91,6 +96,7 @@ bpf_error: + xor %eax,%eax + mov -8(%rbp),%rbx + leaveq ++ pax_force_retaddr + ret + + /* rsi contains offset and can be scratched */ +@@ -113,6 +119,7 @@ bpf_slow_path_word: + js bpf_error + mov -12(%rbp),%eax + bswap %eax ++ pax_force_retaddr + ret + + bpf_slow_path_half: +@@ -121,12 +128,14 @@ bpf_slow_path_half: + mov -12(%rbp),%ax + rol $8,%ax + movzwl %ax,%eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte: + bpf_slow_path_common(1) + js bpf_error + movzbl -12(%rbp),%eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte_msh: +@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh: + and $15,%al + shl $2,%al + xchg %eax,%ebx ++ pax_force_retaddr + ret +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index 7c1b765..8c072c6 
100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end) + set_fs(old_fs); + } + ++struct bpf_jit_work { ++ struct work_struct work; ++ void *image; ++}; + + void bpf_jit_compile(struct sk_filter *fp) + { +@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp) + if (addrs == NULL) + return; + ++ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL); ++ if (!fp->work) ++ goto out; ++ + /* Before first pass, make a rough estimation of addrs[] + * each bpf instruction is translated to less than 64 bytes + */ +@@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp) + func = sk_load_word; + common_load: seen |= SEEN_DATAREF; + if ((int)K < 0) +- goto out; ++ goto error; + t_offset = func - (image + addrs[i]); + EMIT1_off32(0xbe, K); /* mov imm32,%esi */ + EMIT1_off32(0xe8, t_offset); /* call */ +@@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; + break; + default: + /* hmm, too complex filter, give up with jit compiler */ +- goto out; ++ goto error; + } + ilen = prog - temp; + if (image) { + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpb_jit_compile fatal error\n"); +- kfree(addrs); +- module_free(NULL, image); +- return; ++ module_free_exec(NULL, image); ++ goto error; + } ++ pax_open_kernel(); + memcpy(image + proglen, temp, ilen); ++ pax_close_kernel(); + } + proglen += ilen; + addrs[i] = proglen; +@@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; + break; + } + if (proglen == oldproglen) { +- image = module_alloc(max_t(unsigned int, +- proglen, +- sizeof(struct work_struct))); ++ image = module_alloc_exec(proglen); + if (!image) +- goto out; ++ goto error; + } + oldproglen = proglen; + } +@@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; + bpf_flush_icache(image, image + proglen); + + fp->bpf_func = (void *)image; +- } ++ } else ++error: ++ kfree(fp->work); ++ + out: + kfree(addrs); + return; +@@ -645,18 +655,20 @@ out: + + static void jit_free_defer(struct work_struct *arg) + { +- module_free(NULL, arg); ++ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image); ++ kfree(arg); + } + + /* run from softirq, we must use a work_struct to call +- * module_free() from process context ++ * module_free_exec() from process context + */ + void bpf_jit_free(struct sk_filter *fp) + { + if (fp->bpf_func != sk_run_filter) { +- struct work_struct *work = (struct work_struct *)fp->bpf_func; ++ struct work_struct *work = &fp->work->work; + + INIT_WORK(work, jit_free_defer); ++ fp->work->image = fp->bpf_func; + schedule_work(work); + } + } +diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c +index bff89df..377758a 100644 +--- a/arch/x86/oprofile/backtrace.c ++++ b/arch/x86/oprofile/backtrace.c +@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head) + struct stack_frame_ia32 *fp; + unsigned long bytes; + +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) + return NULL; + +- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); ++ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame); + + oprofile_add_trace(bufhead[0].return_address); + +@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head) + struct stack_frame bufhead[2]; + unsigned 
long bytes; + +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) + return NULL; + +@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) + { + struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); + +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned long stack = kernel_stack_pointer(regs); + if (depth) + dump_trace(NULL, regs, (unsigned long *)stack, 0, +diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c +index cb29191..036766d 100644 +--- a/arch/x86/pci/mrst.c ++++ b/arch/x86/pci/mrst.c +@@ -234,7 +234,9 @@ int __init pci_mrst_init(void) + printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n"); + pci_mmcfg_late_init(); + pcibios_enable_irq = mrst_pci_irq_enable; +- pci_root_ops = pci_mrst_ops; ++ pax_open_kernel(); ++ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops)); ++ pax_close_kernel(); + /* Continue with standard init */ + return 1; + } +diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c +index db0e9a5..0372c14 100644 +--- a/arch/x86/pci/pcbios.c ++++ b/arch/x86/pci/pcbios.c +@@ -79,50 +79,93 @@ union bios32 { + static struct { + unsigned long address; + unsigned short segment; +-} bios32_indirect = { 0, __KERNEL_CS }; ++} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; + + /* + * Returns the entry point for the given service, NULL on error + */ + +-static unsigned long bios32_service(unsigned long service) ++static unsigned long __devinit bios32_service(unsigned long service) + { + unsigned char return_code; /* %al */ + unsigned long address; /* %ebx */ + unsigned long length; /* %ecx */ + unsigned long entry; /* %edx */ + unsigned long flags; ++ struct desc_struct d, *gdt; + + local_irq_save(flags); +- __asm__("lcall *(%%edi); cld" ++ ++ gdt = get_cpu_gdt_table(smp_processor_id()); ++ ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ ++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" + : "=a" (return_code), + "=b" (address), + "=c" (length), + "=d" (entry) + : "0" (service), + "1" (0), +- "D" (&bios32_indirect)); ++ "D" (&bios32_indirect), ++ "r"(__PCIBIOS_DS) ++ : "memory"); ++ ++ pax_open_kernel(); ++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; ++ pax_close_kernel(); ++ + local_irq_restore(flags); + + switch (return_code) { +- case 0: +- return address + entry; +- case 0x80: /* Not present */ +- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); +- return 0; +- default: /* Shouldn't happen */ +- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", +- service, return_code); ++ case 0: { ++ int cpu; ++ unsigned char flags; ++ ++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); ++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { ++ printk(KERN_WARNING "bios32_service: not valid\n"); + return 0; ++ } ++ address = address + PAGE_OFFSET; ++ length += 16UL; /* some BIOSs underreport this... 
*/ ++ flags = 4; ++ if (length >= 64*1024*1024) { ++ length >>= PAGE_SHIFT; ++ flags |= 8; ++ } ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ pack_descriptor(&d, address, length, 0x9b, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, address, length, 0x93, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ } ++ return entry; ++ } ++ case 0x80: /* Not present */ ++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); ++ return 0; ++ default: /* Shouldn't happen */ ++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", ++ service, return_code); ++ return 0; + } + } + + static struct { + unsigned long address; + unsigned short segment; +-} pci_indirect = { 0, __KERNEL_CS }; ++} pci_indirect __read_only = { 0, __PCIBIOS_CS }; + +-static int pci_bios_present; ++static int pci_bios_present __read_only; + + static int __devinit check_pcibios(void) + { +@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void) + unsigned long flags, pcibios_entry; + + if ((pcibios_entry = bios32_service(PCI_SERVICE))) { +- pci_indirect.address = pcibios_entry + PAGE_OFFSET; ++ pci_indirect.address = pcibios_entry; + + local_irq_save(flags); +- __asm__( +- "lcall *(%%edi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%edi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void) + "=b" (ebx), + "=c" (ecx) + : "1" (PCIBIOS_PCI_BIOS_PRESENT), +- "D" (&pci_indirect) ++ "D" (&pci_indirect), ++ "r" (__PCIBIOS_DS) + : "memory"); + local_irq_restore(flags); + +@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + + switch (len) { + case 1: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_BYTE), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 8 bits, do not trust the + * BIOS having done it: +@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + *value &= 0xff; + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_WORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 16 bits, do not trust the + * BIOS having done it: +@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + *value &= 0xffff; + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_DWORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, 
+ + switch (len) { + case 1: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) + + DBG("PCI: Fetching IRQ routing table... "); + __asm__("push %%es\n\t" ++ "movw %w8, %%ds\n\t" + "push %%ds\n\t" + "pop %%es\n\t" +- "lcall *(%%esi); cld\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" + "pop %%es\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) + "1" (0), + "D" ((long) &opt), + "S" (&pci_indirect), +- "m" (opt) ++ "m" (opt), ++ "r" (__PCIBIOS_DS) + : "memory"); + DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); + if (ret & 0xff00) +@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) + { + int ret; + +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w5, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) + : "0" (PCIBIOS_SET_PCI_HW_INT), + "b" ((dev->bus->number << 8) | dev->devfn), + "c" ((irq << 8) | (pin + 10)), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + return !(ret & 0xff00); + } + EXPORT_SYMBOL(pcibios_set_irq_routing); +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c +index 40e4469..1ab536e 100644 +--- a/arch/x86/platform/efi/efi_32.c ++++ b/arch/x86/platform/efi/efi_32.c +@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void) + { + struct desc_ptr gdt_descr; + ++#ifdef CONFIG_PAX_KERNEXEC ++ struct desc_struct d; ++#endif ++ + local_irq_save(efi_rt_eflags); + + load_cr3(initial_page_table); + __flush_tlb_all(); + ++#ifdef CONFIG_PAX_KERNEXEC ++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); ++#endif ++ + gdt_descr.address = __pa(get_cpu_gdt_table(0)); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); +@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void) + { + struct desc_ptr gdt_descr; + ++#ifdef CONFIG_PAX_KERNEXEC ++ struct desc_struct d; ++ ++ memset(&d, 0, sizeof d); ++ 
write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); ++#endif ++ + gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); +diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S +index fbe66e6..c5c0dd2 100644 +--- a/arch/x86/platform/efi/efi_stub_32.S ++++ b/arch/x86/platform/efi/efi_stub_32.S +@@ -6,7 +6,9 @@ + */ + + #include <linux/linkage.h> ++#include <linux/init.h> + #include <asm/page_types.h> ++#include <asm/segment.h> + + /* + * efi_call_phys(void *, ...) is a function with variable parameters. +@@ -20,7 +22,7 @@ + * service functions will comply with gcc calling convention, too. + */ + +-.text ++__INIT + ENTRY(efi_call_phys) + /* + * 0. The function can only be called in Linux kernel. So CS has been +@@ -36,9 +38,11 @@ ENTRY(efi_call_phys) + * The mapping of lower virtual memory has been created in prelog and + * epilog. + */ +- movl $1f, %edx +- subl $__PAGE_OFFSET, %edx +- jmp *%edx ++ movl $(__KERNEXEC_EFI_DS), %edx ++ mov %edx, %ds ++ mov %edx, %es ++ mov %edx, %ss ++ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET + 1: + + /* +@@ -47,14 +51,8 @@ ENTRY(efi_call_phys) + * parameter 2, ..., param n. To make things easy, we save the return + * address of efi_call_phys in a global variable. + */ +- popl %edx +- movl %edx, saved_return_addr +- /* get the function pointer into ECX*/ +- popl %ecx +- movl %ecx, efi_rt_function_ptr +- movl $2f, %edx +- subl $__PAGE_OFFSET, %edx +- pushl %edx ++ popl (saved_return_addr) ++ popl (efi_rt_function_ptr) + + /* + * 3. Clear PG bit in %CR0. +@@ -73,9 +71,8 @@ ENTRY(efi_call_phys) + /* + * 5. Call the physical function. + */ +- jmp *%ecx ++ call *(efi_rt_function_ptr-__PAGE_OFFSET) + +-2: + /* + * 6. After EFI runtime service returns, control will return to + * following instruction. We'd better readjust stack pointer first. +@@ -88,35 +85,32 @@ ENTRY(efi_call_phys) + movl %cr0, %edx + orl $0x80000000, %edx + movl %edx, %cr0 +- jmp 1f +-1: ++ + /* + * 8. Now restore the virtual mode from flat mode by + * adding EIP with PAGE_OFFSET. + */ +- movl $1f, %edx +- jmp *%edx ++ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET + 1: ++ movl $(__KERNEL_DS), %edx ++ mov %edx, %ds ++ mov %edx, %es ++ mov %edx, %ss + + /* + * 9. Balance the stack. And because EAX contain the return value, + * we'd better not clobber it. + */ +- leal efi_rt_function_ptr, %edx +- movl (%edx), %ecx +- pushl %ecx ++ pushl (efi_rt_function_ptr) + + /* +- * 10. Push the saved return address onto the stack and return. ++ * 10. Return to the saved return address. 
+ */ +- leal saved_return_addr, %edx +- movl (%edx), %ecx +- pushl %ecx +- ret ++ jmpl *(saved_return_addr) + ENDPROC(efi_call_phys) + .previous + +-.data ++__INITDATA + saved_return_addr: + .long 0 + efi_rt_function_ptr: +diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S +index 4c07cca..2c8427d 100644 +--- a/arch/x86/platform/efi/efi_stub_64.S ++++ b/arch/x86/platform/efi/efi_stub_64.S +@@ -7,6 +7,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define SAVE_XMM \ + mov %rsp, %rax; \ +@@ -40,6 +41,7 @@ ENTRY(efi_call0) + call *%rdi + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call0) + +@@ -50,6 +52,7 @@ ENTRY(efi_call1) + call *%rdi + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call1) + +@@ -60,6 +63,7 @@ ENTRY(efi_call2) + call *%rdi + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call2) + +@@ -71,6 +75,7 @@ ENTRY(efi_call3) + call *%rdi + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call3) + +@@ -83,6 +88,7 @@ ENTRY(efi_call4) + call *%rdi + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call4) + +@@ -96,6 +102,7 @@ ENTRY(efi_call5) + call *%rdi + addq $48, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call5) + +@@ -112,5 +119,6 @@ ENTRY(efi_call6) + call *%rdi + addq $48, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call6) +diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c +index ad4ec1c..686479e 100644 +--- a/arch/x86/platform/mrst/mrst.c ++++ b/arch/x86/platform/mrst/mrst.c +@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; + EXPORT_SYMBOL_GPL(sfi_mrtc_array); + int sfi_mrtc_num; + +-static void mrst_power_off(void) ++static __noreturn void mrst_power_off(void) + { + if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1); ++ BUG(); + } + +-static void mrst_reboot(void) ++static __noreturn void mrst_reboot(void) + { + if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); + else + intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); ++ BUG(); + } + + /* parse all the mtimer info to a static mtimer array */ +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c +index 81aee5a..9ad9aae 100644 +--- a/arch/x86/platform/uv/tlb_uv.c ++++ b/arch/x86/platform/uv/tlb_uv.c +@@ -1433,6 +1433,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf, + * 0: display meaning of the statistics + */ + static ssize_t ptc_proc_write(struct file *file, const char __user *user, ++ size_t count, loff_t *data) __size_overflow(3); ++static ssize_t ptc_proc_write(struct file *file, const char __user *user, + size_t count, loff_t *data) + { + int cpu; +@@ -1548,6 +1550,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr, + * Handle a write to debugfs. 
(/sys/kernel/debug/sgi_uv/bau_tunables) + */ + static ssize_t tunables_write(struct file *file, const char __user *user, ++ size_t count, loff_t *data) __size_overflow(3); ++static ssize_t tunables_write(struct file *file, const char __user *user, + size_t count, loff_t *data) + { + int cpu; +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index f10c0af..3ec1f95 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -131,7 +131,7 @@ static void do_fpu_end(void) + static void fix_processor_context(void) + { + int cpu = smp_processor_id(); +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + + set_tss_desc(cpu, t); /* + * This just modifies memory; should not be +@@ -141,7 +141,9 @@ static void fix_processor_context(void) + */ + + #ifdef CONFIG_X86_64 ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; ++ pax_close_kernel(); + + syscall_init(); /* This sets MSR_*STAR and related */ + #endif +diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile +index 5d17950..2253fc9 100644 +--- a/arch/x86/vdso/Makefile ++++ b/arch/x86/vdso/Makefile +@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@ + -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ + sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' + +-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) ++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + GCOV_PROFILE := n + + # +diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c +index 468d591..8e80a0a 100644 +--- a/arch/x86/vdso/vdso32-setup.c ++++ b/arch/x86/vdso/vdso32-setup.c +@@ -25,6 +25,7 @@ + #include <asm/tlbflush.h> + #include <asm/vdso.h> + #include <asm/proto.h> ++#include <asm/mman.h> + + enum { + VDSO_DISABLED = 0, +@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map) + void enable_sep_cpu(void) + { + int cpu = get_cpu(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + + if (!boot_cpu_has(X86_FEATURE_SEP)) { + put_cpu(); +@@ -249,7 +250,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + /* + * Make sure the vDSO gets into every core dump. 
+ * Dumping its contents makes post-mortem fully interpretable later +@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + if (compat) + addr = VDSO_HIGH_BASE; + else { +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); ++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + } + +- current->mm->context.vdso = (void *)addr; ++ current->mm->context.vdso = addr; + + if (compat_uses_vma || !compat) { + /* +@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + } + + current_thread_info()->sysenter_return = +- VDSO32_SYMBOL(addr, SYSENTER_RETURN); ++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); + + up_fail: + if (ret) +- current->mm->context.vdso = NULL; ++ current->mm->context.vdso = 0; + + up_write(&mm->mmap_sem); + +@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init); + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) ++ return "[vdso]"; ++#endif ++ + return NULL; + } + +@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm) + * Check to see if the corresponding task was created in compat vdso + * mode. + */ +- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) ++ if (mm && mm->context.vdso == VDSO_HIGH_BASE) + return &gate_vma; + return NULL; + } +diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c +index 153407c..611cba9 100644 +--- a/arch/x86/vdso/vma.c ++++ b/arch/x86/vdso/vma.c +@@ -16,8 +16,6 @@ + #include <asm/vdso.h> + #include <asm/page.h> + +-unsigned int __read_mostly vdso_enabled = 1; +- + extern char vdso_start[], vdso_end[]; + extern unsigned short vdso_sync_cpuid; + +@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) + * unaligned here as a result of stack start randomization. 
+ */ + addr = PAGE_ALIGN(addr); +- addr = align_addr(addr, NULL, ALIGN_VDSO); + + return addr; + } +@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + { + struct mm_struct *mm = current->mm; +- unsigned long addr; ++ unsigned long addr = 0; + int ret; + +- if (!vdso_enabled) +- return 0; +- + down_write(&mm->mmap_sem); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + addr = vdso_addr(mm->start_stack, vdso_size); ++ addr = align_addr(addr, NULL, ALIGN_VDSO); + addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + +- current->mm->context.vdso = (void *)addr; ++ mm->context.vdso = addr; + + ret = install_special_mapping(mm, addr, vdso_size, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| + VM_ALWAYSDUMP, + vdso_pages); +- if (ret) { +- current->mm->context.vdso = NULL; +- goto up_fail; +- } ++ ++ if (ret) ++ mm->context.vdso = 0; + + up_fail: + up_write(&mm->mmap_sem); + return ret; + } +- +-static __init int vdso_setup(char *s) +-{ +- vdso_enabled = simple_strtoul(s, NULL, 0); +- return 0; +-} +-__setup("vdso=", vdso_setup); +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 1f92865..c843b20 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); + + struct shared_info xen_dummy_shared_info; + +-void *xen_initial_gdt; +- + RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); + __read_mostly int xen_have_vector_callback; + EXPORT_SYMBOL_GPL(xen_have_vector_callback); +@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = { + #endif + }; + +-static void xen_reboot(int reason) ++static __noreturn void xen_reboot(int reason) + { + struct sched_shutdown r = { .reason = reason }; + +@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason) + BUG(); + } + +-static void xen_restart(char *msg) ++static __noreturn void xen_restart(char *msg) + { + xen_reboot(SHUTDOWN_reboot); + } + +-static void xen_emergency_restart(void) ++static __noreturn void xen_emergency_restart(void) + { + xen_reboot(SHUTDOWN_reboot); + } + +-static void xen_machine_halt(void) ++static __noreturn void xen_machine_halt(void) + { + xen_reboot(SHUTDOWN_poweroff); + } +@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void) + __userpte_alloc_gfp &= ~__GFP_HIGHMEM; + + /* Work out if we support NX */ +- x86_configure_nx(); ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) ++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { ++ unsigned l, h; ++ ++ __supported_pte_mask |= _PAGE_NX; ++ rdmsr(MSR_EFER, l, h); ++ l |= EFER_NX; ++ wrmsr(MSR_EFER, l, h); ++ } ++#endif + + xen_setup_features(); + +@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void) + + machine_ops = xen_machine_ops; + +- /* +- * The only reliable way to retain the initial address of the +- * percpu gdt_page is to remember it here, so we can go and +- * mark it RW later, when the initial percpu area is freed. 
+- */ +- xen_initial_gdt = &per_cpu(gdt_page, 0); +- + xen_smp_init(); + + #ifdef CONFIG_ACPI_NUMA +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index 87f6673..e2555a6 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, + convert_pfn_mfn(init_level4_pgt); + convert_pfn_mfn(level3_ident_pgt); + convert_pfn_mfn(level3_kernel_pgt); ++ convert_pfn_mfn(level3_vmalloc_start_pgt); ++ convert_pfn_mfn(level3_vmalloc_end_pgt); ++ convert_pfn_mfn(level3_vmemmap_pgt); + + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); + l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); +@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); ++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + +@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void) + pv_mmu_ops.set_pud = xen_set_pud; + #if PAGETABLE_LEVELS == 4 + pv_mmu_ops.set_pgd = xen_set_pgd; ++ pv_mmu_ops.set_pgd_batched = xen_set_pgd; + #endif + + /* This will work as long as patching hasn't happened yet +@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { + .pud_val = PV_CALLEE_SAVE(xen_pud_val), + .make_pud = PV_CALLEE_SAVE(xen_make_pud), + .set_pgd = xen_set_pgd_hyper, ++ .set_pgd_batched = xen_set_pgd_hyper, + + .alloc_pud = xen_alloc_pmd_init, + .release_pud = xen_release_pmd_init, +diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c +index 041d4fe..7666b7e 100644 +--- a/arch/x86/xen/smp.c ++++ b/arch/x86/xen/smp.c +@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void) + { + BUG_ON(smp_processor_id() != 0); + native_smp_prepare_boot_cpu(); +- +- /* We've switched to the "real" per-cpu gdt, so make sure the +- old memory can be recycled */ +- make_lowmem_page_readwrite(xen_initial_gdt); +- + xen_filter_cpu_maps(); + xen_setup_vcpu_info_placement(); + } +@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) + gdt = get_cpu_gdt_table(cpu); + + ctxt->flags = VGCF_IN_KERNEL; +- ctxt->user_regs.ds = __USER_DS; +- ctxt->user_regs.es = __USER_DS; ++ ctxt->user_regs.ds = __KERNEL_DS; ++ ctxt->user_regs.es = __KERNEL_DS; + ctxt->user_regs.ss = __KERNEL_DS; + #ifdef CONFIG_X86_32 + ctxt->user_regs.fs = __KERNEL_PERCPU; +- ctxt->user_regs.gs = __KERNEL_STACK_CANARY; ++ savesegment(gs, ctxt->user_regs.gs); + #else + ctxt->gs_base_kernel = per_cpu_offset(cpu); + #endif +@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) + int rc; + + per_cpu(current_task, cpu) = idle; ++ per_cpu(current_tinfo, cpu) = &idle->tinfo; + #ifdef CONFIG_X86_32 + irq_ctx_init(cpu); + #else + clear_tsk_thread_flag(idle, TIF_FORK); +- per_cpu(kernel_stack, cpu) = +- (unsigned long)task_stack_page(idle) - +- KERNEL_STACK_OFFSET + THREAD_SIZE; ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; + #endif + xen_setup_runstate_info(cpu); + xen_setup_timer(cpu); +diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S +index b040b0e..8cc4fe0 100644 
+--- a/arch/x86/xen/xen-asm_32.S ++++ b/arch/x86/xen/xen-asm_32.S +@@ -83,14 +83,14 @@ ENTRY(xen_iret) + ESP_OFFSET=4 # bytes pushed onto stack + + /* +- * Store vcpu_info pointer for easy access. Do it this way to +- * avoid having to reload %fs ++ * Store vcpu_info pointer for easy access. + */ + #ifdef CONFIG_SMP +- GET_THREAD_INFO(%eax) +- movl TI_cpu(%eax), %eax +- movl __per_cpu_offset(,%eax,4), %eax +- mov xen_vcpu(%eax), %eax ++ push %fs ++ mov $(__KERNEL_PERCPU), %eax ++ mov %eax, %fs ++ mov PER_CPU_VAR(xen_vcpu), %eax ++ pop %fs + #else + movl xen_vcpu, %eax + #endif +diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S +index aaa7291..3f77960 100644 +--- a/arch/x86/xen/xen-head.S ++++ b/arch/x86/xen/xen-head.S +@@ -19,6 +19,17 @@ ENTRY(startup_xen) + #ifdef CONFIG_X86_32 + mov %esi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%esp ++#ifdef CONFIG_SMP ++ movl $cpu_gdt_table,%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,__KERNEL_PERCPU + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_PERCPU + 4(%edi) ++ movb %ah,__KERNEL_PERCPU + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,__KERNEL_PERCPU + 0(%edi) ++#endif + #else + mov %rsi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%rsp +diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h +index b095739..8c17bcd 100644 +--- a/arch/x86/xen/xen-ops.h ++++ b/arch/x86/xen/xen-ops.h +@@ -10,8 +10,6 @@ + extern const char xen_hypervisor_callback[]; + extern const char xen_failsafe_callback[]; + +-extern void *xen_initial_gdt; +- + struct trap_info; + void xen_copy_trap_info(struct trap_info *traps); + +diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h +index 525bd3d..ef888b1 100644 +--- a/arch/xtensa/variants/dc232b/include/variant/core.h ++++ b/arch/xtensa/variants/dc232b/include/variant/core.h +@@ -119,9 +119,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */ + #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */ +diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h +index 2f33760..835e50a 100644 +--- a/arch/xtensa/variants/fsf/include/variant/core.h ++++ b/arch/xtensa/variants/fsf/include/variant/core.h +@@ -11,6 +11,7 @@ + #ifndef _XTENSA_CORE_H + #define _XTENSA_CORE_H + ++#include <linux/const.h> + + /**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED +@@ -112,9 +113,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */ + #define 
XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */ +diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h +index af00795..2bb8105 100644 +--- a/arch/xtensa/variants/s6000/include/variant/core.h ++++ b/arch/xtensa/variants/s6000/include/variant/core.h +@@ -11,6 +11,7 @@ + #ifndef _XTENSA_CORE_CONFIGURATION_H + #define _XTENSA_CORE_CONFIGURATION_H + ++#include <linux/const.h> + + /**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED +@@ -118,9 +119,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */ + #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */ +diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c +index 58916af..9cb880b 100644 +--- a/block/blk-iopoll.c ++++ b/block/blk-iopoll.c +@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll) + } + EXPORT_SYMBOL(blk_iopoll_complete); + +-static void blk_iopoll_softirq(struct softirq_action *h) ++static void blk_iopoll_softirq(void) + { + struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); + int rearm = 0, budget = blk_iopoll_budget; +diff --git a/block/blk-map.c b/block/blk-map.c +index 623e1cd..ca1e109 100644 +--- a/block/blk-map.c ++++ b/block/blk-map.c +@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, + if (!len || !kbuf) + return -EINVAL; + +- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); ++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf); + if (do_copy) + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + else +diff --git a/block/blk-softirq.c b/block/blk-softirq.c +index 1366a89..e17f54b 100644 +--- a/block/blk-softirq.c ++++ b/block/blk-softirq.c +@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. 
+ */ +-static void blk_done_softirq(struct softirq_action *h) ++static void blk_done_softirq(void) + { + struct list_head *cpu_list, local_list; + +diff --git a/block/bsg.c b/block/bsg.c +index c0ab25c..9d49f8f 100644 +--- a/block/bsg.c ++++ b/block/bsg.c +@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_v4 *hdr, struct bsg_device *bd, + fmode_t has_write_perm) + { ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; ++ + if (hdr->request_len > BLK_MAX_CDB) { + rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); + if (!rq->cmd) + return -ENOMEM; +- } ++ cmdptr = rq->cmd; ++ } else ++ cmdptr = tmpcmd; + +- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, ++ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request, + hdr->request_len)) + return -EFAULT; + ++ if (cmdptr != rq->cmd) ++ memcpy(rq->cmd, cmdptr, hdr->request_len); ++ + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { + if (blk_verify_command(rq->cmd, has_write_perm)) + return -EPERM; +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c +index 7b72502..646105c 100644 +--- a/block/compat_ioctl.c ++++ b/block/compat_ioctl.c +@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, + err |= __get_user(f->spec1, &uf->spec1); + err |= __get_user(f->fmt_gap, &uf->fmt_gap); + err |= __get_user(name, &uf->name); +- f->name = compat_ptr(name); ++ f->name = (void __force_kernel *)compat_ptr(name); + if (err) { + err = -EFAULT; + goto out; +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c +index 688be8a..8a37d98 100644 +--- a/block/scsi_ioctl.c ++++ b/block/scsi_ioctl.c +@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command); + static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_hdr *hdr, fmode_t mode) + { +- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; ++ ++ if (rq->cmd != rq->__cmd) ++ cmdptr = rq->cmd; ++ else ++ cmdptr = tmpcmd; ++ ++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; ++ ++ if (cmdptr != rq->cmd) ++ memcpy(rq->cmd, cmdptr, hdr->cmd_len); ++ + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) + return -EPERM; + +@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; + + if (!sic) + return -EINVAL; +@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + */ + err = -EFAULT; + rq->cmd_len = cmdlen; +- if (copy_from_user(rq->cmd, sic->data, cmdlen)) ++ ++ if (rq->cmd != rq->__cmd) ++ cmdptr = rq->cmd; ++ else ++ cmdptr = tmpcmd; ++ ++ if (copy_from_user(cmdptr, sic->data, cmdlen)) + goto error; + ++ if (rq->cmd != cmdptr) ++ memcpy(rq->cmd, cmdptr, cmdlen); ++ + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c +index a0f768c..1da9c73 100644 +--- a/crypto/ablkcipher.c ++++ b/crypto/ablkcipher.c +@@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req, + EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); + + static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, ++ unsigned int keylen) __size_overflow(3); ++static int setkey_unaligned(struct 
crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) + { + struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); +@@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, + } + + static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, ++ unsigned int keylen) __size_overflow(3); ++static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) + { + struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); +diff --git a/crypto/aead.c b/crypto/aead.c +index 04add3dc..983032f 100644 +--- a/crypto/aead.c ++++ b/crypto/aead.c +@@ -27,6 +27,8 @@ + #include "internal.h" + + static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, ++ unsigned int keylen) __size_overflow(3); ++static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) + { + struct aead_alg *aead = crypto_aead_alg(tfm); +@@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, + return ret; + } + ++static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3); + static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) + { + struct aead_alg *aead = crypto_aead_alg(tfm); +diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c +index 1e61d1a..cf06b86 100644 +--- a/crypto/blkcipher.c ++++ b/crypto/blkcipher.c +@@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); + + static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen) __size_overflow(3); ++static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) + { + struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; +@@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + return ret; + } + ++static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3); + static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) + { + struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; +diff --git a/crypto/cipher.c b/crypto/cipher.c +index 39541e0..802d956 100644 +--- a/crypto/cipher.c ++++ b/crypto/cipher.c +@@ -21,6 +21,8 @@ + #include "internal.h" + + static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, ++ unsigned int keylen) __size_overflow(3); ++static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) + { + struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; +@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + + } + ++static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3); + static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) + { + struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index 671d4d6..5f24030 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx { + + struct cryptd_blkcipher_request_ctx { + crypto_completion_t complete; +-}; ++} __no_const; + + struct cryptd_hash_ctx { + struct crypto_shash *child; +@@ -80,7 +80,7 @@ struct cryptd_aead_ctx { + + struct cryptd_aead_request_ctx { + crypto_completion_t complete; +-}; ++} __no_const; + + static void cryptd_queue_worker(struct work_struct *work); + +diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c +index 5d41894..22021e4 100644 +--- 
a/drivers/acpi/apei/cper.c ++++ b/drivers/acpi/apei/cper.c +@@ -38,12 +38,12 @@ + */ + u64 cper_next_record_id(void) + { +- static atomic64_t seq; ++ static atomic64_unchecked_t seq; + +- if (!atomic64_read(&seq)) +- atomic64_set(&seq, ((u64)get_seconds()) << 32); ++ if (!atomic64_read_unchecked(&seq)) ++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32); + +- return atomic64_inc_return(&seq); ++ return atomic64_inc_return_unchecked(&seq); + } + EXPORT_SYMBOL_GPL(cper_next_record_id); + +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c +index 7711d94..8622811 100644 +--- a/drivers/acpi/battery.c ++++ b/drivers/acpi/battery.c +@@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result) + + static ssize_t acpi_battery_write_alarm(struct file *file, + const char __user * buffer, ++ size_t count, loff_t * ppos) __size_overflow(3); ++static ssize_t acpi_battery_write_alarm(struct file *file, ++ const char __user * buffer, + size_t count, loff_t * ppos) + { + int result = 0; +diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c +index 6c47ae9..abfdd63 100644 +--- a/drivers/acpi/ec_sys.c ++++ b/drivers/acpi/ec_sys.c +@@ -12,6 +12,7 @@ + #include <linux/acpi.h> + #include <linux/debugfs.h> + #include <linux/module.h> ++#include <linux/uaccess.h> + #include "internal.h" + + MODULE_AUTHOR("Thomas Renninger trenn@suse.de"); +@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, + * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private; + */ + unsigned int size = EC_SPACE_SIZE; +- u8 *data = (u8 *) buf; ++ u8 data; + loff_t init_off = *off; + int err = 0; + +@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf, + size = count; + + while (size) { +- err = ec_read(*off, &data[*off - init_off]); ++ err = ec_read(*off, &data); + if (err) + return err; ++ if (put_user(data, &buf[*off - init_off])) ++ return -EFAULT; + *off += 1; + size--; + } +@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, + + unsigned int size = count; + loff_t init_off = *off; +- u8 *data = (u8 *) buf; + int err = 0; + + if (*off >= EC_SPACE_SIZE) +@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, + } + + while (size) { +- u8 byte_write = data[*off - init_off]; ++ u8 byte_write; ++ if (get_user(byte_write, &buf[*off - init_off])) ++ return -EFAULT; + err = ec_write(*off, byte_write); + if (err) + return err; +diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c +index 251c7b62..000462d 100644 +--- a/drivers/acpi/proc.c ++++ b/drivers/acpi/proc.c +@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file, + size_t count, loff_t * ppos) + { + struct list_head *node, *next; +- char strbuf[5]; +- char str[5] = ""; +- unsigned int len = count; ++ char strbuf[5] = {0}; + +- if (len > 4) +- len = 4; +- if (len < 0) ++ if (count > 4) ++ count = 4; ++ if (copy_from_user(strbuf, buffer, count)) + return -EFAULT; +- +- if (copy_from_user(strbuf, buffer, len)) +- return -EFAULT; +- strbuf[len] = '\0'; +- sscanf(strbuf, "%s", str); ++ strbuf[count] = '\0'; + + mutex_lock(&acpi_device_lock); + list_for_each_safe(node, next, &acpi_wakeup_device_list) { +@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file, + if (!dev->wakeup.flags.valid) + continue; + +- if (!strncmp(dev->pnp.bus_id, str, 4)) { ++ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) { + if (device_can_wakeup(&dev->dev)) { + bool enable = 
!device_may_wakeup(&dev->dev); + device_set_wakeup_enable(&dev->dev, enable); +diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c +index 9d7bc9f..a6fc091 100644 +--- a/drivers/acpi/processor_driver.c ++++ b/drivers/acpi/processor_driver.c +@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) + return 0; + #endif + +- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); ++ BUG_ON(pr->id >= nr_cpu_ids); + + /* + * Buggy BIOS check +diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c +index 6e36d0c..f319944 100644 +--- a/drivers/acpi/sbs.c ++++ b/drivers/acpi/sbs.c +@@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset) + + static ssize_t + acpi_battery_write_alarm(struct file *file, const char __user * buffer, ++ size_t count, loff_t * ppos) __size_overflow(3); ++static ssize_t ++acpi_battery_write_alarm(struct file *file, const char __user * buffer, + size_t count, loff_t * ppos) + { + struct seq_file *seq = file->private_data; +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index c04ad68..0b99473 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) + struct ata_port *ap; + unsigned int tag; + +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + ap = qc->ap; + + qc->flags = 0; +@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) + struct ata_port *ap; + struct ata_link *link; + +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); + ap = qc->ap; + link = qc->dev->link; +@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) + return; + + spin_lock(&lock); ++ pax_open_kernel(); + + for (cur = ops->inherits; cur; cur = cur->inherits) { + void **inherit = (void **)cur; +@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) + if (IS_ERR(*pp)) + *pp = NULL; + +- ops->inherits = NULL; ++ *(struct ata_port_operations **)&ops->inherits = NULL; + ++ pax_close_kernel(); + spin_unlock(&lock); + } + +diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c +index e8574bb..f9f6a72 100644 +--- a/drivers/ata/pata_arasan_cf.c ++++ b/drivers/ata/pata_arasan_cf.c +@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev) + /* Handle platform specific quirks */ + if (pdata->quirk) { + if (pdata->quirk & CF_BROKEN_PIO) { +- ap->ops->set_piomode = NULL; ++ pax_open_kernel(); ++ *(void **)&ap->ops->set_piomode = NULL; ++ pax_close_kernel(); + ap->pio_mask = 0; + } + if (pdata->quirk & CF_BROKEN_MWDMA) +diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c +index f9b983a..887b9d8 100644 +--- a/drivers/atm/adummy.c ++++ b/drivers/atm/adummy.c +@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c +index f8f41e0..1f987dd 100644 +--- a/drivers/atm/ambassador.c ++++ b/drivers/atm/ambassador.c +@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { + PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); + + // VC layer 
stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the descriptor + kfree (tx_descr); +@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { + dump_skb ("<<<", vc, skb); + + // VC layer stats +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + // end of our responsibility + atm_vcc->push (atm_vcc, skb); +@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { + } else { + PRINTK (KERN_INFO, "dropped over-size frame"); + // should we count this? +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + } + + } else { +@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { + } + + if (check_area (skb->data, skb->len)) { +- atomic_inc(&atm_vcc->stats->tx_err); ++ atomic_inc_unchecked(&atm_vcc->stats->tx_err); + return -ENOMEM; // ? + } + +diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c +index b22d71c..d6e1049 100644 +--- a/drivers/atm/atmtcp.c ++++ b/drivers/atm/atmtcp.c +@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + if (dev_data) return 0; +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOLINK; + } + size = skb->len+sizeof(struct atmtcp_hdr); +@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (!new_skb) { + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOBUFS; + } + hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); +@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + return 0; + } + +@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); + read_unlock(&vcc_sklist_lock); + if (!out_vcc) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + goto done; + } + skb_pull(skb,sizeof(struct atmtcp_hdr)); +@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + __net_timestamp(new_skb); + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + done: + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c +index 956e9ac..133516d 100644 +--- a/drivers/atm/eni.c ++++ b/drivers/atm/eni.c +@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc) + DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", + vcc->dev->number); + length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + length = ATM_CELL_SIZE-1; /* no HEC */ +@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc) + size); + } + eff = length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + size = 
(descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); +@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc) + "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", + vcc->dev->number,vcc->vci,length,size << 2,descr); + length = eff = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; +@@ -771,7 +771,7 @@ rx_dequeued++; + vcc->push(vcc,skb); + pushed++; + } +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + wake_up(&eni_dev->rx_wait); + } +@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev) + PCI_DMA_TODEVICE); + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb_irq(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&eni_dev->tx_wait); + dma_complete++; + } +@@ -1569,7 +1569,7 @@ tx_complete++; + /*--------------------------------- entries ---------------------------------*/ + + +-static const char *media_name[] __devinitdata = { ++static const char *media_name[] __devinitconst = { + "MMF", "SMF", "MMF", "03?", /* 0- 3 */ + "UTP", "05?", "06?", "07?", /* 4- 7 */ + "TAXI","09?", "10?", "11?", /* 8-11 */ +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c +index 5072f8a..fa52520d 100644 +--- a/drivers/atm/firestream.c ++++ b/drivers/atm/firestream.c +@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) + } + } + +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + fs_dprintk (FS_DEBUG_TXMEM, "i"); + fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); +@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) + #endif + skb_put (skb, qe->p1 & 0xffff); + ATM_SKB(skb)->vcc = atm_vcc; +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); + atm_vcc->push (atm_vcc, skb); +@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) + kfree (pe); + } + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + case 0x1f: /* Reassembly abort: no buffers. */ + /* Silently increment error counter. */ + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ + printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", +diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c +index 361f5ae..7fc552d 100644 +--- a/drivers/atm/fore200e.c ++++ b/drivers/atm/fore200e.c +@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e) + #endif + /* check error condition */ + if (*entry->status & STATUS_ERROR) +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + else +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + } + +@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp + if (skb == NULL) { + DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + +@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp + + dev_kfree_skb_any(skb); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + +@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e) + DPRINTK(2, "damaged PDU on %d.%d.%d\n", + fore200e->atm_dev->number, + entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + +@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) + goto retry_here; + } + +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + + fore200e->tx_sat++; + DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", +diff --git a/drivers/atm/he.c b/drivers/atm/he.c +index 9a51df4..f3bb5f8 100644 +--- a/drivers/atm/he.c ++++ b/drivers/atm/he.c +@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + + if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { + hprintk("HBUF_ERR! (cid 0x%x)\n", cid); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto return_host_buffers; + } + +@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + RBRQ_LEN_ERR(he_dev->rbrq_head) + ? 
"LEN_ERR" : "", + vcc->vpi, vcc->vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto return_host_buffers; + } + +@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + vcc->push(vcc, skb); + spin_lock(&he_dev->global_lock); + +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return_host_buffers: + ++pdus_assembled; +@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) + tpd->vcc->pop(tpd->vcc, tpd->skb); + else + dev_kfree_skb_any(tpd->skb); +- atomic_inc(&tpd->vcc->stats->tx_err); ++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err); + } + pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); + return; +@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + +@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + #endif +@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + __enqueue_tpd(he_dev, tpd, cid); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c +index b812103..e391a49 100644 +--- a/drivers/atm/horizon.c ++++ b/drivers/atm/horizon.c +@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) { + { + struct atm_vcc * vcc = ATM_SKB(skb)->vcc; + // VC layer stats +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + __net_timestamp(skb); + // end of our responsibility + vcc->push (vcc, skb); +@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) { + dev->tx_iovec = NULL; + + // VC layer stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the skb + hrz_kfree_skb (skb); +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index 1c05212..c28e200 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc) + else + dev_kfree_skb(skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + + atomic_dec(&scq->used); +@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for aal0.\n", + card->name); +- atomic_add(i, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", + card->name); +- 
atomic_add(i - 1, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); + dev_kfree_skb(sb); + break; + } +@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + cell += ATM_CELL_PAYLOAD; + } +@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + "(CDC: %08x)\n", + card->name, len, rpp->len, readl(SAR_REG_CDC)); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (stat & SAR_RSQE_CRC) { + RXPRINTK("%s: AAL5 CRC error.\n", card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (skb_queue_len(&rpp->queue) > 1) { +@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + RXPRINTK("%s: Can't alloc RX skb.\n", + card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (!atm_charge(vcc, skb->truesize)) { +@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return; + } +@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + if (skb->truesize > SAR_FB_SIZE_3) + add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); +@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card) + if (vcc->qos.aal != ATM_AAL0) { + RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", + card->name, vpi, vci); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto drop; + } + + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for AAL0.\n", + card->name); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto drop; + } + +@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + drop: + skb_pull(queue, 64); +@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + + if (vc == NULL) { + printk("%s: NULL connection in send().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + if (!test_bit(VCF_TX, &vc->flags)) { + printk("%s: Trying to transmit on a non-tx VC.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + break; + default: + printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + + if (skb_shinfo(skb)->nr_frags != 0) { + printk("%s: No scatter-gather yet.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1988,7 
+1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + + err = queue_skb(card, vc, skb, oam); + if (err) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return err; + } +@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags) + skb = dev_alloc_skb(64); + if (!skb) { + printk("%s: Out of memory in send_oam().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOMEM; + } + atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); +diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c +index 3d0c2b0..45441fa 100644 +--- a/drivers/atm/iphase.c ++++ b/drivers/atm/iphase.c +@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev) + status = (u_short) (buf_desc_ptr->desc_mode); + if (status & (RX_CER | RX_PTE | RX_OFL)) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("IA: bad packet, dropping it");) + if (status & RX_CER) { + IF_ERR(printk(" cause: packet CRC error\n");) +@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev) + len = dma_addr - buf_addr; + if (len > iadev->rx_buf_sz) { + printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out_free_desc; + } + +@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev) + ia_vcc = INPH_IA_VCC(vcc); + if (ia_vcc == NULL) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + dev_kfree_skb_any(skb); + atm_return(vcc, atm_guess_pdu2truesize(len)); + goto INCR_DLE; +@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev) + if ((length > iadev->rx_buf_sz) || (length > + (skb->len - sizeof(struct cpcs_trailer)))) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", + length, skb->len);) + dev_kfree_skb_any(skb); +@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev) + + IF_RX(printk("rx_dle_intr: skb push");) + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + iadev->rx_pkt_cnt++; + } + INCR_DLE: +@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) + { + struct k_sonet_stats *stats; + stats = &PRIV(_ia_dev[board])->sonet_stats; +- printk("section_bip: %d\n", atomic_read(&stats->section_bip)); +- printk("line_bip : %d\n", atomic_read(&stats->line_bip)); +- printk("path_bip : %d\n", atomic_read(&stats->path_bip)); +- printk("line_febe : %d\n", atomic_read(&stats->line_febe)); +- printk("path_febe : %d\n", atomic_read(&stats->path_febe)); +- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); +- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); +- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); +- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); ++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); ++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); ++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); ++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); ++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); ++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); ++ printk("uncorr_hcs : %d\n", 
atomic_read_unchecked(&stats->uncorr_hcs)); ++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); ++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); + } + ia_cmds.status = 0; + break; +@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { + if ((desc == 0) || (desc > iadev->num_tx_desc)) + { + IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + if (vcc->pop) + vcc->pop(vcc, skb); + else +@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { + ATM_DESC(skb) = vcc->vci; + skb_queue_tail(&iadev->tx_dma_q, skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + iadev->tx_pkt_cnt++; + /* Increment transaction counter */ + writel(2, iadev->dma+IPHASE5575_TX_COUNTER); + + #if 0 + /* add flow control logic */ +- if (atomic_read(&vcc->stats->tx) % 20 == 0) { ++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { + if (iavcc->vc_desc_cnt > 10) { + vcc->tx_quota = vcc->tx_quota * 3 / 4; + printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); +diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c +index f556969..0da15eb 100644 +--- a/drivers/atm/lanai.c ++++ b/drivers/atm/lanai.c +@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai, + vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); + lanai_endtx(lanai, lvcc); + lanai_free_skb(lvcc->tx.atmvcc, skb); +- atomic_inc(&lvcc->tx.atmvcc->stats->tx); ++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); + } + + /* Try to fill the buffer - don't call unless there is backlog */ +@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) + ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; + __net_timestamp(skb); + lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); + out: + lvcc->rx.buf.ptr = end; + cardvcc_write(lvcc, endptr, vcc_rxreadptr); +@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " + "vcc %d\n", lanai->number, (unsigned int) s, vci); + lanai->stats.service_rxnotaal5++; +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + return 0; + } + if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { +@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + int bytes; + read_unlock(&vcc_sklist_lock); + DPRINTK("got trashed rx pdu on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_trash++; + bytes = (SERVICE_GET_END(s) * 16) - + (((unsigned long) lvcc->rx.buf.ptr) - +@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + } + if (s & SERVICE_STREAM) { + read_unlock(&vcc_sklist_lock); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_stream++; + printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " + "PDU on VCI %d!\n", lanai->number, vci); +@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + return 0; + } + DPRINTK("got rx crc error on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + 
lvcc->stats.x.aal5.service_rxcrc++; + lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; + cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); +diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c +index 1c70c45..300718d 100644 +--- a/drivers/atm/nicstar.c ++++ b/drivers/atm/nicstar.c +@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if ((vc = (vc_map *) vcc->dev_data) == NULL) { + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if (!vc->tx) { + printk("nicstar%d: Trying to transmit on a non-tx VC.\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (skb_shinfo(skb)->nr_frags != 0) { + printk("nicstar%d: No scatter-gather yet.\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + } + + if (push_scqe(card, vc, scq, &scqe, skb) != 0) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EIO; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk + ("nicstar%d: Can't allocate buffers for aal0.\n", + card->index); +- atomic_add(i, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK + ("nicstar%d: atm_charge() dropped aal0 packets.\n", + card->index); +- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ + dev_kfree_skb_any(sb); + break; + } +@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + cell += ATM_CELL_PAYLOAD; + } + +@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + if (iovb == NULL) { + printk("nicstar%d: Out of iovec buffers.\n", + card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_rx_buf(card, skb); + return; + } +@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + small or large buffer itself. 
*/ + } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { + printk("nicstar%d: received too big AAL5 SDU.\n", card->index); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_MAX_IOVECS); + NS_PRV_IOVCNT(iovb) = 0; +@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ("nicstar%d: Expected a small buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_rx_buf(card, skb); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); +@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ("nicstar%d: Expected a large buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; +@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk(" - PDU size mismatch.\n"); + else + printk(".\n"); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; +@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + /* skb points to a small buffer */ + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + skb_put(skb, len); + dequeue_sm_buf(card, skb); +@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ + struct sk_buff *sb; +@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + if (len <= NS_SMBUFSIZE) { + if (!atm_charge(vcc, sb->truesize)) { + push_rxbufs(card, sb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + skb_put(sb, len); + dequeue_sm_buf(card, sb); +@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, skb); +@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + dequeue_lg_buf(card, skb); + #ifdef NS_USE_DESTRUCTORS +@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, sb); +@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk + ("nicstar%d: Out of huge buffers.\n", + card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_iovec_rx_bufs(card, + (struct iovec *) + iovb->data, +@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + card->hbpool.count++; + } else + dev_kfree_skb_any(hb); +- 
atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + /* Copy the small buffer to the huge buffer */ + sb = (struct sk_buff *)iov->iov_base; +@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + #endif /* NS_USE_DESTRUCTORS */ + __net_timestamp(hb); + vcc->push(vcc, hb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } + +diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c +index 5d1d076..12fbca4 100644 +--- a/drivers/atm/solos-pci.c ++++ b/drivers/atm/solos-pci.c +@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg) + } + atm_charge(vcc, skb->truesize); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + break; + + case PKT_STATUS: +@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card) + vcc = SKB_CB(oldskb)->vcc; + + if (vcc) { +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + solos_pop(vcc, oldskb); + } else + dev_kfree_skb_irq(oldskb); +diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c +index 90f1ccc..04c4a1e 100644 +--- a/drivers/atm/suni.c ++++ b/drivers/atm/suni.c +@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock); + + + #define ADD_LIMITED(s,v) \ +- atomic_add((v),&stats->s); \ +- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); ++ atomic_add_unchecked((v),&stats->s); \ ++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); + + + static void suni_hz(unsigned long from_timer) +diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c +index 5120a96..e2572bd 100644 +--- a/drivers/atm/uPD98402.c ++++ b/drivers/atm/uPD98402.c +@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze + struct sonet_stats tmp; + int error = 0; + +- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); ++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); + if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); + if (zero && !error) { +@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) + + + #define ADD_LIMITED(s,v) \ +- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ +- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ +- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } ++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ ++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } + + + static void stat_event(struct atm_dev *dev) +@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev) + if (reason & uPD98402_INT_PFM) stat_event(dev); + if (reason & uPD98402_INT_PCO) { + (void) GET(PCOCR); /* clear interrupt cause */ +- atomic_add(GET(HECCT), ++ atomic_add_unchecked(GET(HECCT), + &PRIV(dev)->sonet_stats.uncorr_hcs); + } + if ((reason & uPD98402_INT_RFO) && +@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev) + PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | + uPD98402_INT_LOS),PIMR); /* enable them */ + (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ +- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); +- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); +- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); ++ 
atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); + return 0; + } + +diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c +index d889f56..17eb71e 100644 +--- a/drivers/atm/zatm.c ++++ b/drivers/atm/zatm.c +@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); + } + if (!size) { + dev_kfree_skb_irq(skb); +- if (vcc) atomic_inc(&vcc->stats->rx_err); ++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); + continue; + } + if (!atm_charge(vcc,skb->truesize)) { +@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); + skb->len = size; + ATM_SKB(skb)->vcc = vcc; + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + zout(pos & 0xffff,MTA(mbx)); + #if 0 /* probably a stupid idea */ +@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | + skb_queue_head(&zatm_vcc->backlog,skb); + break; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&zatm_vcc->tx_wait); + } + +diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c +index a4760e0..51283cf 100644 +--- a/drivers/base/devtmpfs.c ++++ b/drivers/base/devtmpfs.c +@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir) + if (!thread) + return 0; + +- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); ++ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL); + if (err) + printk(KERN_INFO "devtmpfs: error mounting %i\n", err); + else +diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c +index caf995f..6f76697 100644 +--- a/drivers/base/power/wakeup.c ++++ b/drivers/base/power/wakeup.c +@@ -30,14 +30,14 @@ bool events_check_enabled; + * They need to be modified together atomically, so it's better to use one + * atomic variable to hold them both. + */ +-static atomic_t combined_event_count = ATOMIC_INIT(0); ++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0); + + #define IN_PROGRESS_BITS (sizeof(int) * 4) + #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) + + static void split_counters(unsigned int *cnt, unsigned int *inpr) + { +- unsigned int comb = atomic_read(&combined_event_count); ++ unsigned int comb = atomic_read_unchecked(&combined_event_count); + + *cnt = (comb >> IN_PROGRESS_BITS); + *inpr = comb & MAX_IN_PROGRESS; +@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws) + ws->last_time = ktime_get(); + + /* Increment the counter of events in progress. */ +- atomic_inc(&combined_event_count); ++ atomic_inc_unchecked(&combined_event_count); + } + + /** +@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) + * Increment the counter of registered wakeup events and decrement the + * couter of wakeup events in progress simultaneously. 
+ */ +- atomic_add(MAX_IN_PROGRESS, &combined_event_count); ++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count); + } + + /** +diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c +index b0f553b..77b928b 100644 +--- a/drivers/block/cciss.c ++++ b/drivers/block/cciss.c +@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, + int err; + u32 cp; + ++ memset(&arg64, 0, sizeof(arg64)); ++ + err = 0; + err |= + copy_from_user(&arg64.LUN_info, &arg32->LUN_info, +@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h) + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, CommandList_struct, list); + /* can't do anything if fifo is full */ +- if ((h->access.fifo_full(h))) { ++ if ((h->access->fifo_full(h))) { + dev_warn(&h->pdev->dev, "fifo full\n"); + break; + } +@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h) + h->Qdepth--; + + /* Tell the controller execute command */ +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + + /* Put job onto the completed Q */ + addQ(&h->cmpQ, c); +@@ -3443,17 +3445,17 @@ startio: + + static inline unsigned long get_next_completion(ctlr_info_t *h) + { +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + } + + static inline int interrupt_pending(ctlr_info_t *h) + { +- return h->access.intr_pending(h); ++ return h->access->intr_pending(h); + } + + static inline long interrupt_not_for_us(ctlr_info_t *h) + { +- return ((h->access.intr_pending(h) == 0) || ++ return ((h->access->intr_pending(h) == 0) || + (h->interrupts_enabled == 0)); + } + +@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h) + u32 a; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ +@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) + trans_support & CFGTBL_Trans_use_short_tags); + + /* Change the access methods to the performant access methods */ +- h->access = SA5_performant_access; ++ h->access = &SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; + + return; +@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h) + if (prod_index < 0) + return -ENODEV; + h->product_name = products[prod_index].product_name; +- h->access = *(products[prod_index].access); ++ h->access = products[prod_index].access; + + if (cciss_board_disabled(h)) { + dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); +@@ -5041,7 +5043,7 @@ reinit_after_soft_reset: + } + + /* make sure the board interrupts are off */ +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); + if (rc) + goto clean2; +@@ -5093,7 +5095,7 @@ reinit_after_soft_reset: + * fake ones to scoop up any residual completions. 
+ */ + spin_lock_irqsave(&h->lock, flags); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irq(h->intr[h->intr_mode], h); + rc = cciss_request_irq(h, cciss_msix_discard_completions, +@@ -5113,9 +5115,9 @@ reinit_after_soft_reset: + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); +- h->access.set_intr_mask(h, CCISS_INTR_ON); ++ h->access->set_intr_mask(h, CCISS_INTR_ON); + msleep(10000); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) +@@ -5138,7 +5140,7 @@ reinit_after_soft_reset: + cciss_scsi_setup(h); + + /* Turn the interrupts on so we can service requests */ +- h->access.set_intr_mask(h, CCISS_INTR_ON); ++ h->access->set_intr_mask(h, CCISS_INTR_ON); + + /* Get the firmware version */ + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); +@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev) + kfree(flush_buf); + if (return_code != IO_OK) + dev_warn(&h->pdev->dev, "Error flushing cache\n"); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + free_irq(h->intr[h->intr_mode], h); + } + +diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h +index 7fda30e..eb5dfe0 100644 +--- a/drivers/block/cciss.h ++++ b/drivers/block/cciss.h +@@ -101,7 +101,7 @@ struct ctlr_info + /* information about each logical volume */ + drive_info_struct *drv[CISS_MAX_LUN]; + +- struct access_method access; ++ struct access_method *access; + + /* queue and queue Info */ + struct list_head reqQ; +diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c +index 9125bbe..eede5c8 100644 +--- a/drivers/block/cpqarray.c ++++ b/drivers/block/cpqarray.c +@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) + if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) { + goto Enomem4; + } +- hba[i]->access.set_intr_mask(hba[i], 0); ++ hba[i]->access->set_intr_mask(hba[i], 0); + if (request_irq(hba[i]->intr, do_ida_intr, + IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i])) + { +@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) + add_timer(&hba[i]->timer); + + /* Enable IRQ now that spinlock and rate limit timer are set up */ +- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY); ++ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY); + + for(j=0; j<NWD; j++) { + struct gendisk *disk = ida_gendisk[i][j]; +@@ -694,7 +694,7 @@ DBGINFO( + for(i=0; i<NR_PRODUCTS; i++) { + if (board_id == products[i].board_id) { + c->product_name = products[i].product_name; +- c->access = *(products[i].access); ++ c->access = products[i].access; + break; + } + } +@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void) + hba[ctlr]->intr = intr; + sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr); + hba[ctlr]->product_name = products[j].product_name; +- hba[ctlr]->access = *(products[j].access); ++ hba[ctlr]->access = products[j].access; + hba[ctlr]->ctlr = ctlr; + hba[ctlr]->board_id = board_id; + hba[ctlr]->pci_dev = NULL; /* not PCI */ +@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h) + + while((c = h->reqQ) != NULL) { + /* Can't do anything if we're busy */ +- if (h->access.fifo_full(h) == 0) ++ if (h->access->fifo_full(h) == 0) + return; + + /* Get the first entry from the request Q */ +@@ 
-988,7 +988,7 @@ static void start_io(ctlr_info_t *h) + h->Qdepth--; + + /* Tell the controller to do our bidding */ +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + + /* Get onto the completion Q */ + addQ(&h->cmpQ, c); +@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) + unsigned long flags; + __u32 a,a1; + +- istat = h->access.intr_pending(h); ++ istat = h->access->intr_pending(h); + /* Is this interrupt for us? */ + if (istat == 0) + return IRQ_NONE; +@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) + */ + spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); + if (istat & FIFO_NOT_EMPTY) { +- while((a = h->access.command_completed(h))) { ++ while((a = h->access->command_completed(h))) { + a1 = a; a &= ~3; + if ((c = h->cmpQ) == NULL) + { +@@ -1449,11 +1449,11 @@ static int sendcmd( + /* + * Disable interrupt + */ +- info_p->access.set_intr_mask(info_p, 0); ++ info_p->access->set_intr_mask(info_p, 0); + /* Make sure there is room in the command FIFO */ + /* Actually it should be completely empty at this time. */ + for (i = 200000; i > 0; i--) { +- temp = info_p->access.fifo_full(info_p); ++ temp = info_p->access->fifo_full(info_p); + if (temp != 0) { + break; + } +@@ -1466,7 +1466,7 @@ DBG( + /* + * Send the cmd + */ +- info_p->access.submit_command(info_p, c); ++ info_p->access->submit_command(info_p, c); + complete = pollcomplete(ctlr); + + pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, +@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host) + * we check the new geometry. Then turn interrupts back on when + * we're done. + */ +- host->access.set_intr_mask(host, 0); ++ host->access->set_intr_mask(host, 0); + getgeometry(ctlr); +- host->access.set_intr_mask(host, FIFO_NOT_EMPTY); ++ host->access->set_intr_mask(host, FIFO_NOT_EMPTY); + + for(i=0; i<NWD; i++) { + struct gendisk *disk = ida_gendisk[ctlr][i]; +@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr) + /* Wait (up to 2 seconds) for a command to complete */ + + for (i = 200000; i > 0; i--) { +- done = hba[ctlr]->access.command_completed(hba[ctlr]); ++ done = hba[ctlr]->access->command_completed(hba[ctlr]); + if (done == 0) { + udelay(10); /* a short fixed delay */ + } else +diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h +index be73e9d..7fbf140 100644 +--- a/drivers/block/cpqarray.h ++++ b/drivers/block/cpqarray.h +@@ -99,7 +99,7 @@ struct ctlr_info { + drv_info_t drv[NWD]; + struct proc_dir_entry *proc; + +- struct access_method access; ++ struct access_method *access; + + cmdlist_t *reqQ; + cmdlist_t *cmpQ; +diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h +index 9cf2035..bffca95 100644 +--- a/drivers/block/drbd/drbd_int.h ++++ b/drivers/block/drbd/drbd_int.h +@@ -736,7 +736,7 @@ struct drbd_request; + struct drbd_epoch { + struct list_head list; + unsigned int barrier_nr; +- atomic_t epoch_size; /* increased on every request added. */ ++ atomic_unchecked_t epoch_size; /* increased on every request added. */ + atomic_t active; /* increased on every req. added, and dec on every finished. 
*/ + unsigned long flags; + }; +@@ -1108,7 +1108,7 @@ struct drbd_conf { + void *int_dig_in; + void *int_dig_vv; + wait_queue_head_t seq_wait; +- atomic_t packet_seq; ++ atomic_unchecked_t packet_seq; + unsigned int peer_seq; + spinlock_t peer_seq_lock; + unsigned int minor; +@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname, + + static inline void drbd_tcp_cork(struct socket *sock) + { +- int __user val = 1; ++ int val = 1; + (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, +- (char __user *)&val, sizeof(val)); ++ (char __force_user *)&val, sizeof(val)); + } + + static inline void drbd_tcp_uncork(struct socket *sock) + { +- int __user val = 0; ++ int val = 0; + (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, +- (char __user *)&val, sizeof(val)); ++ (char __force_user *)&val, sizeof(val)); + } + + static inline void drbd_tcp_nodelay(struct socket *sock) + { +- int __user val = 1; ++ int val = 1; + (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, +- (char __user *)&val, sizeof(val)); ++ (char __force_user *)&val, sizeof(val)); + } + + static inline void drbd_tcp_quickack(struct socket *sock) + { +- int __user val = 2; ++ int val = 2; + (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, +- (char __user *)&val, sizeof(val)); ++ (char __force_user *)&val, sizeof(val)); + } + + void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo); +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index 0358e55..bc33689 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, + p.sector = sector; + p.block_id = block_id; + p.blksize = blksize; +- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); ++ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq)); + + if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) + return false; +@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) + p.sector = cpu_to_be64(req->sector); + p.block_id = (unsigned long)req; + p.seq_num = cpu_to_be32(req->seq_num = +- atomic_add_return(1, &mdev->packet_seq)); ++ atomic_add_return_unchecked(1, &mdev->packet_seq)); + + dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); + +@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) + atomic_set(&mdev->unacked_cnt, 0); + atomic_set(&mdev->local_cnt, 0); + atomic_set(&mdev->net_cnt, 0); +- atomic_set(&mdev->packet_seq, 0); ++ atomic_set_unchecked(&mdev->packet_seq, 0); + atomic_set(&mdev->pp_in_use, 0); + atomic_set(&mdev->pp_in_use_by_net, 0); + atomic_set(&mdev->rs_sect_in, 0); +@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) + mdev->receiver.t_state); + + /* no need to lock it, I'm the only thread alive */ +- if (atomic_read(&mdev->current_epoch->epoch_size) != 0) +- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0) ++ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size)); + mdev->al_writ_cnt = + mdev->bm_writ_cnt = + mdev->read_cnt = +diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c +index af2a250..219c74b 100644 +--- a/drivers/block/drbd/drbd_nl.c ++++ b/drivers/block/drbd/drbd_nl.c +@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct 
netlink_skb_parms + module_put(THIS_MODULE); + } + +-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ ++static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ + + static unsigned short * + __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, +@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) + cn_reply->id.idx = CN_IDX_DRBD; + cn_reply->id.val = CN_VAL_DRBD; + +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); + cn_reply->ack = 0; /* not used here. */ + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + + (int)((char *)tl - (char *)reply->tag_list); +@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) + cn_reply->id.idx = CN_IDX_DRBD; + cn_reply->id.val = CN_VAL_DRBD; + +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); + cn_reply->ack = 0; /* not used here. */ + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + + (int)((char *)tl - (char *)reply->tag_list); +@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, + cn_reply->id.idx = CN_IDX_DRBD; + cn_reply->id.val = CN_VAL_DRBD; + +- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); ++ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq); + cn_reply->ack = 0; // not used here. + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + + (int)((char*)tl - (char*)reply->tag_list); +@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev) + cn_reply->id.idx = CN_IDX_DRBD; + cn_reply->id.val = CN_VAL_DRBD; + +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); + cn_reply->ack = 0; /* not used here. 
*/ + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + + (int)((char *)tl - (char *)reply->tag_list); +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c +index 43beaca..4a5b1dd 100644 +--- a/drivers/block/drbd/drbd_receiver.c ++++ b/drivers/block/drbd/drbd_receiver.c +@@ -894,7 +894,7 @@ retry: + sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10; + sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; + +- atomic_set(&mdev->packet_seq, 0); ++ atomic_set_unchecked(&mdev->packet_seq, 0); + mdev->peer_seq = 0; + + drbd_thread_start(&mdev->asender); +@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, + do { + next_epoch = NULL; + +- epoch_size = atomic_read(&epoch->epoch_size); ++ epoch_size = atomic_read_unchecked(&epoch->epoch_size); + + switch (ev & ~EV_CLEANUP) { + case EV_PUT: +@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, + rv = FE_DESTROYED; + } else { + epoch->flags = 0; +- atomic_set(&epoch->epoch_size, 0); ++ atomic_set_unchecked(&epoch->epoch_size, 0); + /* atomic_set(&epoch->active, 0); is already zero */ + if (rv == FE_STILL_LIVE) + rv = FE_RECYCLED; +@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign + drbd_wait_ee_list_empty(mdev, &mdev->active_ee); + drbd_flush(mdev); + +- if (atomic_read(&mdev->current_epoch->epoch_size)) { ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { + epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); + if (epoch) + break; + } + + epoch = mdev->current_epoch; +- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); ++ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0); + + D_ASSERT(atomic_read(&epoch->active) == 0); + D_ASSERT(epoch->flags == 0); +@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign + } + + epoch->flags = 0; +- atomic_set(&epoch->epoch_size, 0); ++ atomic_set_unchecked(&epoch->epoch_size, 0); + atomic_set(&epoch->active, 0); + + spin_lock(&mdev->epoch_lock); +- if (atomic_read(&mdev->current_epoch->epoch_size)) { ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { + list_add(&epoch->list, &mdev->current_epoch->list); + mdev->current_epoch = epoch; + mdev->epochs++; +@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned + spin_unlock(&mdev->peer_seq_lock); + + drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); +- atomic_inc(&mdev->current_epoch->epoch_size); ++ atomic_inc_unchecked(&mdev->current_epoch->epoch_size); + return drbd_drain_block(mdev, data_size); + } + +@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned + + spin_lock(&mdev->epoch_lock); + e->epoch = mdev->current_epoch; +- atomic_inc(&e->epoch->epoch_size); ++ atomic_inc_unchecked(&e->epoch->epoch_size); + atomic_inc(&e->epoch->active); + spin_unlock(&mdev->epoch_lock); + +@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) + D_ASSERT(list_empty(&mdev->done_ee)); + + /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ +- atomic_set(&mdev->current_epoch->epoch_size, 0); ++ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0); + D_ASSERT(list_empty(&mdev->current_epoch->list)); + } + +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 1e888c9..05cf1b0 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -227,7 +227,7 @@ 
static int __do_lo_send_write(struct file *file, + mm_segment_t old_fs = get_fs(); + + set_fs(get_ds()); +- bw = file->f_op->write(file, buf, len, &pos); ++ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos); + set_fs(old_fs); + if (likely(bw == len)) + return 0; +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index 4364303..9adf4ee 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig" + + config DEVKMEM + bool "/dev/kmem virtual device support" +- default y ++ default n ++ depends on !GRKERNSEC_KMEM + help + Say Y here if you want to support the /dev/kmem device. The + /dev/kmem device is rarely used, but can be used for certain +@@ -596,6 +597,7 @@ config DEVPORT + bool + depends on !M68K + depends on ISA || PCI ++ depends on !GRKERNSEC_KMEM + default y + + source "drivers/s390/char/Kconfig" +diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c +index 2e04433..22afc64 100644 +--- a/drivers/char/agp/frontend.c ++++ b/drivers/char/agp/frontend.c +@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) + if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) + return -EFAULT; + +- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) ++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) + return -EFAULT; + + client = agp_find_client_by_pid(reserve.pid); +diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c +index 095ab90..afad0a4 100644 +--- a/drivers/char/briq_panel.c ++++ b/drivers/char/briq_panel.c +@@ -9,6 +9,7 @@ + #include <linux/types.h> + #include <linux/errno.h> + #include <linux/tty.h> ++#include <linux/mutex.h> + #include <linux/timer.h> + #include <linux/kernel.h> + #include <linux/wait.h> +@@ -34,6 +35,7 @@ static int vfd_is_open; + static unsigned char vfd[40]; + static int vfd_cursor; + static unsigned char ledpb, led; ++static DEFINE_MUTEX(vfd_mutex); + + static void update_vfd(void) + { +@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_ + if (!vfd_is_open) + return -EBUSY; + ++ mutex_lock(&vfd_mutex); + for (;;) { + char c; + if (!indx) + break; +- if (get_user(c, buf)) ++ if (get_user(c, buf)) { ++ mutex_unlock(&vfd_mutex); + return -EFAULT; ++ } + if (esc) { + set_led(c); + esc = 0; +@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_ + buf++; + } + update_vfd(); ++ mutex_unlock(&vfd_mutex); + + return len; + } +diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c +index f773a9d..65cd683 100644 +--- a/drivers/char/genrtc.c ++++ b/drivers/char/genrtc.c +@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file, + switch (cmd) { + + case RTC_PLL_GET: ++ memset(&pll, 0, sizeof(pll)); + if (get_rtc_pll(&pll)) + return -EINVAL; + else +diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c +index 0833896..cccce52 100644 +--- a/drivers/char/hpet.c ++++ b/drivers/char/hpet.c +@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, + } + + static int +-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, ++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, + struct hpet_info *info) + { + struct hpet_timer __iomem *timer; +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 58c0e63..46c16bf 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ 
b/drivers/char/ipmi/ipmi_msghandler.c +@@ -415,7 +415,7 @@ struct ipmi_smi { + struct proc_dir_entry *proc_dir; + char proc_dir_name[10]; + +- atomic_t stats[IPMI_NUM_STATS]; ++ atomic_unchecked_t stats[IPMI_NUM_STATS]; + + /* + * run_to_completion duplicate of smb_info, smi_info +@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); + + + #define ipmi_inc_stat(intf, stat) \ +- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) + #define ipmi_get_stat(intf, stat) \ +- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) + + static int is_lan_addr(struct ipmi_addr *addr) + { +@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, + INIT_LIST_HEAD(&intf->cmd_rcvrs); + init_waitqueue_head(&intf->waitq); + for (i = 0; i < IPMI_NUM_STATS; i++) +- atomic_set(&intf->stats[i], 0); ++ atomic_set_unchecked(&intf->stats[i], 0); + + intf->proc_dir = NULL; + +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 9397ab4..d01bee1 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -277,7 +277,7 @@ struct smi_info { + unsigned char slave_addr; + + /* Counters and things for the proc filesystem. */ +- atomic_t stats[SI_NUM_STATS]; ++ atomic_unchecked_t stats[SI_NUM_STATS]; + + struct task_struct *thread; + +@@ -286,9 +286,9 @@ struct smi_info { + }; + + #define smi_inc_stat(smi, stat) \ +- atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) + #define smi_get_stat(smi, stat) \ +- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) + + #define SI_MAX_PARMS 4 + +@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi) + atomic_set(&new_smi->req_events, 0); + new_smi->run_to_completion = 0; + for (i = 0; i < SI_NUM_STATS; i++) +- atomic_set(&new_smi->stats[i], 0); ++ atomic_set_unchecked(&new_smi->stats[i], 0); + + new_smi->interrupt_disabled = 1; + atomic_set(&new_smi->stop_operation, 0); +diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c +index 1aeaaba..e018570 100644 +--- a/drivers/char/mbcs.c ++++ b/drivers/char/mbcs.c +@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev) + return 0; + } + +-static const struct cx_device_id __devinitdata mbcs_id_table[] = { ++static const struct cx_device_id __devinitconst mbcs_id_table[] = { + { + .part_num = MBCS_PART_NUM, + .mfg_num = MBCS_MFG_NUM, +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 1451790..f705c30 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -18,6 +18,7 @@ + #include <linux/raw.h> + #include <linux/tty.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/ptrace.h> + #include <linux/device.h> + #include <linux/highmem.h> +@@ -35,6 +36,10 @@ + # include <linux/efi.h> + #endif + ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++extern const struct file_operations grsec_fops; ++#endif ++ + static inline unsigned long size_inside_page(unsigned long start, + unsigned long size) + { +@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_mem_readwrite(from, to); ++#else + printk(KERN_INFO + "Program %s tried to access /dev/mem between 
%Lx->%Lx.\n", + current->comm, from, to); ++#endif + return 0; + } + cursor += PAGE_SIZE; +@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + } + return 1; + } ++#elif defined(CONFIG_GRKERNSEC_KMEM) ++static inline int range_is_allowed(unsigned long pfn, unsigned long size) ++{ ++ return 0; ++} + #else + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { +@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, + + while (count > 0) { + unsigned long remaining; ++ char *temp; + + sz = size_inside_page(p, count); + +@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf, + if (!ptr) + return -EFAULT; + +- remaining = copy_to_user(buf, ptr, sz); ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL); ++ if (!temp) { ++ unxlate_dev_mem_ptr(p, ptr); ++ return -ENOMEM; ++ } ++ memcpy(temp, ptr, sz); ++#else ++ temp = ptr; ++#endif ++ ++ remaining = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ + unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; +@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + size_t count, loff_t *ppos) + { + unsigned long p = *ppos; +- ssize_t low_count, read, sz; ++ ssize_t low_count, read, sz, err = 0; + char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ +- int err = 0; + + read = 0; + if (p < (unsigned long) high_memory) { +@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + } + #endif + while (low_count > 0) { ++ char *temp; ++ + sz = size_inside_page(p, low_count); + + /* +@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + */ + kbuf = xlate_dev_kmem_ptr((char *)p); + +- if (copy_to_user(buf, kbuf, sz)) ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL); ++ if (!temp) ++ return -ENOMEM; ++ memcpy(temp, kbuf, sz); ++#else ++ temp = kbuf; ++#endif ++ ++ err = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ ++ if (err) + return -EFAULT; + buf += sz; + p += sz; +@@ -867,6 +914,9 @@ static const struct memdev { + #ifdef CONFIG_CRASH_DUMP + [12] = { "oldmem", 0, &oldmem_fops, NULL }, + #endif ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, ++#endif + }; + + static int memory_open(struct inode *inode, struct file *filp) +diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c +index da3cfee..a5a6606 100644 +--- a/drivers/char/nvram.c ++++ b/drivers/char/nvram.c +@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf, + + spin_unlock_irq(&rtc_lock); + +- if (copy_to_user(buf, contents, tmp - contents)) ++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) + return -EFAULT; + + *ppos = i; +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 6035ab8..bdfe4fd 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -261,8 +261,13 @@ + /* + * Configuration information + */ ++#ifdef CONFIG_GRKERNSEC_RANDNET ++#define INPUT_POOL_WORDS 512 ++#define OUTPUT_POOL_WORDS 128 ++#else + #define INPUT_POOL_WORDS 128 + #define OUTPUT_POOL_WORDS 32 ++#endif + #define SEC_XFER_SIZE 512 + #define EXTRACT_SIZE 10 + +@@ -300,10 +305,17 @@ static struct poolinfo { + int poolwords; + int tap1, tap2, tap3, tap4, tap5; + } poolinfo_table[] = { ++#ifdef CONFIG_GRKERNSEC_RANDNET ++ /* 
x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ ++ { 512, 411, 308, 208, 104, 1 }, ++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ ++ { 128, 103, 76, 51, 25, 1 }, ++#else + /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ + { 128, 103, 76, 51, 25, 1 }, + /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ + { 32, 26, 20, 14, 7, 1 }, ++#endif + #if 0 + /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ + { 2048, 1638, 1231, 819, 411, 1 }, +@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, + + extract_buf(r, tmp); + i = min_t(int, nbytes, EXTRACT_SIZE); +- if (copy_to_user(buf, tmp, i)) { ++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { + ret = -EFAULT; + break; + } +@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid); + #include <linux/sysctl.h> + + static int min_read_thresh = 8, min_write_thresh; +-static int max_read_thresh = INPUT_POOL_WORDS * 32; ++static int max_read_thresh = OUTPUT_POOL_WORDS * 32; + static int max_write_thresh = INPUT_POOL_WORDS * 32; + static char sysctl_bootid[16]; + +diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c +index 1ee8ce7..b778bef 100644 +--- a/drivers/char/sonypi.c ++++ b/drivers/char/sonypi.c +@@ -55,6 +55,7 @@ + #include <asm/uaccess.h> + #include <asm/io.h> + #include <asm/system.h> ++#include <asm/local.h> + + #include <linux/sonypi.h> + +@@ -491,7 +492,7 @@ static struct sonypi_device { + spinlock_t fifo_lock; + wait_queue_head_t fifo_proc_list; + struct fasync_struct *fifo_async; +- int open_count; ++ local_t open_count; + int model; + struct input_dev *input_jog_dev; + struct input_dev *input_key_dev; +@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on) + static int sonypi_misc_release(struct inode *inode, struct file *file) + { + mutex_lock(&sonypi_device.lock); +- sonypi_device.open_count--; ++ local_dec(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + return 0; + } +@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) + { + mutex_lock(&sonypi_device.lock); + /* Flush input queue on first open */ +- if (!sonypi_device.open_count) ++ if (!local_read(&sonypi_device.open_count)) + kfifo_reset(&sonypi_device.fifo); +- sonypi_device.open_count++; ++ local_inc(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + + return 0; +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c +index 361a1df..2471eee 100644 +--- a/drivers/char/tpm/tpm.c ++++ b/drivers/char/tpm/tpm.c +@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, + chip->vendor.req_complete_val) + goto out_recv; + +- if ((status == chip->vendor.req_canceled)) { ++ if (status == chip->vendor.req_canceled) { + dev_err(chip->dev, "Operation Canceled\n"); + rc = -ECANCELED; + goto out; +diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c +index 0636520..169c1d0 100644 +--- a/drivers/char/tpm/tpm_bios.c ++++ b/drivers/char/tpm/tpm_bios.c +@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) + event = addr; + + if ((event->event_type == 0 && event->event_size == 0) || +- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - addr - sizeof(struct tcpa_event))) + return NULL; + + return addr; +@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v, + return NULL; + + if ((event->event_type == 0 && event->event_size == 0) || 
+- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - v - sizeof(struct tcpa_event))) + return NULL; + + (*pos)++; +@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) + int i; + + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) +- seq_putc(m, data[i]); ++ if (!seq_putc(m, data[i])) ++ return -EFAULT; + + return 0; + } +@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log) + log->bios_event_log_end = log->bios_event_log + len; + + virt = acpi_os_map_memory(start, len); ++ if (!virt) { ++ kfree(log->bios_event_log); ++ log->bios_event_log = NULL; ++ return -EFAULT; ++ } + +- memcpy(log->bios_event_log, virt, len); ++ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len); + + acpi_os_unmap_memory(virt, len); + return 0; +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 8e3c46d..c139b99 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, + if (to_user) { + ssize_t ret; + +- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); ++ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count); + if (ret) + return -EFAULT; + } else { +@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + if (!port_has_data(port) && !port->host_connected) + return 0; + +- return fill_readbuf(port, ubuf, count, true); ++ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true); + } + + static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index eb1d864..39ee5a7 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan) + } + if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_PQ); +- thread_count += cnt > 0 ?: 0; ++ thread_count += cnt > 0 ? cnt : 0; + } + + pr_info("dmatest: Started %u threads using %s\n", +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index c9eee6d..f9d5280 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) + * PCI core identifies what devices are on a system during boot, and then + * inquiry this table to see if this driver is for a given device found. 
+ */ +-static const struct pci_device_id amd64_pci_table[] __devinitdata = { ++static const struct pci_device_id amd64_pci_table[] __devinitconst = { + { + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, +diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c +index e47e73b..348e0bd 100644 +--- a/drivers/edac/amd76x_edac.c ++++ b/drivers/edac/amd76x_edac.c +@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { ++static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + AMD762}, +diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c +index 1af531a..3a8ff27 100644 +--- a/drivers/edac/e752x_edac.c ++++ b/drivers/edac/e752x_edac.c +@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { ++static const struct pci_device_id e752x_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7520}, +diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c +index 6ffb6d2..383d8d7 100644 +--- a/drivers/edac/e7xxx_edac.c ++++ b/drivers/edac/e7xxx_edac.c +@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { ++static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7205}, +diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c +index 495198a..ac08c85 100644 +--- a/drivers/edac/edac_pci_sysfs.c ++++ b/drivers/edac/edac_pci_sysfs.c +@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */ + static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ + static int edac_pci_poll_msec = 1000; /* one second workq period */ + +-static atomic_t pci_parity_count = ATOMIC_INIT(0); +-static atomic_t pci_nonparity_count = ATOMIC_INIT(0); ++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); ++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); + + static struct kobject *edac_pci_top_main_kobj; + static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); +@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + edac_printk(KERN_CRIT, EDAC_PCI, + "Signaled System Error on %s\n", + pci_name(dev)); +- atomic_inc(&pci_nonparity_count); ++ atomic_inc_unchecked(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) { +@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Master Data Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { +@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Detected Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + } + +@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Signaled System Error on %s\n", + pci_name(dev)); +- atomic_inc(&pci_nonparity_count); ++ atomic_inc_unchecked(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) 
{ +@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Master Data Parity Error on " + "%s\n", pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { +@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Detected Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + } + } +@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void) + if (!check_pci_errors) + return; + +- before_count = atomic_read(&pci_parity_count); ++ before_count = atomic_read_unchecked(&pci_parity_count); + + /* scan all PCI devices looking for a Parity Error on devices and + * bridges. +@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void) + /* Only if operator has selected panic on PCI Error */ + if (edac_pci_get_panic_on_pe()) { + /* If the count is different 'after' from 'before' */ +- if (before_count != atomic_read(&pci_parity_count)) ++ if (before_count != atomic_read_unchecked(&pci_parity_count)) + panic("EDAC: PCI Parity Error"); + } + } +diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c +index c0510b3..6e2a954 100644 +--- a/drivers/edac/i3000_edac.c ++++ b/drivers/edac/i3000_edac.c +@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i3000_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I3000}, +diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c +index aa08497..7e6822a 100644 +--- a/drivers/edac/i3200_edac.c ++++ b/drivers/edac/i3200_edac.c +@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i3200_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I3200}, +diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c +index 4dc3ac2..67d05a6 100644 +--- a/drivers/edac/i5000_edac.c ++++ b/drivers/edac/i5000_edac.c +@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev) + * + * The "E500P" device is the first device supported. + */ +-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i5000_pci_tbl[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), + .driver_data = I5000P}, + +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c +index bcbdeec..9886d16 100644 +--- a/drivers/edac/i5100_edac.c ++++ b/drivers/edac/i5100_edac.c +@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i5100_pci_tbl[] __devinitconst = { + /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... 
*/ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, + { 0, } +diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c +index 74d6ec34..baff517 100644 +--- a/drivers/edac/i5400_edac.c ++++ b/drivers/edac/i5400_edac.c +@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev) + * + * The "E500P" device is the first device supported. + */ +-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i5400_pci_tbl[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, + {0,} /* 0 terminated list. */ + }; +diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c +index 6104dba..e7ea8e1 100644 +--- a/drivers/edac/i7300_edac.c ++++ b/drivers/edac/i7300_edac.c +@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev) + * + * Has only 8086:360c PCI ID + */ +-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i7300_pci_tbl[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, + {0,} /* 0 terminated list. */ + }; +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c +index 70ad892..178943c 100644 +--- a/drivers/edac/i7core_edac.c ++++ b/drivers/edac/i7core_edac.c +@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = { + /* + * pci_device_id table for which devices we are looking for + */ +-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i7core_pci_tbl[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, + {0,} /* 0 terminated list. 
*/ +diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c +index 4329d39..f3022ef 100644 +--- a/drivers/edac/i82443bxgx_edac.c ++++ b/drivers/edac/i82443bxgx_edac.c +@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) + + EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); + +-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, +diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c +index 931a057..fd28340 100644 +--- a/drivers/edac/i82860_edac.c ++++ b/drivers/edac/i82860_edac.c +@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i82860_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82860}, +diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c +index 33864c6..01edc61 100644 +--- a/drivers/edac/i82875p_edac.c ++++ b/drivers/edac/i82875p_edac.c +@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82875P}, +diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c +index a5da732..983363b 100644 +--- a/drivers/edac/i82975x_edac.c ++++ b/drivers/edac/i82975x_edac.c +@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = { ++static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82975X +diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h +index 0106747..0b40417 100644 +--- a/drivers/edac/mce_amd.h ++++ b/drivers/edac/mce_amd.h +@@ -83,7 +83,7 @@ struct amd_decoder_ops { + bool (*dc_mce)(u16, u8); + bool (*ic_mce)(u16, u8); + bool (*nb_mce)(u16, u8); +-}; ++} __no_const; + + void amd_report_gart_errors(bool); + void amd_register_ecc_decoder(void (*f)(int, struct mce *)); +diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c +index b153674..ad2ba9b 100644 +--- a/drivers/edac/r82600_edac.c ++++ b/drivers/edac/r82600_edac.c +@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { ++static const struct pci_device_id r82600_pci_tbl[] __devinitconst = { + { + PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) + }, +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c +index 7a402bf..af0b211 100644 +--- a/drivers/edac/sb_edac.c ++++ b/drivers/edac/sb_edac.c +@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { + /* + * pci_device_id table for which devices we are looking for + */ +-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = { ++static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = { + 
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, + {0,} /* 0 terminated list. */ + }; +diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c +index b6f47de..c5acf3a 100644 +--- a/drivers/edac/x38_edac.c ++++ b/drivers/edac/x38_edac.c +@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev) + edac_mc_free(mci); + } + +-static const struct pci_device_id x38_pci_tbl[] __devinitdata = { ++static const struct pci_device_id x38_pci_tbl[] __devinitconst = { + { + PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + X38}, +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c +index 85661b0..c784559a 100644 +--- a/drivers/firewire/core-card.c ++++ b/drivers/firewire/core-card.c +@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref) + + void fw_core_remove_card(struct fw_card *card) + { +- struct fw_card_driver dummy_driver = dummy_driver_template; ++ fw_card_driver_no_const dummy_driver = dummy_driver_template; + + card->driver->update_phy_reg(card, 4, + PHY_LINK_ACTIVE | PHY_CONTENDER, 0); +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c +index 4799393..37bd3ab 100644 +--- a/drivers/firewire/core-cdev.c ++++ b/drivers/firewire/core-cdev.c +@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client, + int ret; + + if ((request->channels == 0 && request->bandwidth == 0) || +- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || +- request->bandwidth < 0) ++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) + return -EINVAL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c +index 855ab3f..11f4bbd 100644 +--- a/drivers/firewire/core-transaction.c ++++ b/drivers/firewire/core-transaction.c +@@ -37,6 +37,7 @@ + #include <linux/timer.h> + #include <linux/types.h> + #include <linux/workqueue.h> ++#include <linux/sched.h> + + #include <asm/byteorder.h> + +diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h +index b45be57..5fad18b 100644 +--- a/drivers/firewire/core.h ++++ b/drivers/firewire/core.h +@@ -101,6 +101,7 @@ struct fw_card_driver { + + int (*stop_iso)(struct fw_iso_context *ctx); + }; ++typedef struct fw_card_driver __no_const fw_card_driver_no_const; + + void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, struct device *device); +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c +index 153980b..4b4d046 100644 +--- a/drivers/firmware/dmi_scan.c ++++ b/drivers/firmware/dmi_scan.c +@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void) + } + } + else { +- /* +- * no iounmap() for that ioremap(); it would be a no-op, but +- * it's so early in setup that sucker gets confused into doing +- * what it shouldn't if we actually call it. 
+- */ + p = dmi_ioremap(0xF0000, 0x10000); + if (p == NULL) + goto error; +@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), + if (buf == NULL) + return -1; + +- dmi_table(buf, dmi_len, dmi_num, decode, private_data); ++ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data); + + iounmap(buf); + return 0; +diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c +index 98723cb..10ca85b 100644 +--- a/drivers/gpio/gpio-vr41xx.c ++++ b/drivers/gpio/gpio-vr41xx.c +@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq) + printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", + maskl, pendl, maskh, pendh); + +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + return -EINVAL; + } +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index 8323fc3..5c1d755 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, + */ + if ((out_resp->count_modes >= mode_count) && mode_count) { + copied = 0; +- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr; ++ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; + list_for_each_entry(mode, &connector->modes, head) { + drm_crtc_convert_to_umode(&u_mode, mode); + if (copy_to_user(mode_ptr + copied, +@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, + + if ((out_resp->count_props >= props_count) && props_count) { + copied = 0; +- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr); +- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr); ++ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr); ++ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr); + for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { + if (connector->property_ids[i] != 0) { + if (put_user(connector->property_ids[i], +@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, + + if ((out_resp->count_encoders >= encoders_count) && encoders_count) { + copied = 0; +- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr); ++ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] != 0) { + if (put_user(connector->encoder_ids[i], +@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, + } + + for (i = 0; i < crtc_req->count_connectors; i++) { +- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr; ++ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr; + if (get_user(out_id, &set_connectors_ptr[i])) { + ret = -EFAULT; + goto out; +@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + fb = obj_to_fb(obj); + + num_clips = r->num_clips; +- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; ++ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; + + if (!num_clips != !clips_ptr) { + ret = -EINVAL; +@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, + out_resp->flags = property->flags; + + if ((out_resp->count_values >= value_count) && value_count) { +- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr; ++ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr; + for (i = 0; i < value_count; i++) { + if 
(copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { + ret = -EFAULT; +@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, + if (property->flags & DRM_MODE_PROP_ENUM) { + if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { + copied = 0; +- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr; ++ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; + list_for_each_entry(prop_enum, &property->enum_blob_list, head) { + + if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { +@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, + if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { + copied = 0; + blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr; +- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr; ++ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr; + + list_for_each_entry(prop_blob, &property->enum_blob_list, head) { + if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { +@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, + struct drm_mode_get_blob *out_resp = data; + struct drm_property_blob *blob; + int ret = 0; +- void *blob_ptr; ++ void __user *blob_ptr; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; +@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, + blob = obj_to_blob(obj); + + if (out_resp->length == blob->length) { +- blob_ptr = (void *)(unsigned long)out_resp->data; ++ blob_ptr = (void __user *)(unsigned long)out_resp->data; + if (copy_to_user(blob_ptr, blob->data, blob->length)){ + ret = -EFAULT; + goto done; +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index d2619d7..bd6bd00 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, + struct drm_crtc *tmp; + int crtc_mask = 1; + +- WARN(!crtc, "checking null crtc?\n"); ++ BUG_ON(!crtc); + + dev = crtc->dev; + +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 40c187c..5746164 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -308,7 +308,7 @@ module_exit(drm_core_exit); + /** + * Copy and IOCTL return string to user space + */ +-static int drm_copy_field(char *buf, size_t *buf_len, const char *value) ++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) + { + int len; + +@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp, + + dev = file_priv->minor->dev; + atomic_inc(&dev->ioctl_count); +- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); + ++file_priv->ioctl_count; + + DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 828bf65..cdaa0e9 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev) + } + + for (i = 0; i < ARRAY_SIZE(dev->counts); i++) +- atomic_set(&dev->counts[i], 0); ++ atomic_set_unchecked(&dev->counts[i], 0); + + dev->sigdata.lock = NULL; + +@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp) + + retcode = drm_open_helper(inode, filp, dev); + if (!retcode) { +- atomic_inc(&dev->counts[_DRM_STAT_OPENS]); +- if (!dev->open_count++) ++ 
atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); ++ if (local_inc_return(&dev->open_count) == 1) + retcode = drm_setup(dev); + } + if (!retcode) { +@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp) + + mutex_lock(&drm_global_mutex); + +- DRM_DEBUG("open_count = %d\n", dev->open_count); ++ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count)); + + if (dev->driver->preclose) + dev->driver->preclose(dev, file_priv); +@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp) + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), +- dev->open_count); ++ local_read(&dev->open_count)); + + /* Release any auth tokens that might point to this file_priv, + (do that under the drm_global_mutex) */ +@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp) + * End inline drm_release + */ + +- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); +- if (!--dev->open_count) { ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); ++ if (local_dec_and_test(&dev->open_count)) { + if (atomic_read(&dev->ioctl_count)) { + DRM_ERROR("Device busy: %d\n", + atomic_read(&dev->ioctl_count)); +diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c +index c87dc96..326055d 100644 +--- a/drivers/gpu/drm/drm_global.c ++++ b/drivers/gpu/drm/drm_global.c +@@ -36,7 +36,7 @@ + struct drm_global_item { + struct mutex mutex; + void *object; +- int refcount; ++ atomic_t refcount; + }; + + static struct drm_global_item glob[DRM_GLOBAL_NUM]; +@@ -49,7 +49,7 @@ void drm_global_init(void) + struct drm_global_item *item = &glob[i]; + mutex_init(&item->mutex); + item->object = NULL; +- item->refcount = 0; ++ atomic_set(&item->refcount, 0); + } + } + +@@ -59,7 +59,7 @@ void drm_global_release(void) + for (i = 0; i < DRM_GLOBAL_NUM; ++i) { + struct drm_global_item *item = &glob[i]; + BUG_ON(item->object != NULL); +- BUG_ON(item->refcount != 0); ++ BUG_ON(atomic_read(&item->refcount) != 0); + } + } + +@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) + void *object; + + mutex_lock(&item->mutex); +- if (item->refcount == 0) { ++ if (atomic_read(&item->refcount) == 0) { + item->object = kzalloc(ref->size, GFP_KERNEL); + if (unlikely(item->object == NULL)) { + ret = -ENOMEM; +@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) + goto out_err; + + } +- ++item->refcount; ++ atomic_inc(&item->refcount); + ref->object = item->object; + object = item->object; + mutex_unlock(&item->mutex); +@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref) + struct drm_global_item *item = &glob[ref->global_type]; + + mutex_lock(&item->mutex); +- BUG_ON(item->refcount == 0); ++ BUG_ON(atomic_read(&item->refcount) == 0); + BUG_ON(ref->object != item->object); +- if (--item->refcount == 0) { ++ if (atomic_dec_and_test(&item->refcount)) { + ref->release(ref); + item->object = NULL; + } +diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c +index ab1162d..42587b2 100644 +--- a/drivers/gpu/drm/drm_info.c ++++ b/drivers/gpu/drm/drm_info.c +@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data) + struct drm_local_map *map; + struct drm_map_list *r_list; + +- /* Hardcoded from _DRM_FRAME_BUFFER, +- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and +- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ +- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; ++ static const char * const types[] = { ++ 
[_DRM_FRAME_BUFFER] = "FB", ++ [_DRM_REGISTERS] = "REG", ++ [_DRM_SHM] = "SHM", ++ [_DRM_AGP] = "AGP", ++ [_DRM_SCATTER_GATHER] = "SG", ++ [_DRM_CONSISTENT] = "PCI", ++ [_DRM_GEM] = "GEM" }; + const char *type; + int i; + +@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data) + map = r_list->map; + if (!map) + continue; +- if (map->type < 0 || map->type > 5) ++ if (map->type >= ARRAY_SIZE(types)) + type = "??"; + else + type = types[map->type]; +@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data) + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ 0); ++#else + vma->vm_pgoff); ++#endif + + #if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); +diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c +index ddd70db..40321e6 100644 +--- a/drivers/gpu/drm/drm_ioc32.c ++++ b/drivers/gpu/drm/drm_ioc32.c +@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; +- list = (struct drm_buf_desc *) (request + 1); ++ list = (struct drm_buf_desc __user *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) +@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; +- list = (struct drm_buf_pub *) (request + 1); ++ list = (struct drm_buf_pub __user *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) +diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c +index 904d7e9..ab88581 100644 +--- a/drivers/gpu/drm/drm_ioctl.c ++++ b/drivers/gpu/drm/drm_ioctl.c +@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data, + stats->data[i].value = + (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); + else +- stats->data[i].value = atomic_read(&dev->counts[i]); ++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); + stats->data[i].type = dev->types[i]; + } + +diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c +index 632ae24..244cf4a 100644 +--- a/drivers/gpu/drm/drm_lock.c ++++ b/drivers/gpu/drm/drm_lock.c +@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + if (drm_lock_take(&master->lock, lock->context)) { + master->lock.file_priv = file_priv; + master->lock.lock_time = jiffies; +- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } + +@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) + return -EINVAL; + } + +- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); + + if (drm_lock_free(&master->lock, lock->context)) { + /* FIXME: Should really bail out here. 
*/ +diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c +index 8f371e8..9f85d52 100644 +--- a/drivers/gpu/drm/i810/i810_dma.c ++++ b/drivers/gpu/drm/i810/i810_dma.c +@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data, + dma->buflist[vertex->idx], + vertex->discard, vertex->used); + +- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); +- atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + +@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data, + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, + mc->last_render); + +- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); +- atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + +diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h +index c9339f4..f5e1b9d 100644 +--- a/drivers/gpu/drm/i810/i810_drv.h ++++ b/drivers/gpu/drm/i810/i810_drv.h +@@ -108,8 +108,8 @@ typedef struct drm_i810_private { + int page_flipping; + + wait_queue_head_t irq_queue; +- atomic_t irq_received; +- atomic_t irq_emitted; ++ atomic_unchecked_t irq_received; ++ atomic_unchecked_t irq_emitted; + + int front_offset; + } drm_i810_private_t; +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index b2e3c97..58cf079 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) + I915_READ(GTIMR)); + } + seq_printf(m, "Interrupts received: %d\n", +- atomic_read(&dev_priv->irq_received)); ++ atomic_read_unchecked(&dev_priv->irq_received)); + for (i = 0; i < I915_NUM_RINGS; i++) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { + seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", +@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused) + return ret; + + if (opregion->header) +- seq_write(m, opregion->header, OPREGION_SIZE); ++ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE); + + mutex_unlock(&dev->struct_mutex); + +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index c4da951..3c59c5c 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index ae294a0..1755461 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -229,7 +229,7 @@ struct drm_i915_display_funcs { + /* render clock increase/decrease */ + /* display clock increase/decrease */ + /* pll clock increase/decrease */ +-}; ++} __no_const; + + struct intel_device_info { + u8 gen; +@@ -318,7 +318,7 @@ typedef struct drm_i915_private { + int current_page; + int page_flipping; + +- atomic_t irq_received; ++ 
atomic_unchecked_t irq_received; + + /* protects the irq masks */ + spinlock_t irq_lock; +@@ -893,7 +893,7 @@ struct drm_i915_gem_object { + * will be page flipped away on the next vblank. When it + * reaches 0, dev_priv->pending_flip_queue will be woken up. + */ +- atomic_t pending_flip; ++ atomic_unchecked_t pending_flip; + }; + + #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +@@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev); + extern void intel_teardown_gmbus(struct drm_device *dev); + extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); + extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); +-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) ++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) + { + return container_of(adapter, struct intel_gmbus, adapter)->force_bit; + } +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index b9da890..cad1d98 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, + i915_gem_clflush_object(obj); + + if (obj->base.pending_write_domain) +- cd->flips |= atomic_read(&obj->pending_flip); ++ cd->flips |= atomic_read_unchecked(&obj->pending_flip); + + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all +@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) + + static int + validate_exec_list(struct drm_i915_gem_exec_object2 *exec, +- int count) ++ unsigned int count) + { +- int i; ++ unsigned int i; + + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index d47a53b..61154c2 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; + struct drm_i915_master_private *master_priv; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); +@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) + struct drm_i915_master_private *master_priv; + u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + if (IS_GEN6(dev)) + bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; +@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + int ret = IRQ_NONE, pipe; + bool blc_event = false; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + iir = I915_READ(IIR); + +@@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); +@@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev) + drm_i915_private_t *dev_priv = (drm_i915_private_t *) 
dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 9ec9755..6d1cf2d 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + + wait_event(dev_priv->pending_flip_queue, + atomic_read(&dev_priv->mm.wedged) || +- atomic_read(&obj->pending_flip) == 0); ++ atomic_read_unchecked(&obj->pending_flip) == 0); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the +@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) + obj = to_intel_framebuffer(crtc->fb)->obj; + dev_priv = crtc->dev->dev_private; + wait_event(dev_priv->pending_flip_queue, +- atomic_read(&obj->pending_flip) == 0); ++ atomic_read_unchecked(&obj->pending_flip) == 0); + } + + static bool intel_crtc_driving_pch(struct drm_crtc *crtc) +@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, + + atomic_clear_mask(1 << intel_crtc->plane, + &obj->pending_flip.counter); +- if (atomic_read(&obj->pending_flip) == 0) ++ if (atomic_read_unchecked(&obj->pending_flip) == 0) + wake_up(&dev_priv->pending_flip_queue); + + schedule_work(&work->work); +@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ +- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); ++ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); + if (ret) +@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + return 0; + + cleanup_pending: +- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); ++ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); +diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h +index 54558a0..2d97005 100644 +--- a/drivers/gpu/drm/mga/mga_drv.h ++++ b/drivers/gpu/drm/mga/mga_drv.h +@@ -120,9 +120,9 @@ typedef struct drm_mga_private { + u32 clear_cmd; + u32 maccess; + +- atomic_t vbl_received; /**< Number of vblanks received. */ ++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. 
*/ + wait_queue_head_t fence_queue; +- atomic_t last_fence_retired; ++ atomic_unchecked_t last_fence_retired; + u32 next_fence_to_post; + + unsigned int fb_cpp; +diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c +index 2581202..f230a8d9 100644 +--- a/drivers/gpu/drm/mga/mga_irq.c ++++ b/drivers/gpu/drm/mga/mga_irq.c +@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + +@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) + /* VBLANK interrupt */ + if (status & MGA_VLINEPEN) { + MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); +- atomic_inc(&dev_priv->vbl_received); ++ atomic_inc_unchecked(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); + handled = 1; + } +@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) + if ((prim_start & ~0x03) != (prim_end & ~0x03)) + MGA_WRITE(MGA_PRIMEND, prim_end); + +- atomic_inc(&dev_priv->last_fence_retired); ++ atomic_inc_unchecked(&dev_priv->last_fence_retired); + DRM_WAKEUP(&dev_priv->fence_queue); + handled = 1; + } +@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) + * using fences. + */ + DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, +- (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) ++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) + - *sequence) <= (1 << 23))); + + *sequence = cur_fence; +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c +index 5fc201b..7b032b9 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -201,7 +201,7 @@ struct methods { + const char desc[8]; + void (*loadbios)(struct drm_device *, uint8_t *); + const bool rw; +-}; ++} __do_const; + + static struct methods shadow_methods[] = { + { "PRAMIN", load_vbios_pramin, true }, +@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_table { + const char id; + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); +-}; ++} __no_const; + + #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +index 4c0be3a..5757582 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -238,7 +238,7 @@ struct nouveau_channel { + struct list_head pending; + uint32_t sequence; + uint32_t sequence_ack; +- atomic_t last_sequence_irq; ++ atomic_unchecked_t last_sequence_irq; + struct nouveau_vma vma; + } fence; + +@@ -319,7 +319,7 @@ struct nouveau_exec_engine { + u32 handle, u16 class); + void (*set_tile_region)(struct drm_device *dev, int i); + void (*tlb_flush)(struct drm_device *, int engine); +-}; ++} __no_const; + + struct nouveau_instmem_engine { + void *priv; +@@ -341,13 +341,13 @@ struct nouveau_instmem_engine { + struct nouveau_mc_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); +-}; ++} __no_const; + + struct nouveau_timer_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); + uint64_t (*read)(struct drm_device *dev); +-}; ++} __no_const; + + struct nouveau_fb_engine { + int num_tiles; +@@ -558,7 +558,7 @@ struct nouveau_vram_engine { + void (*put)(struct drm_device *, struct 
nouveau_mem **); + + bool (*flags_valid)(struct drm_device *, u32 tile_flags); +-}; ++} __no_const; + + struct nouveau_engine { + struct nouveau_instmem_engine instmem; +@@ -706,7 +706,7 @@ struct drm_nouveau_private { + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; +- atomic_t validate_sequence; ++ atomic_unchecked_t validate_sequence; + } ttm; + + struct { +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +index 2f6daae..c9d7b9e 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) + if (USE_REFCNT(dev)) + sequence = nvchan_rd32(chan, 0x48); + else +- sequence = atomic_read(&chan->fence.last_sequence_irq); ++ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq); + + if (chan->fence.sequence_ack == sequence) + goto out; +@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) + return ret; + } + +- atomic_set(&chan->fence.last_sequence_irq, 0); ++ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 7ce3fde..cb3ea04 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, + int trycnt = 0; + int ret, i; + +- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); ++ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence); + retry: + if (++trycnt > 100000) { + NV_ERROR(dev, "%s failed and gave up.\n", __func__); +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index d8831ab..0ba8356 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c +index dbdea8e..cd6eeeb 100644 +--- a/drivers/gpu/drm/nouveau/nv04_graph.c ++++ b/drivers/gpu/drm/nouveau/nv04_graph.c +@@ -554,7 +554,7 @@ static int + nv04_graph_mthd_set_ref(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) + { +- atomic_set(&chan->fence.last_sequence_irq, data); ++ atomic_set_unchecked(&chan->fence.last_sequence_irq, data); + return 0; + } + +diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c +index bcac90b..53bfc76 100644 +--- a/drivers/gpu/drm/r128/r128_cce.c ++++ b/drivers/gpu/drm/r128/r128_cce.c +@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) + + /* GH: Simple idle check. 
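A large share of the driver hunks above convert statistics counters (vblank counts, received-IRQ counts, fence sequence numbers) from atomic_t to atomic_unchecked_t. These helpers come from the grsecurity/PaX patch carried in this tree: with PAX_REFCOUNT enabled, ordinary atomic_t operations gain overflow detection, and the *_unchecked variants mark counters that are allowed to wrap. The sketch below is only an assumed fallback for an unpatched kernel, written to show the intent; the real definitions live in the grsecurity patch itself:

/*
 * Rough approximation, not the patch's actual implementation: on an
 * unpatched kernel the *_unchecked helpers used in the hunks above can
 * simply fall back to the plain atomic_t operations.
 */
#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_unchecked(i, v)		atomic_add((i), (v))
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
/* ...and likewise for the other *_unchecked helpers seen in this diff. */
#endif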
+ */ +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + + /* We don't support anything other than bus-mastering ring mode, + * but the ring can be in either AGP or PCI space for the ring +diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h +index 930c71b..499aded 100644 +--- a/drivers/gpu/drm/r128/r128_drv.h ++++ b/drivers/gpu/drm/r128/r128_drv.h +@@ -90,14 +90,14 @@ typedef struct drm_r128_private { + int is_pci; + unsigned long cce_buffers_offset; + +- atomic_t idle_count; ++ atomic_unchecked_t idle_count; + + int page_flipping; + int current_page; + u32 crtc_offset; + u32 crtc_offset_cntl; + +- atomic_t vbl_received; ++ atomic_unchecked_t vbl_received; + + u32 color_fmt; + unsigned int front_offset; +diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c +index 429d5a0..7e899ed 100644 +--- a/drivers/gpu/drm/r128/r128_irq.c ++++ b/drivers/gpu/drm/r128/r128_irq.c +@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) +@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) + /* VBLANK interrupt */ + if (status & R128_CRTC_VBLANK_INT) { + R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); +- atomic_inc(&dev_priv->vbl_received); ++ atomic_inc_unchecked(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); + return IRQ_HANDLED; + } +diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c +index a9e33ce..09edd4b 100644 +--- a/drivers/gpu/drm/r128/r128_state.c ++++ b/drivers/gpu/drm/r128/r128_state.c +@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv, + + static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) + { +- if (atomic_read(&dev_priv->idle_count) == 0) ++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) + r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); + else +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + } + + #endif +diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c +index 5a82b6b..9e69c73 100644 +--- a/drivers/gpu/drm/radeon/mkregtable.c ++++ b/drivers/gpu/drm/radeon/mkregtable.c +@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename) + regex_t mask_rex; + regmatch_t match[4]; + char buf[1024]; +- size_t end; ++ long end; + int len; + int done = 0; + int r; + unsigned o; + struct offset *offset; + char last_reg_s[10]; +- int last_reg; ++ unsigned long last_reg; + + if (regcomp + (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { +diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c +index cb1acff..8861bc5 100644 +--- a/drivers/gpu/drm/radeon/r600_cs.c ++++ b/drivers/gpu/drm/radeon/r600_cs.c +@@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, + h0 = G_038004_TEX_HEIGHT(word1) + 1; + d0 = G_038004_TEX_DEPTH(word1); + nfaces = 1; ++ array = 0; + switch (G_038000_DIM(word0)) { + case V_038000_SQ_TEX_DIM_1D: + case V_038000_SQ_TEX_DIM_2D: +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h +index 8227e76..ce0b195 100644 +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct 
radeon_device *rdev); + */ + struct radeon_fence_driver { + uint32_t scratch_reg; +- atomic_t seq; ++ atomic_unchecked_t seq; + uint32_t last_seq; + unsigned long last_jiffies; + unsigned long last_timeout; +@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives { + int x2, int y2); + void (*draw_auto)(struct radeon_device *rdev); + void (*set_default_state)(struct radeon_device *rdev); +-}; ++} __no_const; + + struct r600_blit { + struct mutex mutex; +@@ -954,7 +954,7 @@ struct radeon_asic { + void (*pre_page_flip)(struct radeon_device *rdev, int crtc); + u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); + void (*post_page_flip)(struct radeon_device *rdev, int crtc); +-}; ++} __no_const; + + /* + * Asic structures +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 9231564..78b00fd 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h +index a1b59ca..86f2d44 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.h ++++ b/drivers/gpu/drm/radeon/radeon_drv.h +@@ -255,7 +255,7 @@ typedef struct drm_radeon_private { + + /* SW interrupt */ + wait_queue_head_t swi_queue; +- atomic_t swi_emitted; ++ atomic_unchecked_t swi_emitted; + int vblank_crtc; + uint32_t irq_enable_reg; + uint32_t r500_disp_irq_reg; +diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c +index 76ec0e9..6feb1a3 100644 +--- a/drivers/gpu/drm/radeon/radeon_fence.c ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); + return 0; + } +- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); ++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq); + if (!rdev->cp.ready) + /* FIXME: cp is not running assume everythings is done right + * away +@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) + return r; + } + radeon_fence_write(rdev, 0); +- atomic_set(&rdev->fence_drv.seq, 0); ++ atomic_set_unchecked(&rdev->fence_drv.seq, 0); + INIT_LIST_HEAD(&rdev->fence_drv.created); + INIT_LIST_HEAD(&rdev->fence_drv.emited); + INIT_LIST_HEAD(&rdev->fence_drv.signaled); +diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c +index 48b7cea..342236f 100644 +--- a/drivers/gpu/drm/radeon/radeon_ioc32.c ++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c +@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.param, &request->param) +- || __put_user((void __user *)(unsigned long)req32.value, ++ || __put_user((unsigned long)req32.value, + &request->value)) + return -EFAULT; + +diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c +index 00da384..32f972d 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq.c ++++ b/drivers/gpu/drm/radeon/radeon_irq.c +@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev) + 
unsigned int ret; + RING_LOCALS; + +- atomic_inc(&dev_priv->swi_emitted); +- ret = atomic_read(&dev_priv->swi_emitted); ++ atomic_inc_unchecked(&dev_priv->swi_emitted); ++ ret = atomic_read_unchecked(&dev_priv->swi_emitted); + + BEGIN_RING(4); + OUT_RING_REG(RADEON_LAST_SWI_REG, ret); +@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->swi_emitted, 0); ++ atomic_set_unchecked(&dev_priv->swi_emitted, 0); + DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); + + dev->max_vblank_count = 0x001fffff; +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c +index e8422ae..d22d4a8 100644 +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + +- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]))) + return -EFAULT; + +@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + { + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t *param = data; +- int value; ++ int value = 0; + + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); + +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 0b5468b..9c4b308 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) + } + if (unlikely(ttm_vm_ops == NULL)) { + ttm_vm_ops = vma->vm_ops; +- radeon_ttm_vm_ops = *ttm_vm_ops; +- radeon_ttm_vm_ops.fault = &radeon_ttm_fault; ++ pax_open_kernel(); ++ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops)); ++ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault; ++ pax_close_kernel(); + } + vma->vm_ops = &radeon_ttm_vm_ops; + return 0; +diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c +index a9049ed..501f284 100644 +--- a/drivers/gpu/drm/radeon/rs690.c ++++ b/drivers/gpu/drm/radeon/rs690.c +@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && + rdev->pm.sideport_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; +- read_delay_latency.full = dfixed_const(370 * 800 * 1000); ++ read_delay_latency.full = dfixed_const(800 * 1000); + read_delay_latency.full = dfixed_div(read_delay_latency, + rdev->pm.igp_sideport_mclk); ++ a.full = dfixed_const(370); ++ read_delay_latency.full = dfixed_mul(read_delay_latency, a); + } else { + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && + rdev->pm.k8_bandwidth.full) +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 727e93d..1565650 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void) + static int ttm_pool_mm_shrink(struct shrinker *shrink, + struct shrink_control *sc) + { +- static atomic_t start_pool = ATOMIC_INIT(0); ++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0); + 
unsigned i; +- unsigned pool_offset = atomic_add_return(1, &start_pool); ++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool); + struct ttm_page_pool *pool; + int shrink_pages = sc->nr_to_scan; + +diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h +index 9cf87d9..2000b7d 100644 +--- a/drivers/gpu/drm/via/via_drv.h ++++ b/drivers/gpu/drm/via/via_drv.h +@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { + typedef uint32_t maskarray_t[5]; + + typedef struct drm_via_irq { +- atomic_t irq_received; ++ atomic_unchecked_t irq_received; + uint32_t pending_mask; + uint32_t enable_mask; + wait_queue_head_t irq_queue; +@@ -75,7 +75,7 @@ typedef struct drm_via_private { + struct timeval last_vblank; + int last_vblank_valid; + unsigned usec_per_vblank; +- atomic_t vbl_received; ++ atomic_unchecked_t vbl_received; + drm_via_state_t hc_state; + char pci_buf[VIA_PCI_BUF_SIZE]; + const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; +diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c +index d391f48..10c8ca3 100644 +--- a/drivers/gpu/drm/via/via_irq.c ++++ b/drivers/gpu/drm/via/via_irq.c +@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) +@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + + status = VIA_READ(VIA_REG_INTERRUPT); + if (status & VIA_IRQ_VBLANK_PENDING) { +- atomic_inc(&dev_priv->vbl_received); +- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { ++ atomic_inc_unchecked(&dev_priv->vbl_received); ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { + do_gettimeofday(&cur_vblank); + if (dev_priv->last_vblank_valid) { + dev_priv->usec_per_vblank = +@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + dev_priv->last_vblank = cur_vblank; + dev_priv->last_vblank_valid = 1; + } +- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { + DRM_DEBUG("US per vblank is: %u\n", + dev_priv->usec_per_vblank); + } +@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) + + for (i = 0; i < dev_priv->num_irqs; ++i) { + if (status & cur_irq->pending_mask) { +- atomic_inc(&cur_irq->irq_received); ++ atomic_inc_unchecked(&cur_irq->irq_received); + DRM_WAKEUP(&cur_irq->irq_queue); + handled = 1; + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) +@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, + ((VIA_READ(masks[irq][2]) & masks[irq][3]) == + masks[irq][4])); +- cur_irq_sequence = atomic_read(&cur_irq->irq_received); ++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); + } else { + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, + (((cur_irq_sequence = +- atomic_read(&cur_irq->irq_received)) - ++ atomic_read_unchecked(&cur_irq->irq_received)) - + *sequence) <= (1 << 23))); + } + *sequence = cur_irq_sequence; +@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev) + } + + for (i = 0; i < dev_priv->num_irqs; ++i) { +- atomic_set(&cur_irq->irq_received, 0); ++ atomic_set_unchecked(&cur_irq->irq_received, 0); + cur_irq->enable_mask = dev_priv->irq_masks[i][0]; + cur_irq->pending_mask = dev_priv->irq_masks[i][1]; + DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); +@@ 
-367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { + case VIA_IRQ_RELATIVE: + irqwait->request.sequence += +- atomic_read(&cur_irq->irq_received); ++ atomic_read_unchecked(&cur_irq->irq_received); + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; + case VIA_IRQ_ABSOLUTE: + break; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index dc27970..f18b008 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -260,7 +260,7 @@ struct vmw_private { + * Fencing and IRQs. + */ + +- atomic_t marker_seq; ++ atomic_unchecked_t marker_seq; + wait_queue_head_t fence_queue; + wait_queue_head_t fifo_queue; + int fence_queue_waiters; /* Protected by hw_mutex */ +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index a0c2f12..68ae6cb 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + (unsigned int) min, + (unsigned int) fifo->capabilities); + +- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); ++ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno); + iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); + vmw_marker_queue_init(&fifo->marker_queue); + return vmw_fifo_send_fence(dev_priv, &dummy); +@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) + if (reserveable) + iowrite32(bytes, fifo_mem + + SVGA_FIFO_RESERVED); +- return fifo_mem + (next_cmd >> 2); ++ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2); + } else { + need_bounce = true; + } +@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) + + fm = vmw_fifo_reserve(dev_priv, bytes); + if (unlikely(fm == NULL)) { +- *seqno = atomic_read(&dev_priv->marker_seq); ++ *seqno = atomic_read_unchecked(&dev_priv->marker_seq); + ret = -ENOMEM; + (void)vmw_fallback_wait(dev_priv, false, true, *seqno, + false, 3*HZ); +@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) + } + + do { +- *seqno = atomic_add_return(1, &dev_priv->marker_seq); ++ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq); + } while (*seqno == 0); + + if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index cabc95f..14b3d77 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, + * emitted. Then the fence is stale and signaled. 
+ */ + +- ret = ((atomic_read(&dev_priv->marker_seq) - seqno) ++ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno) + > VMW_FENCE_WRAP); + + return ret; +@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, + + if (fifo_idle) + down_read(&fifo_state->rwsem); +- signal_seq = atomic_read(&dev_priv->marker_seq); ++ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq); + ret = 0; + + for (;;) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +index 8a8725c..afed796 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv, + while (!vmw_lag_lt(queue, us)) { + spin_lock(&queue->lock); + if (list_empty(&queue->head)) +- seqno = atomic_read(&dev_priv->marker_seq); ++ seqno = atomic_read_unchecked(&dev_priv->marker_seq); + else { + marker = list_first_entry(&queue->head, + struct vmw_marker, head); +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index bb656d8..4169fca 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev) + + int hid_add_device(struct hid_device *hdev) + { +- static atomic_t id = ATOMIC_INIT(0); ++ static atomic_unchecked_t id = ATOMIC_INIT(0); + int ret; + + if (WARN_ON(hdev->status & HID_STAT_ADDED)) +@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev) + /* XXX hack, any other cleaner solution after the driver core + * is converted to allow more than 20 bytes as the device name? */ + dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, +- hdev->vendor, hdev->product, atomic_inc_return(&id)); ++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); + + hid_debug_register(hdev, dev_name(&hdev->dev)); + ret = device_add(&hdev->dev); +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c +index 4ef02b2..8a96831 100644 +--- a/drivers/hid/usbhid/hiddev.c ++++ b/drivers/hid/usbhid/hiddev.c +@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + break; + + case HIDIOCAPPLICATION: +- if (arg < 0 || arg >= hid->maxapplication) ++ if (arg >= hid->maxapplication) + break; + + for (i = 0; i < hid->maxcollection; i++) +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 4065374..10ed7dc 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + int ret = 0; + int t; + +- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); +- atomic_inc(&vmbus_connection.next_gpadl_handle); ++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle); ++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle); + + ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount); + if (ret) +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 0fb100e..baf87e5 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output) + u64 output_address = (output) ? 
virt_to_phys(output) : 0; + u32 output_address_hi = output_address >> 32; + u32 output_address_lo = output_address & 0xFFFFFFFF; +- void *hypercall_page = hv_context.hypercall_page; ++ void *hypercall_page = ktva_ktla(hv_context.hypercall_page); + + __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), + "=a"(hv_status_lo) : "d" (control_hi), +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 0aee112..b72d21f 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -556,7 +556,7 @@ enum vmbus_connect_state { + struct vmbus_connection { + enum vmbus_connect_state conn_state; + +- atomic_t next_gpadl_handle; ++ atomic_unchecked_t next_gpadl_handle; + + /* + * Represents channel interrupts. Each bit position represents a +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index d2d0a2a..90b8f4d 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj) + { + int ret = 0; + +- static atomic_t device_num = ATOMIC_INIT(0); ++ static atomic_unchecked_t device_num = ATOMIC_INIT(0); + + dev_set_name(&child_device_obj->device, "vmbus_0_%d", +- atomic_inc_return(&device_num)); ++ atomic_inc_return_unchecked(&device_num)); + + child_device_obj->device.bus = &hv_bus; + child_device_obj->device.parent = &hv_acpi_dev->dev; +diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c +index 66f6729..2d6de0a 100644 +--- a/drivers/hwmon/acpi_power_meter.c ++++ b/drivers/hwmon/acpi_power_meter.c +@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr, + return res; + + temp /= 1000; +- if (temp < 0) +- return -EINVAL; + + mutex_lock(&resource->lock); + resource->trip[attr->index - 7] = temp; +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c +index 5357925..6cf0418 100644 +--- a/drivers/hwmon/sht15.c ++++ b/drivers/hwmon/sht15.c +@@ -166,7 +166,7 @@ struct sht15_data { + int supply_uV; + bool supply_uV_valid; + struct work_struct update_supply_work; +- atomic_t interrupt_handled; ++ atomic_unchecked_t interrupt_handled; + }; + + /** +@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data, + return ret; + + gpio_direction_input(data->pdata->gpio_data); +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + if (gpio_get_value(data->pdata->gpio_data) == 0) { + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + /* Only relevant if the interrupt hasn't occurred. */ +- if (!atomic_read(&data->interrupt_handled)) ++ if (!atomic_read_unchecked(&data->interrupt_handled)) + schedule_work(&data->read_work); + } + ret = wait_event_timeout(data->wait_queue, +@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d) + + /* First disable the interrupt */ + disable_irq_nosync(irq); +- atomic_inc(&data->interrupt_handled); ++ atomic_inc_unchecked(&data->interrupt_handled); + /* Then schedule a reading work struct */ + if (data->state != SHT15_READING_NOTHING) + schedule_work(&data->read_work); +@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s) + * If not, then start the interrupt again - care here as could + * have gone low in meantime so verify it hasn't! 
+ */ +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + /* If still not occurred or another handler has been scheduled */ + if (gpio_get_value(data->pdata->gpio_data) +- || atomic_read(&data->interrupt_handled)) ++ || atomic_read_unchecked(&data->interrupt_handled)) + return; + } + +diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c +index 378fcb5..5e91fa8 100644 +--- a/drivers/i2c/busses/i2c-amd756-s4882.c ++++ b/drivers/i2c/busses/i2c-amd756-s4882.c +@@ -43,7 +43,7 @@ + extern struct i2c_adapter amd756_smbus; + + static struct i2c_adapter *s4882_adapter; +-static struct i2c_algorithm *s4882_algo; ++static i2c_algorithm_no_const *s4882_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(amd756_lock); +diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c +index 29015eb..af2d8e9 100644 +--- a/drivers/i2c/busses/i2c-nforce2-s4985.c ++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c +@@ -41,7 +41,7 @@ + extern struct i2c_adapter *nforce2_smbus; + + static struct i2c_adapter *s4985_adapter; +-static struct i2c_algorithm *s4985_algo; ++static i2c_algorithm_no_const *s4985_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(nforce2_lock); +diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c +index d7a4833..7fae376 100644 +--- a/drivers/i2c/i2c-mux.c ++++ b/drivers/i2c/i2c-mux.c +@@ -28,7 +28,7 @@ + /* multiplexer per channel data */ + struct i2c_mux_priv { + struct i2c_adapter adap; +- struct i2c_algorithm algo; ++ i2c_algorithm_no_const algo; + + struct i2c_adapter *parent; + void *mux_dev; /* the mux chip/device */ +diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c +index 57d00ca..0145194 100644 +--- a/drivers/ide/aec62xx.c ++++ b/drivers/ide/aec62xx.c +@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = { + .cable_detect = atp86x_cable_detect, + }; + +-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { ++static const struct ide_port_info aec62xx_chipsets[] __devinitconst = { + { /* 0: AEC6210 */ + .name = DRV_NAME, + .init_chipset = init_chipset_aec62xx, +diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c +index 2c8016a..911a27c 100644 +--- a/drivers/ide/alim15x3.c ++++ b/drivers/ide/alim15x3.c +@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info ali15x3_chipset __devinitdata = { ++static const struct ide_port_info ali15x3_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_ali15x3, + .init_hwif = init_hwif_ali15x3, +diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c +index 3747b25..56fc995 100644 +--- a/drivers/ide/amd74xx.c ++++ b/drivers/ide/amd74xx.c +@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = { ++static const struct ide_port_info amd74xx_chipsets[] __devinitconst = { + /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2), + /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4), + /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5), +diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c +index 15f0ead..cb43480 100644 +--- a/drivers/ide/atiixp.c ++++ b/drivers/ide/atiixp.c +@@ -139,7 +139,7 @@ static const struct 
ide_port_ops atiixp_port_ops = { + .cable_detect = atiixp_cable_detect, + }; + +-static const struct ide_port_info atiixp_pci_info[] __devinitdata = { ++static const struct ide_port_info atiixp_pci_info[] __devinitconst = { + { /* 0: IXP200/300/400/700 */ + .name = DRV_NAME, + .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, +diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c +index 5f80312..d1fc438 100644 +--- a/drivers/ide/cmd64x.c ++++ b/drivers/ide/cmd64x.c +@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { ++static const struct ide_port_info cmd64x_chipsets[] __devinitconst = { + { /* 0: CMD643 */ + .name = DRV_NAME, + .init_chipset = init_chipset_cmd64x, +diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c +index 2c1e5f7..1444762 100644 +--- a/drivers/ide/cs5520.c ++++ b/drivers/ide/cs5520.c +@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = { + .set_dma_mode = cs5520_set_dma_mode, + }; + +-static const struct ide_port_info cyrix_chipset __devinitdata = { ++static const struct ide_port_info cyrix_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } }, + .port_ops = &cs5520_port_ops, +diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c +index 4dc4eb9..49b40ad 100644 +--- a/drivers/ide/cs5530.c ++++ b/drivers/ide/cs5530.c +@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = { + .udma_filter = cs5530_udma_filter, + }; + +-static const struct ide_port_info cs5530_chipset __devinitdata = { ++static const struct ide_port_info cs5530_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_cs5530, + .init_hwif = init_hwif_cs5530, +diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c +index 5059faf..18d4c85 100644 +--- a/drivers/ide/cs5535.c ++++ b/drivers/ide/cs5535.c +@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = { + .cable_detect = cs5535_cable_detect, + }; + +-static const struct ide_port_info cs5535_chipset __devinitdata = { ++static const struct ide_port_info cs5535_chipset __devinitconst = { + .name = DRV_NAME, + .port_ops = &cs5535_port_ops, + .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE, +diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c +index 847553f..3ffb49d 100644 +--- a/drivers/ide/cy82c693.c ++++ b/drivers/ide/cy82c693.c +@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = { + .set_dma_mode = cy82c693_set_dma_mode, + }; + +-static const struct ide_port_info cy82c693_chipset __devinitdata = { ++static const struct ide_port_info cy82c693_chipset __devinitconst = { + .name = DRV_NAME, + .init_iops = init_iops_cy82c693, + .port_ops = &cy82c693_port_ops, +diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c +index 58c51cd..4aec3b8 100644 +--- a/drivers/ide/hpt366.c ++++ b/drivers/ide/hpt366.c +@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = { + } + }; + +-static const struct hpt_info hpt36x __devinitdata = { ++static const struct hpt_info hpt36x __devinitconst = { + .chip_name = "HPT36x", + .chip_type = HPT36x, + .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? 
ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, +@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = { + .timings = &hpt36x_timings + }; + +-static const struct hpt_info hpt370 __devinitdata = { ++static const struct hpt_info hpt370 __devinitconst = { + .chip_name = "HPT370", + .chip_type = HPT370, + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, +@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt370a __devinitdata = { ++static const struct hpt_info hpt370a __devinitconst = { + .chip_name = "HPT370A", + .chip_type = HPT370A, + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, +@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt374 __devinitdata = { ++static const struct hpt_info hpt374 __devinitconst = { + .chip_name = "HPT374", + .chip_type = HPT374, + .udma_mask = ATA_UDMA5, +@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372 __devinitdata = { ++static const struct hpt_info hpt372 __devinitconst = { + .chip_name = "HPT372", + .chip_type = HPT372, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372a __devinitdata = { ++static const struct hpt_info hpt372a __devinitconst = { + .chip_name = "HPT372A", + .chip_type = HPT372A, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt302 __devinitdata = { ++static const struct hpt_info hpt302 __devinitconst = { + .chip_name = "HPT302", + .chip_type = HPT302, + .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt371 __devinitdata = { ++static const struct hpt_info hpt371 __devinitconst = { + .chip_name = "HPT371", + .chip_type = HPT371, + .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt372n __devinitdata = { ++static const struct hpt_info hpt372n __devinitconst = { + .chip_name = "HPT372N", + .chip_type = HPT372N, + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt302n __devinitdata = { ++static const struct hpt_info hpt302n __devinitconst = { + .chip_name = "HPT302N", + .chip_type = HPT302N, + .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, +@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = { + .timings = &hpt37x_timings + }; + +-static const struct hpt_info hpt371n __devinitdata = { ++static const struct hpt_info hpt371n __devinitconst = { + .chip_name = "HPT371N", + .chip_type = HPT371N, + .udma_mask = HPT371_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, +@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info hpt366_chipsets[] __devinitdata = { ++static const struct ide_port_info hpt366_chipsets[] __devinitconst = { + { /* 0: HPT36x */ + .name = DRV_NAME, + .init_chipset = init_chipset_hpt366, +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c +index 8126824..55a2798 100644 +--- a/drivers/ide/ide-cd.c ++++ b/drivers/ide/ide-cd.c +@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if ((unsigned long)buf & alignment + || blk_rq_bytes(rq) & q->dma_pad_mask +- || object_is_on_stack(buf)) ++ || object_starts_on_stack(buf)) + drive->dma = 0; + } + } +diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c +index a743e68..1cfd674 100644 +--- a/drivers/ide/ide-pci-generic.c ++++ b/drivers/ide/ide-pci-generic.c +@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = { + .udma_mask = ATA_UDMA6, \ + } + +-static const struct ide_port_info generic_chipsets[] __devinitdata = { ++static const struct ide_port_info generic_chipsets[] __devinitconst = { + /* 0: Unknown */ + DECLARE_GENERIC_PCI_DEV(0), + +diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c +index 560e66d..d5dd180 100644 +--- a/drivers/ide/it8172.c ++++ b/drivers/ide/it8172.c +@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = { + .set_dma_mode = it8172_set_dma_mode, + }; + +-static const struct ide_port_info it8172_port_info __devinitdata = { ++static const struct ide_port_info it8172_port_info __devinitconst = { + .name = DRV_NAME, + .port_ops = &it8172_port_ops, + .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} }, +diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c +index 46816ba..1847aeb 100644 +--- a/drivers/ide/it8213.c ++++ b/drivers/ide/it8213.c +@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = { + .cable_detect = it8213_cable_detect, + }; + +-static const struct ide_port_info it8213_chipset __devinitdata = { ++static const struct ide_port_info it8213_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x41, 0x80, 0x80} }, + .port_ops = &it8213_port_ops, +diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c +index 2e3169f..c5611db 100644 +--- a/drivers/ide/it821x.c ++++ b/drivers/ide/it821x.c +@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = { + .cable_detect = it821x_cable_detect, + }; + +-static const struct ide_port_info it821x_chipset __devinitdata = { ++static const struct ide_port_info it821x_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_it821x, + .init_hwif = init_hwif_it821x, +diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c +index 74c2c4a..efddd7d 100644 +--- a/drivers/ide/jmicron.c ++++ b/drivers/ide/jmicron.c +@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = { + .cable_detect = jmicron_cable_detect, + }; + +-static const struct ide_port_info jmicron_chipset __devinitdata = { ++static const struct ide_port_info jmicron_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } }, + .port_ops = &jmicron_port_ops, +diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c +index 95327a2..73f78d8 100644 +--- a/drivers/ide/ns87415.c ++++ b/drivers/ide/ns87415.c +@@ -293,7 +293,7 @@ static const 
struct ide_dma_ops ns87415_dma_ops = { + .dma_sff_read_status = superio_dma_sff_read_status, + }; + +-static const struct ide_port_info ns87415_chipset __devinitdata = { ++static const struct ide_port_info ns87415_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_ns87415, + .tp_ops = &ns87415_tp_ops, +diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c +index 1a53a4c..39edc66 100644 +--- a/drivers/ide/opti621.c ++++ b/drivers/ide/opti621.c +@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = { + .set_pio_mode = opti621_set_pio_mode, + }; + +-static const struct ide_port_info opti621_chipset __devinitdata = { ++static const struct ide_port_info opti621_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} }, + .port_ops = &opti621_port_ops, +diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c +index 9546fe2..2e5ceb6 100644 +--- a/drivers/ide/pdc202xx_new.c ++++ b/drivers/ide/pdc202xx_new.c +@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = { ++static const struct ide_port_info pdcnew_chipsets[] __devinitconst = { + /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5), + /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6), + }; +diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c +index 3a35ec6..5634510 100644 +--- a/drivers/ide/pdc202xx_old.c ++++ b/drivers/ide/pdc202xx_old.c +@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { + .max_sectors = sectors, \ + } + +-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { ++static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = { + { /* 0: PDC20246 */ + .name = DRV_NAME, + .init_chipset = init_chipset_pdc202xx, +diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c +index 1892e81..fe0fd60 100644 +--- a/drivers/ide/piix.c ++++ b/drivers/ide/piix.c +@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = { + .udma_mask = udma, \ + } + +-static const struct ide_port_info piix_pci_info[] __devinitdata = { ++static const struct ide_port_info piix_pci_info[] __devinitconst = { + /* 0: MPIIX */ + { /* + * MPIIX actually has only a single IDE channel mapped to +diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c +index a6414a8..c04173e 100644 +--- a/drivers/ide/rz1000.c ++++ b/drivers/ide/rz1000.c +@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev) + } + } + +-static const struct ide_port_info rz1000_chipset __devinitdata = { ++static const struct ide_port_info rz1000_chipset __devinitconst = { + .name = DRV_NAME, + .host_flags = IDE_HFLAG_NO_DMA, + }; +diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c +index 356b9b5..d4758eb 100644 +--- a/drivers/ide/sc1200.c ++++ b/drivers/ide/sc1200.c +@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info sc1200_chipset __devinitdata = { ++static const struct ide_port_info sc1200_chipset __devinitconst = { + .name = DRV_NAME, + .port_ops = &sc1200_port_ops, + .dma_ops = &sc1200_dma_ops, +diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c +index b7f5b0c..9701038 100644 +--- a/drivers/ide/scc_pata.c ++++ b/drivers/ide/scc_pata.c +@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = { + .dma_sff_read_status = 
scc_dma_sff_read_status, + }; + +-static const struct ide_port_info scc_chipset __devinitdata = { ++static const struct ide_port_info scc_chipset __devinitconst = { + .name = "sccIDE", + .init_iops = init_iops_scc, + .init_dma = scc_init_dma, +diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c +index 35fb8da..24d72ef 100644 +--- a/drivers/ide/serverworks.c ++++ b/drivers/ide/serverworks.c +@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = { + .cable_detect = svwks_cable_detect, + }; + +-static const struct ide_port_info serverworks_chipsets[] __devinitdata = { ++static const struct ide_port_info serverworks_chipsets[] __devinitconst = { + { /* 0: OSB4 */ + .name = DRV_NAME, + .init_chipset = init_chipset_svwks, +diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c +index ddeda44..46f7e30 100644 +--- a/drivers/ide/siimage.c ++++ b/drivers/ide/siimage.c +@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = { + .udma_mask = ATA_UDMA6, \ + } + +-static const struct ide_port_info siimage_chipsets[] __devinitdata = { ++static const struct ide_port_info siimage_chipsets[] __devinitconst = { + /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops), + /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops) + }; +diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c +index 4a00225..09e61b4 100644 +--- a/drivers/ide/sis5513.c ++++ b/drivers/ide/sis5513.c +@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = { + .cable_detect = sis_cable_detect, + }; + +-static const struct ide_port_info sis5513_chipset __devinitdata = { ++static const struct ide_port_info sis5513_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_sis5513, + .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, +diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c +index f21dc2a..d051cd2 100644 +--- a/drivers/ide/sl82c105.c ++++ b/drivers/ide/sl82c105.c +@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info sl82c105_chipset __devinitdata = { ++static const struct ide_port_info sl82c105_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_sl82c105, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}}, +diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c +index 864ffe0..863a5e9 100644 +--- a/drivers/ide/slc90e66.c ++++ b/drivers/ide/slc90e66.c +@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = { + .cable_detect = slc90e66_cable_detect, + }; + +-static const struct ide_port_info slc90e66_chipset __devinitdata = { ++static const struct ide_port_info slc90e66_chipset __devinitconst = { + .name = DRV_NAME, + .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} }, + .port_ops = &slc90e66_port_ops, +diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c +index 4799d5c..1794678 100644 +--- a/drivers/ide/tc86c001.c ++++ b/drivers/ide/tc86c001.c +@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = { + .dma_sff_read_status = ide_dma_sff_read_status, + }; + +-static const struct ide_port_info tc86c001_chipset __devinitdata = { ++static const struct ide_port_info tc86c001_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_tc86c001, + .port_ops = &tc86c001_port_ops, +diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c +index 281c914..55ce1b8 100644 +--- a/drivers/ide/triflex.c ++++ b/drivers/ide/triflex.c +@@ -92,7 
+92,7 @@ static const struct ide_port_ops triflex_port_ops = { + .set_dma_mode = triflex_set_mode, + }; + +-static const struct ide_port_info triflex_device __devinitdata = { ++static const struct ide_port_info triflex_device __devinitconst = { + .name = DRV_NAME, + .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}}, + .port_ops = &triflex_port_ops, +diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c +index 4b42ca0..e494a98 100644 +--- a/drivers/ide/trm290.c ++++ b/drivers/ide/trm290.c +@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = { + .dma_check = trm290_dma_check, + }; + +-static const struct ide_port_info trm290_chipset __devinitdata = { ++static const struct ide_port_info trm290_chipset __devinitconst = { + .name = DRV_NAME, + .init_hwif = init_hwif_trm290, + .tp_ops = &trm290_tp_ops, +diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c +index f46f49c..eb77678 100644 +--- a/drivers/ide/via82cxxx.c ++++ b/drivers/ide/via82cxxx.c +@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = { + .cable_detect = via82cxxx_cable_detect, + }; + +-static const struct ide_port_info via82cxxx_chipset __devinitdata = { ++static const struct ide_port_info via82cxxx_chipset __devinitconst = { + .name = DRV_NAME, + .init_chipset = init_chipset_via82cxxx, + .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, +diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c +index eb0e2cc..14241c7 100644 +--- a/drivers/ieee802154/fakehard.c ++++ b/drivers/ieee802154/fakehard.c +@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev) + phy->transmit_power = 0xbf; + + dev->netdev_ops = &fake_ops; +- dev->ml_priv = &fake_mlme; ++ dev->ml_priv = (void *)&fake_mlme; + + priv = netdev_priv(dev); + priv->phy = phy; +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index 8b72f39..55df4c8 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] + + struct cm_counter_group { + struct kobject obj; +- atomic_long_t counter[CM_ATTR_COUNT]; ++ atomic_long_unchecked_t counter[CM_ATTR_COUNT]; + }; + + struct cm_counter_attribute { +@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work, + struct ib_mad_send_buf *msg = NULL; + int ret; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REQ_COUNTER]); + + /* Quick state check to discard duplicate REQs. */ +@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work) + if (!cm_id_priv) + return; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REP_COUNTER]); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + if (ret) +@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work) + if (cm_id_priv->id.state != IB_CM_REP_SENT && + cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { + spin_unlock_irq(&cm_id_priv->lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
+ counter[CM_RTU_COUNTER]); + goto out; + } +@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, + dreq_msg->local_comm_id); + if (!cm_id_priv) { +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + cm_issue_drep(work->port, work->mad_recv_wc); + return -EINVAL; +@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work) + case IB_CM_MRA_REP_RCVD: + break; + case IB_CM_TIMEWAIT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_DREQ_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + goto unlock; + default: +@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work) + ib_modify_mad(cm_id_priv->av.port->mad_agent, + cm_id_priv->msg, timeout)) { + if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) +- atomic_long_inc(&work->port-> ++ atomic_long_inc_unchecked(&work->port-> + counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + goto out; +@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work) + break; + case IB_CM_MRA_REQ_RCVD: + case IB_CM_MRA_REP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + /* fall through */ + default: +@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work) + case IB_CM_LAP_IDLE: + break; + case IB_CM_MRA_LAP_SENT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_LAP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + goto unlock; + default: +@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work) + cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); + if (cur_cm_id_priv) { + spin_unlock_irq(&cm.lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_SIDR_REQ_COUNTER]); + goto out; /* Duplicate message. */ + } +@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, + if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) + msg->retries = 1; + +- atomic_long_add(1 + msg->retries, ++ atomic_long_add_unchecked(1 + msg->retries, + &port->counter_group[CM_XMIT].counter[attr_index]); + if (msg->retries) +- atomic_long_add(msg->retries, ++ atomic_long_add_unchecked(msg->retries, + &port->counter_group[CM_XMIT_RETRIES]. 
+ counter[attr_index]); + +@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, + } + + attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); +- atomic_long_inc(&port->counter_group[CM_RECV]. ++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. + counter[attr_id - CM_ATTR_ID_OFFSET]); + + work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, +@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, + cm_attr = container_of(attr, struct cm_counter_attribute, attr); + + return sprintf(buf, "%ld\n", +- atomic_long_read(&group->counter[cm_attr->index])); ++ atomic_long_read_unchecked(&group->counter[cm_attr->index])); + } + + static const struct sysfs_ops cm_counter_ops = { +diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c +index 176c8f9..2627b62 100644 +--- a/drivers/infiniband/core/fmr_pool.c ++++ b/drivers/infiniband/core/fmr_pool.c +@@ -98,8 +98,8 @@ struct ib_fmr_pool { + + struct task_struct *thread; + +- atomic_t req_ser; +- atomic_t flush_ser; ++ atomic_unchecked_t req_ser; ++ atomic_unchecked_t flush_ser; + + wait_queue_head_t force_wait; + }; +@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) + struct ib_fmr_pool *pool = pool_ptr; + + do { +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { + ib_fmr_batch_release(pool); + +- atomic_inc(&pool->flush_ser); ++ atomic_inc_unchecked(&pool->flush_ser); + wake_up_interruptible(&pool->force_wait); + + if (pool->flush_function) +@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) + } + + set_current_state(TASK_INTERRUPTIBLE); +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && + !kthread_should_stop()) + schedule(); + __set_current_state(TASK_RUNNING); +@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, + pool->dirty_watermark = params->dirty_watermark; + pool->dirty_len = 0; + spin_lock_init(&pool->pool_lock); +- atomic_set(&pool->req_ser, 0); +- atomic_set(&pool->flush_ser, 0); ++ atomic_set_unchecked(&pool->req_ser, 0); ++ atomic_set_unchecked(&pool->flush_ser, 0); + init_waitqueue_head(&pool->force_wait); + + pool->thread = kthread_run(ib_fmr_cleanup_thread, +@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) + } + spin_unlock_irq(&pool->pool_lock); + +- serial = atomic_inc_return(&pool->req_ser); ++ serial = atomic_inc_return_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + + if (wait_event_interruptible(pool->force_wait, +- atomic_read(&pool->flush_ser) - serial >= 0)) ++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) + return -EINTR; + + return 0; +@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) + } else { + list_add_tail(&fmr->list, &pool->dirty_list); + if (++pool->dirty_len >= pool->dirty_watermark) { +- atomic_inc(&pool->req_ser); ++ atomic_inc_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + } + } +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c +index 40c8353..946b0e4 100644 +--- a/drivers/infiniband/hw/cxgb4/mem.c ++++ b/drivers/infiniband/hw/cxgb4/mem.c +@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, + int err; + struct fw_ri_tpte tpt; + u32 
stag_idx; +- static atomic_t key; ++ static atomic_unchecked_t key; + + if (c4iw_fatal_error(rdev)) + return -EIO; +@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, + &rdev->resource.tpt_fifo_lock); + if (!stag_idx) + return -ENOMEM; +- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); ++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff); + } + PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", + __func__, stag_state, type, pdid, stag_idx); +diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c +index 31ae1b1..641d285 100644 +--- a/drivers/infiniband/hw/ipath/ipath_fs.c ++++ b/drivers/infiniband/hw/ipath/ipath_fs.c +@@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = { + }; + + static ssize_t flash_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t flash_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) + { + struct ipath_devdata *dd; +diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c +index 79b3dbc..96e5fcc 100644 +--- a/drivers/infiniband/hw/ipath/ipath_rc.c ++++ b/drivers/infiniband/hw/ipath/ipath_rc.c +@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, + struct ib_atomic_eth *ateth; + struct ipath_ack_entry *e; + u64 vaddr; +- atomic64_t *maddr; ++ atomic64_unchecked_t *maddr; + u64 sdata; + u32 rkey; + u8 next; +@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, + IB_ACCESS_REMOTE_ATOMIC))) + goto nack_acc_unlck; + /* Perform atomic OP and save result. */ +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; + sdata = be64_to_cpu(ateth->swap_data); + e = &qp->s_ack_queue[qp->r_head_ack_queue]; + e->atomic_data = (opcode == OP(FETCH_ADD)) ? +- (u64) atomic64_add_return(sdata, maddr) - sdata : ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + be64_to_cpu(ateth->compare_data), + sdata); +diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c +index 1f95bba..9530f87 100644 +--- a/drivers/infiniband/hw/ipath/ipath_ruc.c ++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c +@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) + unsigned long flags; + struct ib_wc wc; + u64 sdata; +- atomic64_t *maddr; ++ atomic64_unchecked_t *maddr; + enum ib_wc_status send_status; + + /* +@@ -382,11 +382,11 @@ again: + IB_ACCESS_REMOTE_ATOMIC))) + goto acc_err; + /* Perform atomic OP and save result. */ +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; + sdata = wqe->wr.wr.atomic.compare_add; + *(u64 *) sqp->s_sge.sge.vaddr = + (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
+- (u64) atomic64_add_return(sdata, maddr) - sdata : ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + sdata, wqe->wr.wr.atomic.swap); + goto send_comp; +diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c +index 5965b3d..16817fb 100644 +--- a/drivers/infiniband/hw/nes/nes.c ++++ b/drivers/infiniband/hw/nes/nes.c +@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); + LIST_HEAD(nes_adapter_list); + static LIST_HEAD(nes_dev_list); + +-atomic_t qps_destroyed; ++atomic_unchecked_t qps_destroyed; + + static unsigned int ee_flsh_adapter; + static unsigned int sysfs_nonidx_addr; +@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r + struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; + struct nes_adapter *nesadapter = nesdev->nesadapter; + +- atomic_inc(&qps_destroyed); ++ atomic_inc_unchecked(&qps_destroyed); + + /* Free the control structures */ + +diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h +index 568b4f1..5ea3eff 100644 +--- a/drivers/infiniband/hw/nes/nes.h ++++ b/drivers/infiniband/hw/nes/nes.h +@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level; + extern unsigned int wqm_quanta; + extern struct list_head nes_adapter_list; + +-extern atomic_t cm_connects; +-extern atomic_t cm_accepts; +-extern atomic_t cm_disconnects; +-extern atomic_t cm_closes; +-extern atomic_t cm_connecteds; +-extern atomic_t cm_connect_reqs; +-extern atomic_t cm_rejects; +-extern atomic_t mod_qp_timouts; +-extern atomic_t qps_created; +-extern atomic_t qps_destroyed; +-extern atomic_t sw_qps_destroyed; ++extern atomic_unchecked_t cm_connects; ++extern atomic_unchecked_t cm_accepts; ++extern atomic_unchecked_t cm_disconnects; ++extern atomic_unchecked_t cm_closes; ++extern atomic_unchecked_t cm_connecteds; ++extern atomic_unchecked_t cm_connect_reqs; ++extern atomic_unchecked_t cm_rejects; ++extern atomic_unchecked_t mod_qp_timouts; ++extern atomic_unchecked_t qps_created; ++extern atomic_unchecked_t qps_destroyed; ++extern atomic_unchecked_t sw_qps_destroyed; + extern u32 mh_detected; + extern u32 mh_pauses_sent; + extern u32 cm_packets_sent; +@@ -197,16 +197,16 @@ extern u32 cm_packets_created; + extern u32 cm_packets_received; + extern u32 cm_packets_dropped; + extern u32 cm_packets_retrans; +-extern atomic_t cm_listens_created; +-extern atomic_t cm_listens_destroyed; ++extern atomic_unchecked_t cm_listens_created; ++extern atomic_unchecked_t cm_listens_destroyed; + extern u32 cm_backlog_drops; +-extern atomic_t cm_loopbacks; +-extern atomic_t cm_nodes_created; +-extern atomic_t cm_nodes_destroyed; +-extern atomic_t cm_accel_dropped_pkts; +-extern atomic_t cm_resets_recvd; +-extern atomic_t pau_qps_created; +-extern atomic_t pau_qps_destroyed; ++extern atomic_unchecked_t cm_loopbacks; ++extern atomic_unchecked_t cm_nodes_created; ++extern atomic_unchecked_t cm_nodes_destroyed; ++extern atomic_unchecked_t cm_accel_dropped_pkts; ++extern atomic_unchecked_t cm_resets_recvd; ++extern atomic_unchecked_t pau_qps_created; ++extern atomic_unchecked_t pau_qps_destroyed; + + extern u32 int_mod_timer_init; + extern u32 int_mod_cq_depth_256; +diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c +index 0a52d72..0642f36 100644 +--- a/drivers/infiniband/hw/nes/nes_cm.c ++++ b/drivers/infiniband/hw/nes/nes_cm.c +@@ -68,14 +68,14 @@ u32 cm_packets_dropped; + u32 
cm_packets_retrans; + u32 cm_packets_created; + u32 cm_packets_received; +-atomic_t cm_listens_created; +-atomic_t cm_listens_destroyed; ++atomic_unchecked_t cm_listens_created; ++atomic_unchecked_t cm_listens_destroyed; + u32 cm_backlog_drops; +-atomic_t cm_loopbacks; +-atomic_t cm_nodes_created; +-atomic_t cm_nodes_destroyed; +-atomic_t cm_accel_dropped_pkts; +-atomic_t cm_resets_recvd; ++atomic_unchecked_t cm_loopbacks; ++atomic_unchecked_t cm_nodes_created; ++atomic_unchecked_t cm_nodes_destroyed; ++atomic_unchecked_t cm_accel_dropped_pkts; ++atomic_unchecked_t cm_resets_recvd; + + static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); + static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); +@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = { + + static struct nes_cm_core *g_cm_core; + +-atomic_t cm_connects; +-atomic_t cm_accepts; +-atomic_t cm_disconnects; +-atomic_t cm_closes; +-atomic_t cm_connecteds; +-atomic_t cm_connect_reqs; +-atomic_t cm_rejects; ++atomic_unchecked_t cm_connects; ++atomic_unchecked_t cm_accepts; ++atomic_unchecked_t cm_disconnects; ++atomic_unchecked_t cm_closes; ++atomic_unchecked_t cm_connecteds; ++atomic_unchecked_t cm_connect_reqs; ++atomic_unchecked_t cm_rejects; + + int nes_add_ref_cm_node(struct nes_cm_node *cm_node) + { +@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, + kfree(listener); + listener = NULL; + ret = 0; +- atomic_inc(&cm_listens_destroyed); ++ atomic_inc_unchecked(&cm_listens_destroyed); + } else { + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + } +@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, + cm_node->rem_mac); + + add_hte_node(cm_core, cm_node); +- atomic_inc(&cm_nodes_created); ++ atomic_inc_unchecked(&cm_nodes_created); + + return cm_node; + } +@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, + } + + atomic_dec(&cm_core->node_cnt); +- atomic_inc(&cm_nodes_destroyed); ++ atomic_inc_unchecked(&cm_nodes_destroyed); + nesqp = cm_node->nesqp; + if (nesqp) { + nesqp->cm_node = NULL; +@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, + + static void drop_packet(struct sk_buff *skb) + { +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + } + +@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, + { + + int reset = 0; /* whether to send reset in case of err.. */ +- atomic_inc(&cm_resets_recvd); ++ atomic_inc_unchecked(&cm_resets_recvd); + nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
+ " refcnt=%d\n", cm_node, cm_node->state, + atomic_read(&cm_node->ref_count)); +@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, + rem_ref_cm_node(cm_node->cm_core, cm_node); + return NULL; + } +- atomic_inc(&cm_loopbacks); ++ atomic_inc_unchecked(&cm_loopbacks); + loopbackremotenode->loopbackpartner = cm_node; + loopbackremotenode->tcp_cntxt.rcv_wscale = + NES_CM_DEFAULT_RCV_WND_SCALE; +@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, + nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp); + else { + rem_ref_cm_node(cm_core, cm_node); +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + } + break; +@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + + if ((cm_id) && (cm_id->event_handler)) { + if (issue_disconn) { +- atomic_inc(&cm_disconnects); ++ atomic_inc_unchecked(&cm_disconnects); + cm_event.event = IW_CM_EVENT_DISCONNECT; + cm_event.status = disconn_status; + cm_event.local_addr = cm_id->local_addr; +@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + } + + if (issue_close) { +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + nes_disconnect(nesqp, 1); + + cm_id->provider_data = nesqp; +@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + + nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", + nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); +- atomic_inc(&cm_accepts); ++ atomic_inc_unchecked(&cm_accepts); + + nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", + netdev_refcnt_read(nesvnic->netdev)); +@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) + struct nes_cm_core *cm_core; + u8 *start_buff; + +- atomic_inc(&cm_rejects); ++ atomic_inc_unchecked(&cm_rejects); + cm_node = (struct nes_cm_node *)cm_id->provider_data; + loopback = cm_node->loopbackpartner; + cm_core = cm_node->cm_core; +@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + ntohl(cm_id->local_addr.sin_addr.s_addr), + ntohs(cm_id->local_addr.sin_port)); + +- atomic_inc(&cm_connects); ++ atomic_inc_unchecked(&cm_connects); + nesqp->active_conn = 1; + + /* cache the cm_id in the qp */ +@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) + g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); + return err; + } +- atomic_inc(&cm_listens_created); ++ atomic_inc_unchecked(&cm_listens_created); + } + + cm_id->add_ref(cm_id); +@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event) + + if (nesqp->destroyed) + return; +- atomic_inc(&cm_connecteds); ++ atomic_inc_unchecked(&cm_connecteds); + nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" + " local port 0x%04X. 
jiffies = %lu.\n", + nesqp->hwqp.qp_id, +@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event) + + cm_id->add_ref(cm_id); + ret = cm_id->event_handler(cm_id, &cm_event); +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + cm_event.event = IW_CM_EVENT_CLOSE; + cm_event.status = 0; + cm_event.provider_data = cm_id->provider_data; +@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c +index b3b2a24..7bfaf1e 100644 +--- a/drivers/infiniband/hw/nes/nes_mgt.c ++++ b/drivers/infiniband/hw/nes/nes_mgt.c +@@ -40,8 +40,8 @@ + #include "nes.h" + #include "nes_mgt.h" + +-atomic_t pau_qps_created; +-atomic_t pau_qps_destroyed; ++atomic_unchecked_t pau_qps_created; ++atomic_unchecked_t pau_qps_destroyed; + + static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic) + { +@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp) + { + struct sk_buff *skb; + unsigned long flags; +- atomic_inc(&pau_qps_destroyed); ++ atomic_inc_unchecked(&pau_qps_destroyed); + + /* Free packets that have not yet been forwarded */ + /* Lock is acquired by skb_dequeue when removing the skb */ +@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq * + cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); + skb_queue_head_init(&nesqp->pau_list); + spin_lock_init(&nesqp->pau_lock); +- atomic_inc(&pau_qps_created); ++ atomic_inc_unchecked(&pau_qps_created); + nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp); + } + +diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c +index c00d2f3..8834298 100644 +--- a/drivers/infiniband/hw/nes/nes_nic.c ++++ b/drivers/infiniband/hw/nes/nes_nic.c +@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, + target_stat_values[++index] = mh_detected; + target_stat_values[++index] = mh_pauses_sent; + target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; +- target_stat_values[++index] = atomic_read(&cm_connects); +- target_stat_values[++index] = atomic_read(&cm_accepts); +- target_stat_values[++index] = atomic_read(&cm_disconnects); +- target_stat_values[++index] = atomic_read(&cm_connecteds); +- target_stat_values[++index] = atomic_read(&cm_connect_reqs); +- target_stat_values[++index] = atomic_read(&cm_rejects); +- target_stat_values[++index] = atomic_read(&mod_qp_timouts); +- target_stat_values[++index] = atomic_read(&qps_created); +- target_stat_values[++index] = atomic_read(&sw_qps_destroyed); +- target_stat_values[++index] = atomic_read(&qps_destroyed); +- target_stat_values[++index] = atomic_read(&cm_closes); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); ++ target_stat_values[++index] = 
atomic_read_unchecked(&cm_connecteds); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); ++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_created); ++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes); + target_stat_values[++index] = cm_packets_sent; + target_stat_values[++index] = cm_packets_bounced; + target_stat_values[++index] = cm_packets_created; + target_stat_values[++index] = cm_packets_received; + target_stat_values[++index] = cm_packets_dropped; + target_stat_values[++index] = cm_packets_retrans; +- target_stat_values[++index] = atomic_read(&cm_listens_created); +- target_stat_values[++index] = atomic_read(&cm_listens_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed); + target_stat_values[++index] = cm_backlog_drops; +- target_stat_values[++index] = atomic_read(&cm_loopbacks); +- target_stat_values[++index] = atomic_read(&cm_nodes_created); +- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); +- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); +- target_stat_values[++index] = atomic_read(&cm_resets_recvd); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); + target_stat_values[++index] = nesadapter->free_4kpbl; + target_stat_values[++index] = nesadapter->free_256pbl; + target_stat_values[++index] = int_mod_timer_init; + target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; + target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; + target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; +- target_stat_values[++index] = atomic_read(&pau_qps_created); +- target_stat_values[++index] = atomic_read(&pau_qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created); ++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed); + } + + /** +diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c +index 5095bc4..41e8fff 100644 +--- a/drivers/infiniband/hw/nes/nes_verbs.c ++++ b/drivers/infiniband/hw/nes/nes_verbs.c +@@ -46,9 +46,9 @@ + + #include <rdma/ib_umem.h> + +-atomic_t mod_qp_timouts; +-atomic_t qps_created; +-atomic_t sw_qps_destroyed; ++atomic_unchecked_t mod_qp_timouts; ++atomic_unchecked_t qps_created; ++atomic_unchecked_t sw_qps_destroyed; + + static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); + +@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, + if (init_attr->create_flags) + return ERR_PTR(-EINVAL); + +- atomic_inc(&qps_created); ++ atomic_inc_unchecked(&qps_created); + switch (init_attr->qp_type) { + case IB_QPT_RC: + if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { +@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) + struct iw_cm_event cm_event; + int ret = 0; + +- atomic_inc(&sw_qps_destroyed); ++ 
atomic_inc_unchecked(&sw_qps_destroyed); + nesqp->destroyed = 1; + + /* Blow away the connection if it exists. */ +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h +index b881bdc..c2e360c 100644 +--- a/drivers/infiniband/hw/qib/qib.h ++++ b/drivers/infiniband/hw/qib/qib.h +@@ -51,6 +51,7 @@ + #include <linux/completion.h> + #include <linux/kref.h> + #include <linux/sched.h> ++#include <linux/slab.h> + + #include "qib_common.h" + #include "qib_verbs.h" +diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c +index df7fa25..0c854f0 100644 +--- a/drivers/infiniband/hw/qib/qib_fs.c ++++ b/drivers/infiniband/hw/qib/qib_fs.c +@@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = { + }; + + static ssize_t flash_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t flash_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) + { + struct qib_devdata *dd; +diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c +index c351aa4..e6967c2 100644 +--- a/drivers/input/gameport/gameport.c ++++ b/drivers/input/gameport/gameport.c +@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys); + */ + static void gameport_init_port(struct gameport *gameport) + { +- static atomic_t gameport_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + + mutex_init(&gameport->drv_mutex); + device_initialize(&gameport->dev); + dev_set_name(&gameport->dev, "gameport%lu", +- (unsigned long)atomic_inc_return(&gameport_no) - 1); ++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1); + gameport->dev.bus = &gameport_bus; + gameport->dev.release = gameport_release_port; + if (gameport->parent) +diff --git a/drivers/input/input.c b/drivers/input/input.c +index da38d97..2aa0b79 100644 +--- a/drivers/input/input.c ++++ b/drivers/input/input.c +@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev) + */ + int input_register_device(struct input_dev *dev) + { +- static atomic_t input_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t input_no = ATOMIC_INIT(0); + struct input_handler *handler; + const char *path; + int error; +@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev) + dev->setkeycode = input_default_setkeycode; + + dev_set_name(&dev->dev, "input%ld", +- (unsigned long) atomic_inc_return(&input_no) - 1); ++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1); + + error = device_add(&dev->dev); + if (error) +diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c +index b8d8611..7a4a04b 100644 +--- a/drivers/input/joystick/sidewinder.c ++++ b/drivers/input/joystick/sidewinder.c +@@ -30,6 +30,7 @@ + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/init.h> + #include <linux/input.h> + #include <linux/gameport.h> +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index d728875..844c89b 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev, + + static int xpad_led_probe(struct usb_xpad *xpad) + { +- static atomic_t led_seq = ATOMIC_INIT(0); ++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0); + long led_no; + struct xpad_led *led; + struct led_classdev *led_cdev; +@@ -723,7 +723,7 @@ static int 
xpad_led_probe(struct usb_xpad *xpad) + if (!led) + return -ENOMEM; + +- led_no = (long)atomic_inc_return(&led_seq) - 1; ++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1; + + snprintf(led->name, sizeof(led->name), "xpad%ld", led_no); + led->xpad = xpad; +diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c +index 0110b5a..d3ad144 100644 +--- a/drivers/input/mousedev.c ++++ b/drivers/input/mousedev.c +@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, + + spin_unlock_irq(&client->packet_lock); + +- if (copy_to_user(buffer, data, count)) ++ if (count > sizeof(data) || copy_to_user(buffer, data, count)) + return -EFAULT; + + return count; +diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c +index ba70058..571d25d 100644 +--- a/drivers/input/serio/serio.c ++++ b/drivers/input/serio/serio.c +@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev) + */ + static void serio_init_port(struct serio *serio) + { +- static atomic_t serio_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + +@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio) + mutex_init(&serio->drv_mutex); + device_initialize(&serio->dev); + dev_set_name(&serio->dev, "serio%ld", +- (long)atomic_inc_return(&serio_no) - 1); ++ (long)atomic_inc_return_unchecked(&serio_no) - 1); + serio->dev.bus = &serio_bus; + serio->dev.release = serio_release_port; + serio->dev.groups = serio_device_attr_groups; +diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c +index e44933d..9ba484a 100644 +--- a/drivers/isdn/capi/capi.c ++++ b/drivers/isdn/capi/capi.c +@@ -83,8 +83,8 @@ struct capiminor { + + struct capi20_appl *ap; + u32 ncci; +- atomic_t datahandle; +- atomic_t msgid; ++ atomic_unchecked_t datahandle; ++ atomic_unchecked_t msgid; + + struct tty_port port; + int ttyinstop; +@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb) + capimsg_setu16(s, 2, mp->ap->applid); + capimsg_setu8 (s, 4, CAPI_DATA_B3); + capimsg_setu8 (s, 5, CAPI_RESP); +- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid)); ++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid)); + capimsg_setu32(s, 8, mp->ncci); + capimsg_setu16(s, 12, datahandle); + } +@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp) + mp->outbytes -= len; + spin_unlock_bh(&mp->outlock); + +- datahandle = atomic_inc_return(&mp->datahandle); ++ datahandle = atomic_inc_return_unchecked(&mp->datahandle); + skb_push(skb, CAPI_DATA_B3_REQ_LEN); + memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN); + capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN); + capimsg_setu16(skb->data, 2, mp->ap->applid); + capimsg_setu8 (skb->data, 4, CAPI_DATA_B3); + capimsg_setu8 (skb->data, 5, CAPI_REQ); +- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid)); ++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid)); + capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ + capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */ + capimsg_setu16(skb->data, 16, len); /* Data length */ +diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c +index db621db..825ea1a 100644 +--- a/drivers/isdn/gigaset/common.c ++++ b/drivers/isdn/gigaset/common.c +@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, + cs->commands_pending = 0; + cs->cur_at_seq = 0; + cs->gotfwver = -1; +- cs->open_count = 0; ++ 
local_set(&cs->open_count, 0); + cs->dev = NULL; + cs->tty = NULL; + cs->tty_dev = NULL; +diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h +index 212efaf..f187c6b 100644 +--- a/drivers/isdn/gigaset/gigaset.h ++++ b/drivers/isdn/gigaset/gigaset.h +@@ -35,6 +35,7 @@ + #include <linux/tty_driver.h> + #include <linux/list.h> + #include <linux/atomic.h> ++#include <asm/local.h> + + #define GIG_VERSION {0, 5, 0, 0} + #define GIG_COMPAT {0, 4, 0, 0} +@@ -433,7 +434,7 @@ struct cardstate { + spinlock_t cmdlock; + unsigned curlen, cmdbytes; + +- unsigned open_count; ++ local_t open_count; + struct tty_struct *tty; + struct tasklet_struct if_wake_tasklet; + unsigned control_state; +diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c +index ee0a549..a7c9798 100644 +--- a/drivers/isdn/gigaset/interface.c ++++ b/drivers/isdn/gigaset/interface.c +@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp) + } + tty->driver_data = cs; + +- ++cs->open_count; +- +- if (cs->open_count == 1) { ++ if (local_inc_return(&cs->open_count) == 1) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = tty; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { +- if (!--cs->open_count) { ++ if (!local_dec_return(&cs->open_count)) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = NULL; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty, + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { + retval = 0; +@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) + retval = -ENODEV; + goto done; + } +- if (!cs->open_count) { ++ if (!local_read(&cs->open_count)) { + dev_warn(cs->dev, "%s: device not opened\n", __func__); + retval = -ENODEV; + goto done; +@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty) + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) { + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); +@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if 
(!cs->open_count) ++ else if (!local_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); +@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old) + goto out; + } + +- if (!cs->open_count) { ++ if (!local_read(&cs->open_count)) { + dev_warn(cs->dev, "%s: device not opened\n", __func__); + goto out; + } +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c +index 2a57da59..e7a12ed 100644 +--- a/drivers/isdn/hardware/avm/b1.c ++++ b/drivers/isdn/hardware/avm/b1.c +@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file) + } + if (left) { + if (t4file->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config) + } + if (left) { + if (config->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h +index 85784a7..a19ca98 100644 +--- a/drivers/isdn/hardware/eicon/divasync.h ++++ b/drivers/isdn/hardware/eicon/divasync.h +@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter { + } diva_didd_add_adapter_t; + typedef struct _diva_didd_remove_adapter { + IDI_CALL p_request; +-} diva_didd_remove_adapter_t; ++} __no_const diva_didd_remove_adapter_t; + typedef struct _diva_didd_read_adapter_array { + void * buffer; + dword length; +diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h +index a3bd163..8956575 100644 +--- a/drivers/isdn/hardware/eicon/xdi_adapter.h ++++ b/drivers/isdn/hardware/eicon/xdi_adapter.h +@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t { + typedef struct _diva_os_idi_adapter_interface { + diva_init_card_proc_t cleanup_adapter_proc; + diva_cmd_card_proc_t cmd_proc; +-} diva_os_idi_adapter_interface_t; ++} __no_const diva_os_idi_adapter_interface_t; + + typedef struct _diva_os_xdi_adapter { + struct list_head link; +diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c +index 2339d73..802ab87a 100644 +--- a/drivers/isdn/i4l/isdn_net.c ++++ b/drivers/isdn/i4l/isdn_net.c +@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, + { + isdn_net_local *lp = netdev_priv(dev); + unsigned char *p; +- ushort len = 0; ++ int len = 0; + + switch (lp->p_encap) { + case ISDN_NET_ENCAP_ETHER: +diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c +index 1f355bb..43f1fea 100644 +--- a/drivers/isdn/icn/icn.c ++++ b/drivers/isdn/icn/icn.c +@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card) + if (count > len) + count = len; + if (user) { +- if (copy_from_user(msg, buf, count)) ++ if (count > sizeof msg || copy_from_user(msg, buf, count)) + return -EFAULT; + } else + memcpy(msg, buf, count); +diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c +index b5fdcb7..5b6c59f 100644 +--- a/drivers/lguest/core.c ++++ b/drivers/lguest/core.c +@@ -92,9 +92,17 @@ static __init int map_switcher(void) + * it's worked so far. The end address needs +1 because __get_vm_area + * allocates an extra guard page, so we need space for that. 
+ */ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, ++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR ++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#else + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#endif ++ + if (!switcher_vma) { + err = -ENOMEM; + printk("lguest: could not map switcher pages high\n"); +@@ -119,7 +127,7 @@ static __init int map_switcher(void) + * Now the Switcher is mapped at the right address, we can't fail! + * Copy in the compiled-in Switcher code (from x86/switcher_32.S). + */ +- memcpy(switcher_vma->addr, start_switcher_text, ++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text), + end_switcher_text - start_switcher_text); + + printk(KERN_INFO "lguest: mapped switcher at %p\n", +diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c +index ff4a0bc..f5fdd9c 100644 +--- a/drivers/lguest/lguest_user.c ++++ b/drivers/lguest/lguest_user.c +@@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) + * Once our Guest is initialized, the Launcher makes it run by reading + * from /dev/lguest. + */ ++static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3); + static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) + { + struct lguest *lg = file->private_data; +diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c +index 65af42f..530c87a 100644 +--- a/drivers/lguest/x86/core.c ++++ b/drivers/lguest/x86/core.c +@@ -59,7 +59,7 @@ static struct { + /* Offset from where switcher.S was compiled to where we've copied it */ + static unsigned long switcher_offset(void) + { +- return SWITCHER_ADDR - (unsigned long)start_switcher_text; ++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text); + } + + /* This cpu's struct lguest_pages. */ +@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) + * These copies are pretty cheap, so we do them unconditionally: */ + /* Save the current Host top-level page directory. + */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pages->state.host_cr3 = read_cr3(); ++#else + pages->state.host_cr3 = __pa(current->mm->pgd); ++#endif ++ + /* + * Set up the Guest's page tables to see this CPU's pages (and no + * other CPU's pages). +@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void) + * compiled-in switcher code and the high-mapped copy we just made. + */ + for (i = 0; i < IDT_ENTRIES; i++) +- default_idt_entries[i] += switcher_offset(); ++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset(); + + /* + * Set up the Switcher's per-cpu areas. +@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void) + * it will be undisturbed when we switch. To change %cs and jump we + * need this structure to feed to Intel's "lcall" instruction. 
+ */ +- lguest_entry.offset = (long)switch_to_guest + switcher_offset(); ++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset(); + lguest_entry.segment = LGUEST_CS; + + /* +diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S +index 40634b0..4f5855e 100644 +--- a/drivers/lguest/x86/switcher_32.S ++++ b/drivers/lguest/x86/switcher_32.S +@@ -87,6 +87,7 @@ + #include <asm/page.h> + #include <asm/segment.h> + #include <asm/lguest.h> ++#include <asm/processor-flags.h> + + // We mark the start of the code to copy + // It's placed in .text tho it's never run here +@@ -149,6 +150,13 @@ ENTRY(switch_to_guest) + // Changes type when we load it: damn Intel! + // For after we switch over our page tables + // That entry will be read-only: we'd crash. ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %edx ++ xor $X86_CR0_WP, %edx ++ mov %edx, %cr0 ++#endif ++ + movl $(GDT_ENTRY_TSS*8), %edx + ltr %dx + +@@ -157,9 +165,15 @@ ENTRY(switch_to_guest) + // Let's clear it again for our return. + // The GDT descriptor of the Host + // Points to the table after two "size" bytes +- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx ++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax + // Clear "used" from type field (byte 5, bit 2) +- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) ++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %eax ++ xor $X86_CR0_WP, %eax ++ mov %eax, %cr0 ++#endif + + // Once our page table's switched, the Guest is live! + // The Host fades as we run this final step. +@@ -295,13 +309,12 @@ deliver_to_host: + // I consulted gcc, and it gave + // These instructions, which I gladly credit: + leal (%edx,%ebx,8), %eax +- movzwl (%eax),%edx +- movl 4(%eax), %eax +- xorw %ax, %ax +- orl %eax, %edx ++ movl 4(%eax), %edx ++ movw (%eax), %dx + // Now the address of the handler's in %edx + // We call it now: its "iret" drops us home. +- jmp *%edx ++ ljmp $__KERNEL_CS, $1f ++1: jmp *%edx + + // Every interrupt can come to us here + // But we must truly tell each apart. 
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c +index 4daf9e5..b8d1d0f 100644 +--- a/drivers/macintosh/macio_asic.c ++++ b/drivers/macintosh/macio_asic.c +@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev) + * MacIO is matched against any Apple ID, it's probe() function + * will then decide wether it applies or not + */ +-static const struct pci_device_id __devinitdata pci_ids [] = { { ++static const struct pci_device_id __devinitconst pci_ids [] = { { + .vendor = PCI_VENDOR_ID_APPLE, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 1ce84ed..0fdd40a 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) + cmd == DM_LIST_VERSIONS_CMD) + return 0; + +- if ((cmd == DM_DEV_CREATE_CMD)) { ++ if (cmd == DM_DEV_CREATE_CMD) { + if (!*param->name) { + DMWARN("name not supplied when creating device"); + return -EINVAL; +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 9bfd057..01180bc 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -40,7 +40,7 @@ enum dm_raid1_error { + + struct mirror { + struct mirror_set *ms; +- atomic_t error_count; ++ atomic_unchecked_t error_count; + unsigned long error_type; + struct dm_dev *dev; + sector_t offset; +@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms) + struct mirror *m; + + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) +- if (!atomic_read(&m->error_count)) ++ if (!atomic_read_unchecked(&m->error_count)) + return m; + + return NULL; +@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) + * simple way to tell if a device has encountered + * errors. + */ +- atomic_inc(&m->error_count); ++ atomic_inc_unchecked(&m->error_count); + + if (test_and_set_bit(error_type, &m->error_type)) + return; +@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) + struct mirror *m = get_default_mirror(ms); + + do { +- if (likely(!atomic_read(&m->error_count))) ++ if (likely(!atomic_read_unchecked(&m->error_count))) + return m; + + if (m-- == ms->mirror) +@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m) + { + struct mirror *default_mirror = get_default_mirror(m->ms); + +- return !atomic_read(&default_mirror->error_count); ++ return !atomic_read_unchecked(&default_mirror->error_count); + } + + static int mirror_available(struct mirror_set *ms, struct bio *bio) +@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) + */ + if (likely(region_in_sync(ms, region, 1))) + m = choose_mirror(ms, bio->bi_sector); +- else if (m && atomic_read(&m->error_count)) ++ else if (m && atomic_read_unchecked(&m->error_count)) + m = NULL; + + if (likely(m)) +@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + } + + ms->mirror[mirror].ms = ms; +- atomic_set(&(ms->mirror[mirror].error_count), 0); ++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); + ms->mirror[mirror].error_type = 0; + ms->mirror[mirror].offset = offset; + +@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti) + */ + static char device_status_char(struct mirror *m) + { +- if (!atomic_read(&(m->error_count))) ++ if (!atomic_read_unchecked(&(m->error_count))) + return 'A'; + + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 
'F' : +diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c +index 3d80cf0..b77cc47 100644 +--- a/drivers/md/dm-stripe.c ++++ b/drivers/md/dm-stripe.c +@@ -20,7 +20,7 @@ struct stripe { + struct dm_dev *dev; + sector_t physical_start; + +- atomic_t error_count; ++ atomic_unchecked_t error_count; + }; + + struct stripe_c { +@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) + kfree(sc); + return r; + } +- atomic_set(&(sc->stripe[i].error_count), 0); ++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0); + } + + ti->private = sc; +@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti, + DMEMIT("%d ", sc->stripes); + for (i = 0; i < sc->stripes; i++) { + DMEMIT("%s ", sc->stripe[i].dev->name); +- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? ++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? + 'D' : 'A'; + } + buffer[i] = '\0'; +@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, + */ + for (i = 0; i < sc->stripes; i++) + if (!strcmp(sc->stripe[i].dev->name, major_minor)) { +- atomic_inc(&(sc->stripe[i].error_count)); +- if (atomic_read(&(sc->stripe[i].error_count)) < ++ atomic_inc_unchecked(&(sc->stripe[i].error_count)); ++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < + DM_IO_ERROR_THRESHOLD) + schedule_work(&sc->trigger_event); + } +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 8e91321..fd17aef 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, + if (!dev_size) + return 0; + +- if ((start >= dev_size) || (start + len > dev_size)) { ++ if ((start >= dev_size) || (len > dev_size - start)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 237571a..fb6d19b 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd, + + pmd->info.tm = tm; + pmd->info.levels = 2; +- pmd->info.value_type.context = pmd->data_sm; ++ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; + pmd->info.value_type.size = sizeof(__le64); + pmd->info.value_type.inc = data_block_inc; + pmd->info.value_type.dec = data_block_dec; +@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd, + + pmd->bl_info.tm = tm; + pmd->bl_info.levels = 1; +- pmd->bl_info.value_type.context = pmd->data_sm; ++ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; + pmd->bl_info.value_type.size = sizeof(__le64); + pmd->bl_info.value_type.inc = data_block_inc; + pmd->bl_info.value_type.dec = data_block_dec; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 4720f68..78d1df7 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -177,9 +177,9 @@ struct mapped_device { + /* + * Event handling. 
+ */ +- atomic_t event_nr; ++ atomic_unchecked_t event_nr; + wait_queue_head_t eventq; +- atomic_t uevent_seq; ++ atomic_unchecked_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; /* Protect access to uevent_list */ + +@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor) + rwlock_init(&md->map_lock); + atomic_set(&md->holders, 1); + atomic_set(&md->open_count, 0); +- atomic_set(&md->event_nr, 0); +- atomic_set(&md->uevent_seq, 0); ++ atomic_set_unchecked(&md->event_nr, 0); ++ atomic_set_unchecked(&md->uevent_seq, 0); + INIT_LIST_HEAD(&md->uevent_list); + spin_lock_init(&md->uevent_lock); + +@@ -1980,7 +1980,7 @@ static void event_callback(void *context) + + dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); + +- atomic_inc(&md->event_nr); ++ atomic_inc_unchecked(&md->event_nr); + wake_up(&md->eventq); + } + +@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, + + uint32_t dm_next_uevent_seq(struct mapped_device *md) + { +- return atomic_add_return(1, &md->uevent_seq); ++ return atomic_add_return_unchecked(1, &md->uevent_seq); + } + + uint32_t dm_get_event_nr(struct mapped_device *md) + { +- return atomic_read(&md->event_nr); ++ return atomic_read_unchecked(&md->event_nr); + } + + int dm_wait_event(struct mapped_device *md, int event_nr) + { + return wait_event_interruptible(md->eventq, +- (event_nr != atomic_read(&md->event_nr))); ++ (event_nr != atomic_read_unchecked(&md->event_nr))); + } + + void dm_uevent_add(struct mapped_device *md, struct list_head *elist) +diff --git a/drivers/md/md.c b/drivers/md/md.c +index f47f1f8..b7f559e 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio); + * start build, activate spare + */ + static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +-static atomic_t md_event_count; ++static atomic_unchecked_t md_event_count; + void md_new_event(struct mddev *mddev) + { +- atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + EXPORT_SYMBOL_GPL(md_new_event); +@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event); + */ + static void md_new_event_inintr(struct mddev *mddev) + { +- atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + +@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ + + rdev->preferred_minor = 0xffff; + rdev->data_offset = le64_to_cpu(sb->data_offset); +- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); ++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); + + rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; +@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) + else + sb->resync_offset = cpu_to_le64(0); + +- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); ++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); + + sb->raid_disks = cpu_to_le32(mddev->raid_disks); + sb->size = cpu_to_le64(mddev->dev_sectors); +@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); + static ssize_t + errors_show(struct md_rdev *rdev, char *page) + { +- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); ++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); + } + + static ssize_t 
+@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len) + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { +- atomic_set(&rdev->corrected_errors, n); ++ atomic_set_unchecked(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev) + rdev->sb_loaded = 0; + rdev->bb_page = NULL; + atomic_set(&rdev->nr_pending, 0); +- atomic_set(&rdev->read_errors, 0); +- atomic_set(&rdev->corrected_errors, 0); ++ atomic_set_unchecked(&rdev->read_errors, 0); ++ atomic_set_unchecked(&rdev->corrected_errors, 0); + + INIT_LIST_HEAD(&rdev->same_set); + init_waitqueue_head(&rdev->blocked_wait); +@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v) + + spin_unlock(&pers_lock); + seq_printf(seq, "\n"); +- seq->poll_event = atomic_read(&md_event_count); ++ seq->poll_event = atomic_read_unchecked(&md_event_count); + return 0; + } + if (v == (void*)2) { +@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v) + chunk_kb ? "KB" : "B"); + if (bitmap->file) { + seq_printf(seq, ", file: "); +- seq_path(seq, &bitmap->file->f_path, " \t\n"); ++ seq_path(seq, &bitmap->file->f_path, " \t\n\"); + } + + seq_printf(seq, "\n"); +@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file) + return error; + + seq = file->private_data; +- seq->poll_event = atomic_read(&md_event_count); ++ seq->poll_event = atomic_read_unchecked(&md_event_count); + return error; + } + +@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) + /* always allow read */ + mask = POLLIN | POLLRDNORM; + +- if (seq->poll_event != atomic_read(&md_event_count)) ++ if (seq->poll_event != atomic_read_unchecked(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; + } +@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init) + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + + (int)part_stat_read(&disk->part0, sectors[1]) - +- atomic_read(&disk->sync_io); ++ atomic_read_unchecked(&disk->sync_io); + /* sync IO will cause sync_io to increase before the disk_stats + * as sync_io is counted when a request starts, and + * disk_stats is counted when it completes. +diff --git a/drivers/md/md.h b/drivers/md/md.h +index cf742d9..7c7c745 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -120,13 +120,13 @@ struct md_rdev { + * only maintained for arrays that + * support hot removal + */ +- atomic_t read_errors; /* number of consecutive read errors that ++ atomic_unchecked_t read_errors; /* number of consecutive read errors that + * we have tried to ignore. + */ + struct timespec last_read_error; /* monotonic time since our + * last read error + */ +- atomic_t corrected_errors; /* number of corrected read errors, ++ atomic_unchecked_t corrected_errors; /* number of corrected read errors, + * for reporting to userspace and storing + * in superblock. 
+ */ +@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) + + static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) + { +- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); ++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + } + + struct md_personality +diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c +index 50ed53b..4f29d7d 100644 +--- a/drivers/md/persistent-data/dm-space-map-checker.c ++++ b/drivers/md/persistent-data/dm-space-map-checker.c +@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca) + /*----------------------------------------------------------------*/ + + struct sm_checker { +- struct dm_space_map sm; ++ dm_space_map_no_const sm; + + struct count_array old_counts; + struct count_array counts; +diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c +index fc469ba..2d91555 100644 +--- a/drivers/md/persistent-data/dm-space-map-disk.c ++++ b/drivers/md/persistent-data/dm-space-map-disk.c +@@ -23,7 +23,7 @@ + * Space map interface. + */ + struct sm_disk { +- struct dm_space_map sm; ++ dm_space_map_no_const sm; + + struct ll_disk ll; + struct ll_disk old_ll; +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index e89ae5e..062e4c2 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -43,7 +43,7 @@ struct block_op { + }; + + struct sm_metadata { +- struct dm_space_map sm; ++ dm_space_map_no_const sm; + + struct ll_disk ll; + struct ll_disk old_ll; +diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h +index 1cbfc6b..56e1dbb 100644 +--- a/drivers/md/persistent-data/dm-space-map.h ++++ b/drivers/md/persistent-data/dm-space-map.h +@@ -60,6 +60,7 @@ struct dm_space_map { + int (*root_size)(struct dm_space_map *sm, size_t *result); + int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); + }; ++typedef struct dm_space_map __no_const dm_space_map_no_const; + + /*----------------------------------------------------------------*/ + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 7d9e071..015b1d5 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) + if (r1_sync_page_io(rdev, sect, s, + bio->bi_io_vec[idx].bv_page, + READ) != 0) +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + } + sectors -= s; + sect += s; +@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, + test_bit(In_sync, &rdev->flags)) { + if (r1_sync_page_io(rdev, sect, s, + conf->tmppage, READ)) { +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + printk(KERN_INFO + "md/raid1:%s: read error corrected " + "(%d sectors at %llu on %s)\n", +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 685ddf3..955b087 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error) + /* The write handler will notice the lack of + * R10BIO_Uptodate and record any errors etc + */ +- atomic_add(r10_bio->sectors, ++ atomic_add_unchecked(r10_bio->sectors, + 
&conf->mirrors[d].rdev->corrected_errors); + + /* for reconstruct, we always reschedule after a read. +@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) + { + struct timespec cur_time_mon; + unsigned long hours_since_last; +- unsigned int read_errors = atomic_read(&rdev->read_errors); ++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors); + + ktime_get_ts(&cur_time_mon); + +@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) + * overflowing the shift of read_errors by hours_since_last. + */ + if (hours_since_last >= 8 * sizeof(read_errors)) +- atomic_set(&rdev->read_errors, 0); ++ atomic_set_unchecked(&rdev->read_errors, 0); + else +- atomic_set(&rdev->read_errors, read_errors >> hours_since_last); ++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last); + } + + static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, +@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + return; + + check_decay_read_errors(mddev, rdev); +- atomic_inc(&rdev->read_errors); +- if (atomic_read(&rdev->read_errors) > max_read_errors) { ++ atomic_inc_unchecked(&rdev->read_errors); ++ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) { + char b[BDEVNAME_SIZE]; + bdevname(rdev->bdev, b); + +@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + "md/raid10:%s: %s: Raid device exceeded " + "read_error threshold [cur %d:max %d]\n", + mdname(mddev), b, +- atomic_read(&rdev->read_errors), max_read_errors); ++ atomic_read_unchecked(&rdev->read_errors), max_read_errors); + printk(KERN_NOTICE + "md/raid10:%s: %s: Failing raid device\n", + mdname(mddev), b); +@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + (unsigned long long)( + sect + rdev->data_offset), + bdevname(rdev->bdev, b)); +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + } + + rdev_dec_pending(rdev, mddev); +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 858fdbb..b2dac95 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error) + (unsigned long long)(sh->sector + + rdev->data_offset), + bdevname(rdev->bdev, b)); +- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); ++ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } +- if (atomic_read(&conf->disks[i].rdev->read_errors)) +- atomic_set(&conf->disks[i].rdev->read_errors, 0); ++ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors)) ++ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0); + } else { + const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); + int retry = 0; + rdev = conf->disks[i].rdev; + + clear_bit(R5_UPTODATE, &sh->dev[i].flags); +- atomic_inc(&rdev->read_errors); ++ atomic_inc_unchecked(&rdev->read_errors); + if (conf->mddev->degraded >= conf->max_degraded) + printk_ratelimited( + KERN_WARNING +@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error) + (unsigned long long)(sh->sector + + rdev->data_offset), + bdn); +- else if (atomic_read(&rdev->read_errors) ++ else if (atomic_read_unchecked(&rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "md/raid:%s: Too many 
read errors, failing device %s.\n", +diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c +index ba9a643..e474ab5 100644 +--- a/drivers/media/dvb/ddbridge/ddbridge-core.c ++++ b/drivers/media/dvb/ddbridge/ddbridge-core.c +@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = { + .subvendor = _subvend, .subdevice = _subdev, \ + .driver_data = (unsigned long)&_driverdata } + +-static const struct pci_device_id ddb_id_tbl[] __devinitdata = { ++static const struct pci_device_id ddb_id_tbl[] __devinitconst = { + DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus), + DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus), + DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le), +diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h +index a7d876f..8c21b61 100644 +--- a/drivers/media/dvb/dvb-core/dvb_demux.h ++++ b/drivers/media/dvb/dvb-core/dvb_demux.h +@@ -73,7 +73,7 @@ struct dvb_demux_feed { + union { + dmx_ts_cb ts; + dmx_section_cb sec; +- } cb; ++ } __no_const cb; + + struct dvb_demux *demux; + void *priv; +diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c +index f732877..d38c35a 100644 +--- a/drivers/media/dvb/dvb-core/dvbdev.c ++++ b/drivers/media/dvb/dvb-core/dvbdev.c +@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, + const struct dvb_device *template, void *priv, int type) + { + struct dvb_device *dvbdev; +- struct file_operations *dvbdevfops; ++ file_operations_no_const *dvbdevfops; + struct device *clsdev; + int minor; + int id; +diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c +index 9f2a02c..5920f88 100644 +--- a/drivers/media/dvb/dvb-usb/cxusb.c ++++ b/drivers/media/dvb/dvb-usb/cxusb.c +@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = { + struct dib0700_adapter_state { + int (*set_param_save) (struct dvb_frontend *, + struct dvb_frontend_parameters *); +-}; ++} __no_const; + + static int dib7070_set_param_override(struct dvb_frontend *fe, + struct dvb_frontend_parameters *fep) +diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c +index f103ec1..5e8968b 100644 +--- a/drivers/media/dvb/dvb-usb/dw2102.c ++++ b/drivers/media/dvb/dvb-usb/dw2102.c +@@ -95,7 +95,7 @@ struct su3000_state { + + struct s6x0_state { + int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v); +-}; ++} __no_const; + + /* debug */ + static int dvb_usb_dw2102_debug; +diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h +index 404f63a..4796533 100644 +--- a/drivers/media/dvb/frontends/dib3000.h ++++ b/drivers/media/dvb/frontends/dib3000.h +@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops + int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff); + int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff); + int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl); +-}; ++} __no_const; + + #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE)) + extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, +diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c +index 90bf573..e8463da 100644 +--- a/drivers/media/dvb/frontends/ds3000.c ++++ b/drivers/media/dvb/frontends/ds3000.c +@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe, + + for (i = 0; i < 30 ; i++) { + 
ds3000_read_status(fe, &status); +- if (status && FE_HAS_LOCK) ++ if (status & FE_HAS_LOCK) + break; + + msleep(10); +diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c +index 0564192..75b16f5 100644 +--- a/drivers/media/dvb/ngene/ngene-cards.c ++++ b/drivers/media/dvb/ngene/ngene-cards.c +@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = { + + /****************************************************************************/ + +-static const struct pci_device_id ngene_id_tbl[] __devinitdata = { ++static const struct pci_device_id ngene_id_tbl[] __devinitconst = { + NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2), + NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2), + NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2), +diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c +index 16a089f..ab1667d 100644 +--- a/drivers/media/radio/radio-cadet.c ++++ b/drivers/media/radio/radio-cadet.c +@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo + unsigned char readbuf[RDS_BUFFER]; + int i = 0; + ++ if (count > RDS_BUFFER) ++ return -EFAULT; + mutex_lock(&dev->lock); + if (dev->rdsstat == 0) { + dev->rdsstat = 1; +diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c +index 61287fc..8b08712 100644 +--- a/drivers/media/rc/redrat3.c ++++ b/drivers/media/rc/redrat3.c +@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier) + return carrier; + } + +-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n) ++static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n) + { + struct redrat3_dev *rr3 = rcdev->priv; + struct device *dev = rr3->dev; +diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h +index 9cde353..8c6a1c3 100644 +--- a/drivers/media/video/au0828/au0828.h ++++ b/drivers/media/video/au0828/au0828.h +@@ -191,7 +191,7 @@ struct au0828_dev { + + /* I2C */ + struct i2c_adapter i2c_adap; +- struct i2c_algorithm i2c_algo; ++ i2c_algorithm_no_const i2c_algo; + struct i2c_client i2c_client; + u32 i2c_rc; + +diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c +index 68d1240..46b32eb 100644 +--- a/drivers/media/video/cx88/cx88-alsa.c ++++ b/drivers/media/video/cx88/cx88-alsa.c +@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = { + * Only boards with eeprom and byte 1 at eeprom=1 have it + */ + +-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = { ++static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = { + {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, + {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, + {0, } +diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c +index ee0d0b3..63f6b78 100644 +--- a/drivers/media/video/omap/omap_vout.c ++++ b/drivers/media/video/omap/omap_vout.c +@@ -64,7 +64,6 @@ enum omap_vout_channels { + OMAP_VIDEO2, + }; + +-static struct videobuf_queue_ops video_vbq_ops; + /* Variables configurable through module params*/ + static u32 video1_numbuffers = 3; + static u32 video2_numbuffers = 3; +@@ -999,6 +998,12 @@ static int omap_vout_open(struct file *file) + { + struct videobuf_queue *q; + struct omap_vout_device *vout = NULL; ++ static struct videobuf_queue_ops video_vbq_ops = { ++ .buf_setup = omap_vout_buffer_setup, ++ .buf_prepare = omap_vout_buffer_prepare, ++ .buf_release = omap_vout_buffer_release, ++ .buf_queue 
= omap_vout_buffer_queue, ++ }; + + vout = video_drvdata(file); + v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__); +@@ -1016,10 +1021,6 @@ static int omap_vout_open(struct file *file) + vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + q = &vout->vbq; +- video_vbq_ops.buf_setup = omap_vout_buffer_setup; +- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare; +- video_vbq_ops.buf_release = omap_vout_buffer_release; +- video_vbq_ops.buf_queue = omap_vout_buffer_queue; + spin_lock_init(&vout->vbq_lock); + + videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev, +diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h +index 305e6aa..0143317 100644 +--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h ++++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h +@@ -196,7 +196,7 @@ struct pvr2_hdw { + + /* I2C stuff */ + struct i2c_adapter i2c_adap; +- struct i2c_algorithm i2c_algo; ++ i2c_algorithm_no_const i2c_algo; + pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT]; + int i2c_cx25840_hack_state; + int i2c_linked; +diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c +index 2fd38a0..ddec3c4 100644 +--- a/drivers/media/video/saa7164/saa7164-encoder.c ++++ b/drivers/media/video/saa7164/saa7164-encoder.c +@@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port) + } + + static ssize_t fops_read(struct file *file, char __user *buffer, ++ size_t count, loff_t *pos) __size_overflow(3); ++static ssize_t fops_read(struct file *file, char __user *buffer, + size_t count, loff_t *pos) + { + struct saa7164_encoder_fh *fh = file->private_data; +diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c +index e2e0341..b80056c 100644 +--- a/drivers/media/video/saa7164/saa7164-vbi.c ++++ b/drivers/media/video/saa7164/saa7164-vbi.c +@@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port) + } + + static ssize_t fops_read(struct file *file, char __user *buffer, ++ size_t count, loff_t *pos) __size_overflow(3); ++static ssize_t fops_read(struct file *file, char __user *buffer, + size_t count, loff_t *pos) + { + struct saa7164_vbi_fh *fh = file->private_data; +diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c +index a0895bf..b7ebb1b 100644 +--- a/drivers/media/video/timblogiw.c ++++ b/drivers/media/video/timblogiw.c +@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma) + + /* Platform device functions */ + +-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = { ++static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = { + .vidioc_querycap = timblogiw_querycap, + .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt, + .vidioc_g_fmt_vid_cap = timblogiw_g_fmt, +@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = { + .vidioc_enum_framesizes = timblogiw_enum_framesizes, + }; + +-static __devinitconst struct v4l2_file_operations timblogiw_fops = { ++static __devinitconst v4l2_file_operations_no_const timblogiw_fops = { + .owner = THIS_MODULE, + .open = timblogiw_open, + .release = timblogiw_close, +diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c +index e9c6a60..daf6a33 100644 +--- a/drivers/message/fusion/mptbase.c ++++ b/drivers/message/fusion/mptbase.c +@@ -6753,8 +6753,13 @@ static int 
mpt_iocinfo_proc_show(struct seq_file *m, void *v) + seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL); ++#else + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); ++#endif ++ + /* + * Rounding UP to nearest 4-kB boundary here... + */ +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c +index 9d95042..b808101 100644 +--- a/drivers/message/fusion/mptsas.c ++++ b/drivers/message/fusion/mptsas.c +@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) + return 0; + } + ++static inline void ++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) ++{ ++ if (phy_info->port_details) { ++ phy_info->port_details->rphy = rphy; ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", ++ ioc->name, rphy)); ++ } ++ ++ if (rphy) { ++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG, ++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", ++ ioc->name, rphy, rphy->dev.release)); ++ } ++} ++ + /* no mutex */ + static void + mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) +@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) + return NULL; + } + +-static inline void +-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +-{ +- if (phy_info->port_details) { +- phy_info->port_details->rphy = rphy; +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", +- ioc->name, rphy)); +- } +- +- if (rphy) { +- dsaswideprintk(ioc, dev_printk(KERN_DEBUG, +- &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", +- ioc->name, rphy, rphy->dev.release)); +- } +-} +- + static inline struct sas_port * + mptsas_get_port(struct mptsas_phyinfo *phy_info) + { +diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c +index 0c3ced7..1fe34ec 100644 +--- a/drivers/message/fusion/mptscsih.c ++++ b/drivers/message/fusion/mptscsih.c +@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost) + + h = shost_priv(SChost); + +- if (h) { +- if (h->info_kbuf == NULL) +- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) +- return h->info_kbuf; +- h->info_kbuf[0] = '\0'; ++ if (!h) ++ return NULL; + +- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); +- h->info_kbuf[size-1] = '\0'; +- } ++ if (h->info_kbuf == NULL) ++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) ++ return h->info_kbuf; ++ h->info_kbuf[0] = '\0'; ++ ++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); ++ h->info_kbuf[size-1] = '\0'; + + return h->info_kbuf; + } +diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c +index 07dbeaf..5533142 100644 +--- a/drivers/message/i2o/i2o_proc.c ++++ b/drivers/message/i2o/i2o_proc.c +@@ -255,13 +255,6 @@ static char *scsi_devices[] = { + "Array Controller Device" + }; + +-static char *chtostr(u8 * chars, int n) +-{ +- char tmp[256]; +- tmp[0] = 0; +- return strncat(tmp, (char *)chars, n); +-} +- + static int i2o_report_query_status(struct seq_file *seq, int block_status, + char *group) + { +@@ -838,8 +831,7 @@ static int 
i2o_seq_show_ddm_table(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); + seq_printf(seq, "%-#8x", ddm_table.module_id); +- seq_printf(seq, "%-29s", +- chtostr(ddm_table.module_name_version, 28)); ++ seq_printf(seq, "%-.28s", ddm_table.module_name_version); + seq_printf(seq, "%9d ", ddm_table.data_size); + seq_printf(seq, "%8d", ddm_table.code_size); + +@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", dst->i2o_vendor_id); + seq_printf(seq, "%-#8x", dst->module_id); +- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); +- seq_printf(seq, "%-9s", chtostr(dst->date, 8)); ++ seq_printf(seq, "%-.28s", dst->module_name_version); ++ seq_printf(seq, "%-.8s", dst->date); + seq_printf(seq, "%8d ", dst->module_size); + seq_printf(seq, "%8d ", dst->mpb_size); + seq_printf(seq, "0x%04x", dst->module_flags); +@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); +- seq_printf(seq, "Vendor info : %s\n", +- chtostr((u8 *) (work32 + 2), 16)); +- seq_printf(seq, "Product info : %s\n", +- chtostr((u8 *) (work32 + 6), 16)); +- seq_printf(seq, "Description : %s\n", +- chtostr((u8 *) (work32 + 10), 16)); +- seq_printf(seq, "Product rev. : %s\n", +- chtostr((u8 *) (work32 + 14), 8)); ++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); ++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); ++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); ++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, (u8 *) (work32 + 16), +@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) + } + + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); +- seq_printf(seq, "Module name : %s\n", +- chtostr(result.module_name, 24)); +- seq_printf(seq, "Module revision : %s\n", +- chtostr(result.module_rev, 8)); ++ seq_printf(seq, "Module name : %.24s\n", result.module_name); ++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, result.serial_number, sizeof(result) - 36); +@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) + return 0; + } + +- seq_printf(seq, "Device name : %s\n", +- chtostr(result.device_name, 64)); +- seq_printf(seq, "Service name : %s\n", +- chtostr(result.service_name, 64)); +- seq_printf(seq, "Physical name : %s\n", +- chtostr(result.physical_location, 64)); +- seq_printf(seq, "Instance number : %s\n", +- chtostr(result.instance_number, 4)); ++ seq_printf(seq, "Device name : %.64s\n", result.device_name); ++ seq_printf(seq, "Service name : %.64s\n", result.service_name); ++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location); ++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number); + + return 0; + } +diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c +index a8c08f3..155fe3d 100644 +--- a/drivers/message/i2o/iop.c ++++ b/drivers/message/i2o/iop.c +@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) + + spin_lock_irqsave(&c->context_list_lock, flags); + +- if (unlikely(atomic_inc_and_test(&c->context_list_counter))) +- 
atomic_inc(&c->context_list_counter); ++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter))) ++ atomic_inc_unchecked(&c->context_list_counter); + +- entry->context = atomic_read(&c->context_list_counter); ++ entry->context = atomic_read_unchecked(&c->context_list_counter); + + list_add(&entry->list, &c->context_list); + +@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void) + + #if BITS_PER_LONG == 64 + spin_lock_init(&c->context_list_lock); +- atomic_set(&c->context_list_counter, 0); ++ atomic_set_unchecked(&c->context_list_counter, 0); + INIT_LIST_HEAD(&c->context_list); + #endif + +diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c +index 7ce65f4..e66e9bc 100644 +--- a/drivers/mfd/abx500-core.c ++++ b/drivers/mfd/abx500-core.c +@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list); + + struct abx500_device_entry { + struct list_head list; +- struct abx500_ops ops; ++ abx500_ops_no_const ops; + struct device *dev; + }; + +diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c +index 5c2a06a..8fa077c 100644 +--- a/drivers/mfd/janz-cmodio.c ++++ b/drivers/mfd/janz-cmodio.c +@@ -13,6 +13,7 @@ + + #include <linux/kernel.h> + #include <linux/module.h> ++#include <linux/slab.h> + #include <linux/init.h> + #include <linux/pci.h> + #include <linux/interrupt.h> +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c +index 29d12a7..f900ba4 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.c ++++ b/drivers/misc/lis3lv02d/lis3lv02d.c +@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data) + * the lid is closed. This leads to interrupts as soon as a little move + * is done. + */ +- atomic_inc(&lis3->count); ++ atomic_inc_unchecked(&lis3->count); + + wake_up_interruptible(&lis3->misc_wait); + kill_fasync(&lis3->async_queue, SIGIO, POLL_IN); +@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file) + if (lis3->pm_dev) + pm_runtime_get_sync(lis3->pm_dev); + +- atomic_set(&lis3->count, 0); ++ atomic_set_unchecked(&lis3->count, 0); + return 0; + } + +@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, + add_wait_queue(&lis3->misc_wait, &wait); + while (true) { + set_current_state(TASK_INTERRUPTIBLE); +- data = atomic_xchg(&lis3->count, 0); ++ data = atomic_xchg_unchecked(&lis3->count, 0); + if (data) + break; + +@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) + struct lis3lv02d, miscdev); + + poll_wait(file, &lis3->misc_wait, wait); +- if (atomic_read(&lis3->count)) ++ if (atomic_read_unchecked(&lis3->count)) + return POLLIN | POLLRDNORM; + return 0; + } +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h +index 2b1482a..5d33616 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.h ++++ b/drivers/misc/lis3lv02d/lis3lv02d.h +@@ -266,7 +266,7 @@ struct lis3lv02d { + struct input_polled_dev *idev; /* input device */ + struct platform_device *pdev; /* platform device */ + struct regulator_bulk_data regulators[2]; +- atomic_t count; /* interrupt count after last read */ ++ atomic_unchecked_t count; /* interrupt count after last read */ + union axis_conversion ac; /* hw -> logical axis */ + int mapped_btns[3]; + +diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c +index 2f30bad..c4c13d0 100644 +--- a/drivers/misc/sgi-gru/gruhandles.c ++++ b/drivers/misc/sgi-gru/gruhandles.c +@@ -44,8 +44,8 @@ static void update_mcs_stats(enum 
mcs_op op, unsigned long clks) + unsigned long nsec; + + nsec = CLKS2NSEC(clks); +- atomic_long_inc(&mcs_op_statistics[op].count); +- atomic_long_add(nsec, &mcs_op_statistics[op].total); ++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count); ++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < nsec) + mcs_op_statistics[op].max = nsec; + } +diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c +index 7768b87..f8aac38 100644 +--- a/drivers/misc/sgi-gru/gruprocfs.c ++++ b/drivers/misc/sgi-gru/gruprocfs.c +@@ -32,9 +32,9 @@ + + #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) ++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) + { +- unsigned long val = atomic_long_read(v); ++ unsigned long val = atomic_long_read_unchecked(v); + + seq_printf(s, "%16lu %s\n", val, id); + } +@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) + + seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); + for (op = 0; op < mcsop_last; op++) { +- count = atomic_long_read(&mcs_op_statistics[op].count); +- total = atomic_long_read(&mcs_op_statistics[op].total); ++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); ++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); + max = mcs_op_statistics[op].max; + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, + count ? total / count : 0, max); +diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h +index 5c3ce24..4915ccb 100644 +--- a/drivers/misc/sgi-gru/grutables.h ++++ b/drivers/misc/sgi-gru/grutables.h +@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; + * GRU statistics. 
+ */ + struct gru_stats_s { +- atomic_long_t vdata_alloc; +- atomic_long_t vdata_free; +- atomic_long_t gts_alloc; +- atomic_long_t gts_free; +- atomic_long_t gms_alloc; +- atomic_long_t gms_free; +- atomic_long_t gts_double_allocate; +- atomic_long_t assign_context; +- atomic_long_t assign_context_failed; +- atomic_long_t free_context; +- atomic_long_t load_user_context; +- atomic_long_t load_kernel_context; +- atomic_long_t lock_kernel_context; +- atomic_long_t unlock_kernel_context; +- atomic_long_t steal_user_context; +- atomic_long_t steal_kernel_context; +- atomic_long_t steal_context_failed; +- atomic_long_t nopfn; +- atomic_long_t asid_new; +- atomic_long_t asid_next; +- atomic_long_t asid_wrap; +- atomic_long_t asid_reuse; +- atomic_long_t intr; +- atomic_long_t intr_cbr; +- atomic_long_t intr_tfh; +- atomic_long_t intr_spurious; +- atomic_long_t intr_mm_lock_failed; +- atomic_long_t call_os; +- atomic_long_t call_os_wait_queue; +- atomic_long_t user_flush_tlb; +- atomic_long_t user_unload_context; +- atomic_long_t user_exception; +- atomic_long_t set_context_option; +- atomic_long_t check_context_retarget_intr; +- atomic_long_t check_context_unload; +- atomic_long_t tlb_dropin; +- atomic_long_t tlb_preload_page; +- atomic_long_t tlb_dropin_fail_no_asid; +- atomic_long_t tlb_dropin_fail_upm; +- atomic_long_t tlb_dropin_fail_invalid; +- atomic_long_t tlb_dropin_fail_range_active; +- atomic_long_t tlb_dropin_fail_idle; +- atomic_long_t tlb_dropin_fail_fmm; +- atomic_long_t tlb_dropin_fail_no_exception; +- atomic_long_t tfh_stale_on_fault; +- atomic_long_t mmu_invalidate_range; +- atomic_long_t mmu_invalidate_page; +- atomic_long_t flush_tlb; +- atomic_long_t flush_tlb_gru; +- atomic_long_t flush_tlb_gru_tgh; +- atomic_long_t flush_tlb_gru_zero_asid; ++ atomic_long_unchecked_t vdata_alloc; ++ atomic_long_unchecked_t vdata_free; ++ atomic_long_unchecked_t gts_alloc; ++ atomic_long_unchecked_t gts_free; ++ atomic_long_unchecked_t gms_alloc; ++ atomic_long_unchecked_t gms_free; ++ atomic_long_unchecked_t gts_double_allocate; ++ atomic_long_unchecked_t assign_context; ++ atomic_long_unchecked_t assign_context_failed; ++ atomic_long_unchecked_t free_context; ++ atomic_long_unchecked_t load_user_context; ++ atomic_long_unchecked_t load_kernel_context; ++ atomic_long_unchecked_t lock_kernel_context; ++ atomic_long_unchecked_t unlock_kernel_context; ++ atomic_long_unchecked_t steal_user_context; ++ atomic_long_unchecked_t steal_kernel_context; ++ atomic_long_unchecked_t steal_context_failed; ++ atomic_long_unchecked_t nopfn; ++ atomic_long_unchecked_t asid_new; ++ atomic_long_unchecked_t asid_next; ++ atomic_long_unchecked_t asid_wrap; ++ atomic_long_unchecked_t asid_reuse; ++ atomic_long_unchecked_t intr; ++ atomic_long_unchecked_t intr_cbr; ++ atomic_long_unchecked_t intr_tfh; ++ atomic_long_unchecked_t intr_spurious; ++ atomic_long_unchecked_t intr_mm_lock_failed; ++ atomic_long_unchecked_t call_os; ++ atomic_long_unchecked_t call_os_wait_queue; ++ atomic_long_unchecked_t user_flush_tlb; ++ atomic_long_unchecked_t user_unload_context; ++ atomic_long_unchecked_t user_exception; ++ atomic_long_unchecked_t set_context_option; ++ atomic_long_unchecked_t check_context_retarget_intr; ++ atomic_long_unchecked_t check_context_unload; ++ atomic_long_unchecked_t tlb_dropin; ++ atomic_long_unchecked_t tlb_preload_page; ++ atomic_long_unchecked_t tlb_dropin_fail_no_asid; ++ atomic_long_unchecked_t tlb_dropin_fail_upm; ++ atomic_long_unchecked_t tlb_dropin_fail_invalid; ++ atomic_long_unchecked_t 
tlb_dropin_fail_range_active; ++ atomic_long_unchecked_t tlb_dropin_fail_idle; ++ atomic_long_unchecked_t tlb_dropin_fail_fmm; ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception; ++ atomic_long_unchecked_t tfh_stale_on_fault; ++ atomic_long_unchecked_t mmu_invalidate_range; ++ atomic_long_unchecked_t mmu_invalidate_page; ++ atomic_long_unchecked_t flush_tlb; ++ atomic_long_unchecked_t flush_tlb_gru; ++ atomic_long_unchecked_t flush_tlb_gru_tgh; ++ atomic_long_unchecked_t flush_tlb_gru_zero_asid; + +- atomic_long_t copy_gpa; +- atomic_long_t read_gpa; ++ atomic_long_unchecked_t copy_gpa; ++ atomic_long_unchecked_t read_gpa; + +- atomic_long_t mesq_receive; +- atomic_long_t mesq_receive_none; +- atomic_long_t mesq_send; +- atomic_long_t mesq_send_failed; +- atomic_long_t mesq_noop; +- atomic_long_t mesq_send_unexpected_error; +- atomic_long_t mesq_send_lb_overflow; +- atomic_long_t mesq_send_qlimit_reached; +- atomic_long_t mesq_send_amo_nacked; +- atomic_long_t mesq_send_put_nacked; +- atomic_long_t mesq_page_overflow; +- atomic_long_t mesq_qf_locked; +- atomic_long_t mesq_qf_noop_not_full; +- atomic_long_t mesq_qf_switch_head_failed; +- atomic_long_t mesq_qf_unexpected_error; +- atomic_long_t mesq_noop_unexpected_error; +- atomic_long_t mesq_noop_lb_overflow; +- atomic_long_t mesq_noop_qlimit_reached; +- atomic_long_t mesq_noop_amo_nacked; +- atomic_long_t mesq_noop_put_nacked; +- atomic_long_t mesq_noop_page_overflow; ++ atomic_long_unchecked_t mesq_receive; ++ atomic_long_unchecked_t mesq_receive_none; ++ atomic_long_unchecked_t mesq_send; ++ atomic_long_unchecked_t mesq_send_failed; ++ atomic_long_unchecked_t mesq_noop; ++ atomic_long_unchecked_t mesq_send_unexpected_error; ++ atomic_long_unchecked_t mesq_send_lb_overflow; ++ atomic_long_unchecked_t mesq_send_qlimit_reached; ++ atomic_long_unchecked_t mesq_send_amo_nacked; ++ atomic_long_unchecked_t mesq_send_put_nacked; ++ atomic_long_unchecked_t mesq_page_overflow; ++ atomic_long_unchecked_t mesq_qf_locked; ++ atomic_long_unchecked_t mesq_qf_noop_not_full; ++ atomic_long_unchecked_t mesq_qf_switch_head_failed; ++ atomic_long_unchecked_t mesq_qf_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_lb_overflow; ++ atomic_long_unchecked_t mesq_noop_qlimit_reached; ++ atomic_long_unchecked_t mesq_noop_amo_nacked; ++ atomic_long_unchecked_t mesq_noop_put_nacked; ++ atomic_long_unchecked_t mesq_noop_page_overflow; + + }; + +@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, + tghop_invalidate, mcsop_last}; + + struct mcs_op_statistic { +- atomic_long_t count; +- atomic_long_t total; ++ atomic_long_unchecked_t count; ++ atomic_long_unchecked_t total; + unsigned long max; + }; + +@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + + #define STAT(id) do { \ + if (gru_options & OPT_STATS) \ +- atomic_long_inc(&gru_stats.id); \ ++ atomic_long_inc_unchecked(&gru_stats.id); \ + } while (0) + + #ifdef CONFIG_SGI_GRU_DEBUG +diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h +index 851b2f2..a4ec097 100644 +--- a/drivers/misc/sgi-xp/xp.h ++++ b/drivers/misc/sgi-xp/xp.h +@@ -289,7 +289,7 @@ struct xpc_interface { + xpc_notify_func, void *); + void (*received) (short, int, void *); + enum xp_retval (*partid_to_nasids) (short, void *); +-}; ++} __no_const; + + extern struct xpc_interface xpc_interface; + +diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h +index b94d5f7..7f494c5 
100644 +--- a/drivers/misc/sgi-xp/xpc.h ++++ b/drivers/misc/sgi-xp/xpc.h +@@ -835,6 +835,7 @@ struct xpc_arch_operations { + void (*received_payload) (struct xpc_channel *, void *); + void (*notify_senders_of_disconnect) (struct xpc_channel *); + }; ++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const; + + /* struct xpc_partition act_state values (for XPC HB) */ + +@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[]; + /* found in xpc_main.c */ + extern struct device *xpc_part; + extern struct device *xpc_chan; +-extern struct xpc_arch_operations xpc_arch_ops; ++extern xpc_arch_operations_no_const xpc_arch_ops; + extern int xpc_disengage_timelimit; + extern int xpc_disengage_timedout; + extern int xpc_activate_IRQ_rcvd; +diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c +index 8d082b4..aa749ae 100644 +--- a/drivers/misc/sgi-xp/xpc_main.c ++++ b/drivers/misc/sgi-xp/xpc_main.c +@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = { + .notifier_call = xpc_system_die, + }; + +-struct xpc_arch_operations xpc_arch_ops; ++xpc_arch_operations_no_const xpc_arch_ops; + + /* + * Timer function to enforce the timelimit on the partition disengage. +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c +index 6878a94..fe5c5f1 100644 +--- a/drivers/mmc/host/sdhci-pci.c ++++ b/drivers/mmc/host/sdhci-pci.c +@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = { + .probe = via_probe, + }; + +-static const struct pci_device_id pci_ids[] __devinitdata = { ++static const struct pci_device_id pci_ids[] __devinitconst = { + { + .vendor = PCI_VENDOR_ID_RICOH, + .device = PCI_DEVICE_ID_RICOH_R5C822, +diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c +index e9fad91..0a7a16a 100644 +--- a/drivers/mtd/devices/doc2000.c ++++ b/drivers/mtd/devices/doc2000.c +@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, + + /* The ECC will not be calculated correctly if less than 512 is written */ + /* DBB- +- if (len != 0x200 && eccbuf) ++ if (len != 0x200) + printk(KERN_WARNING + "ECC needs a full sector write (adr: %lx size %lx)\n", + (long) to, (long) len); +diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c +index a3f7a27..234016e 100644 +--- a/drivers/mtd/devices/doc2001.c ++++ b/drivers/mtd/devices/doc2001.c +@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, + struct Nand *mychip = &this->chips[from >> (this->chipshift)]; + + /* Don't allow read past end of device */ +- if (from >= this->totlen) ++ if (from >= this->totlen || !len) + return -EINVAL; + + /* Don't allow a single read to cross a 512-byte block boundary */ +diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c +index 3984d48..28aa897 100644 +--- a/drivers/mtd/nand/denali.c ++++ b/drivers/mtd/nand/denali.c +@@ -26,6 +26,7 @@ + #include <linux/pci.h> + #include <linux/mtd/mtd.h> + #include <linux/module.h> ++#include <linux/slab.h> + + #include "denali.h" + +diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c +index ac40925..483b753 100644 +--- a/drivers/mtd/nftlmount.c ++++ b/drivers/mtd/nftlmount.c +@@ -24,6 +24,7 @@ + #include <asm/errno.h> + #include <linux/delay.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/mtd/mtd.h> + #include <linux/mtd/nand.h> + #include <linux/mtd/nftl.h> +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c +index 6c3fb5a..5b2eeb0 100644 
+--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -1311,7 +1311,7 @@ module_exit(ubi_exit); + static int __init bytes_str_to_int(const char *str) + { + char *endp; +- unsigned long result; ++ unsigned long result, scale = 1; + + result = simple_strtoul(str, &endp, 0); + if (str == endp || result >= INT_MAX) { +@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str) + + switch (*endp) { + case 'G': +- result *= 1024; ++ scale *= 1024; + case 'M': +- result *= 1024; ++ scale *= 1024; + case 'K': +- result *= 1024; ++ scale *= 1024; + if (endp[1] == 'i' && endp[2] == 'B') + endp += 2; + case '\0': +@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str) + return -EINVAL; + } + +- return result; ++ if (result*scale >= INT_MAX) { ++ printk(KERN_ERR "UBI error: incorrect bytes count: "%s"\n", ++ str); ++ return -EINVAL; ++ } ++ ++ return result*scale; + } + + /** +diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c +index ab80c0d..aec8580 100644 +--- a/drivers/mtd/ubi/debug.c ++++ b/drivers/mtd/ubi/debug.c +@@ -338,6 +338,8 @@ out: + + /* Write an UBI debugfs file */ + static ssize_t dfs_file_write(struct file *file, const char __user *user_buf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t dfs_file_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) + { + unsigned long ubi_num = (unsigned long)file->private_data; +diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c +index 1feae59..c2a61d2 100644 +--- a/drivers/net/ethernet/atheros/atlx/atl2.c ++++ b/drivers/net/ethernet/atheros/atlx/atl2.c +@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw) + */ + + #define ATL2_PARAM(X, desc) \ +- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ ++ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); + #else +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +index 9a517c2..a50cfcb 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj { + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); +-}; ++} __no_const; + + /********************** Set multicast group ***********************************/ + +diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h +index 94b4bd0..73c02de 100644 +--- a/drivers/net/ethernet/broadcom/tg3.h ++++ b/drivers/net/ethernet/broadcom/tg3.h +@@ -134,6 +134,7 @@ + #define CHIPREV_ID_5750_A0 0x4000 + #define CHIPREV_ID_5750_A1 0x4001 + #define CHIPREV_ID_5750_A3 0x4003 ++#define CHIPREV_ID_5750_C1 0x4201 + #define CHIPREV_ID_5750_C2 0x4202 + #define CHIPREV_ID_5752_A0_HW 0x5000 + #define CHIPREV_ID_5752_A0 0x6000 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h +index c5f5479..2e8c260 100644 +--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h ++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h +@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev, + */ + struct l2t_skb_cb { + arp_failure_handler_func arp_failure_handler; +-}; ++} __no_const; + + #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c 
b/drivers/net/ethernet/chelsio/cxgb3/sge.c +index cfb60e1..9c76da7 100644 +--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c +@@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, + * of the SW ring. + */ + static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, ++ size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4); ++static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t * phys, void *metadata) + { + size_t len = nelem * elem_size; +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c +index 140254c..5b8a0a6 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c +@@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) + */ + static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, ++ size_t stat_size, int node) __size_overflow(2,4); ++static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, ++ size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, int node) + { + size_t len = nelem * elem_size + stat_size; +diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +index 8d5d55a..a3c3474 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +@@ -730,6 +730,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) + */ + static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, + size_t swsize, dma_addr_t *busaddrp, void *swringp, ++ size_t stat_size) __size_overflow(2,4); ++static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, ++ size_t swsize, dma_addr_t *busaddrp, void *swringp, + size_t stat_size) + { + /* +diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c +index 871bcaa..4043505 100644 +--- a/drivers/net/ethernet/dec/tulip/de4x5.c ++++ b/drivers/net/ethernet/dec/tulip/de4x5.c +@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + for (i=0; i<ETH_ALEN; i++) { + tmp.addr[i] = dev->dev_addr[i]; + } +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; ++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + + case DE4X5_SET_HWADDR: /* Set the hardware address */ +@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + spin_lock_irqsave(&lp->lock, flags); + memcpy(&statbuf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); +- if (copy_to_user(ioc->data, &statbuf, ioc->len)) ++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len)) + return -EFAULT; + break; + } +diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c +index 14d5b61..1398636 100644 +--- a/drivers/net/ethernet/dec/tulip/eeprom.c ++++ b/drivers/net/ethernet/dec/tulip/eeprom.c +@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = { + {NULL}}; + + +-static const char *block_name[] __devinitdata = { ++static const char *block_name[] __devinitconst = { + "21140 non-MII", + "21140 MII PHY", + "21142 Serial PHY", +diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c 
b/drivers/net/ethernet/dec/tulip/winbond-840.c +index 4d01219..b58d26d 100644 +--- a/drivers/net/ethernet/dec/tulip/winbond-840.c ++++ b/drivers/net/ethernet/dec/tulip/winbond-840.c +@@ -236,7 +236,7 @@ struct pci_id_info { + int drv_flags; /* Driver use, intended as capability flags. */ + }; + +-static const struct pci_id_info pci_id_tbl[] __devinitdata = { ++static const struct pci_id_info pci_id_tbl[] __devinitconst = { + { /* Sometime a Level-One switch card. */ + "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, + { "Winbond W89c840", CanHaveMII | HasBrokenTx}, +diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c +index dcd7f7a..ecb7fb3 100644 +--- a/drivers/net/ethernet/dlink/sundance.c ++++ b/drivers/net/ethernet/dlink/sundance.c +@@ -218,7 +218,7 @@ enum { + struct pci_id_info { + const char *name; + }; +-static const struct pci_id_info pci_id_tbl[] __devinitdata = { ++static const struct pci_id_info pci_id_tbl[] __devinitconst = { + {"D-Link DFE-550TX FAST Ethernet Adapter"}, + {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, + {"D-Link DFE-580TX 4 port Server Adapter"}, +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index bf266a0..e024af7 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) + + if (wrapped) + newacc += 65536; +- ACCESS_ONCE(*acc) = newacc; ++ ACCESS_ONCE_RW(*acc) = newacc; + } + + void be_parse_stats(struct be_adapter *adapter) +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index fb5579a..debdffa 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -30,6 +30,8 @@ + #include <linux/netdevice.h> + #include <linux/phy.h> + #include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/irqreturn.h> + #include <net/ip.h> + + #include "ftgmac100.h" +diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c +index a127cb2..0d043cd 100644 +--- a/drivers/net/ethernet/faraday/ftmac100.c ++++ b/drivers/net/ethernet/faraday/ftmac100.c +@@ -30,6 +30,8 @@ + #include <linux/module.h> + #include <linux/netdevice.h> + #include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/irqreturn.h> + + #include "ftmac100.h" + +diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c +index 61d2bdd..7f1154a 100644 +--- a/drivers/net/ethernet/fealnx.c ++++ b/drivers/net/ethernet/fealnx.c +@@ -150,7 +150,7 @@ struct chip_info { + int flags; + }; + +-static const struct chip_info skel_netdrv_tbl[] __devinitdata = { ++static const struct chip_info skel_netdrv_tbl[] __devinitconst = { + { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, + { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, + { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, +diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c +index e1159e5..e18684d 100644 +--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c ++++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c +@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; +- struct e1000_mac_operations *func = &mac->ops; ++ e1000_mac_operations_no_const *func = 
&mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { +diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c +index a3e65fd..f451444 100644 +--- a/drivers/net/ethernet/intel/e1000e/82571.c ++++ b/drivers/net/ethernet/intel/e1000e/82571.c +@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; +- struct e1000_mac_operations *func = &mac->ops; ++ e1000_mac_operations_no_const *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; +diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h +index 2967039..ca8c40c 100644 +--- a/drivers/net/ethernet/intel/e1000e/hw.h ++++ b/drivers/net/ethernet/intel/e1000e/hw.h +@@ -778,6 +778,7 @@ struct e1000_mac_operations { + void (*write_vfta)(struct e1000_hw *, u32, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + /* + * When to use various PHY register access functions: +@@ -818,6 +819,7 @@ struct e1000_phy_operations { + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + }; ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; + + /* Function pointers for the NVM. */ + struct e1000_nvm_operations { +@@ -829,9 +831,10 @@ struct e1000_nvm_operations { + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + }; ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; + + struct e1000_mac_info { +- struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + +@@ -872,7 +875,7 @@ struct e1000_mac_info { + }; + + struct e1000_phy_info { +- struct e1000_phy_operations ops; ++ e1000_phy_operations_no_const ops; + + enum e1000_phy_type type; + +@@ -906,7 +909,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { +- struct e1000_nvm_operations ops; ++ e1000_nvm_operations_no_const ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; +diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h +index 4519a13..f97fcd0 100644 +--- a/drivers/net/ethernet/intel/igb/e1000_hw.h ++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h +@@ -314,6 +314,7 @@ struct e1000_mac_operations { + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); +@@ -330,6 +331,7 @@ struct e1000_phy_operations { + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + }; ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const; + + struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); +@@ -339,6 +341,7 @@ struct e1000_nvm_operations { + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + }; ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const; + + struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); +@@ -350,7 +353,7 @@ struct e1000_info { + extern const struct e1000_info e1000_82575_info; + + struct e1000_mac_info { +- struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + + u8 
addr[6]; + u8 perm_addr[6]; +@@ -388,7 +391,7 @@ struct e1000_mac_info { + }; + + struct e1000_phy_info { +- struct e1000_phy_operations ops; ++ e1000_phy_operations_no_const ops; + + enum e1000_phy_type type; + +@@ -423,7 +426,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { +- struct e1000_nvm_operations ops; ++ e1000_nvm_operations_no_const ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + +@@ -468,6 +471,7 @@ struct e1000_mbx_operations { + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); + }; ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; + + struct e1000_mbx_stats { + u32 msgs_tx; +@@ -479,7 +483,7 @@ struct e1000_mbx_stats { + }; + + struct e1000_mbx_info { +- struct e1000_mbx_operations ops; ++ e1000_mbx_operations_no_const ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; +diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h +index d7ed58f..64cde36 100644 +--- a/drivers/net/ethernet/intel/igbvf/vf.h ++++ b/drivers/net/ethernet/intel/igbvf/vf.h +@@ -189,9 +189,10 @@ struct e1000_mac_operations { + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); + }; ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const; + + struct e1000_mac_info { +- struct e1000_mac_operations ops; ++ e1000_mac_operations_no_const ops; + u8 addr[6]; + u8 perm_addr[6]; + +@@ -213,6 +214,7 @@ struct e1000_mbx_operations { + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); + }; ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const; + + struct e1000_mbx_stats { + u32 msgs_tx; +@@ -224,7 +226,7 @@ struct e1000_mbx_stats { + }; + + struct e1000_mbx_info { +- struct e1000_mbx_operations ops; ++ e1000_mbx_operations_no_const ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +index 6c5cca8..de8ef63 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations { + s32 (*update_checksum)(struct ixgbe_hw *); + u16 (*calc_checksum)(struct ixgbe_hw *); + }; ++typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const; + + struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); +@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations { + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + }; ++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const; + + struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); +@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations { + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*check_overtemp)(struct ixgbe_hw *); + }; ++typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const; + + struct ixgbe_eeprom_info { +- struct ixgbe_eeprom_operations ops; ++ ixgbe_eeprom_operations_no_const ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; +@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info { + + #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 + struct ixgbe_mac_info { +- struct ixgbe_mac_operations ops; ++ ixgbe_mac_operations_no_const ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 
perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; +@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info { + }; + + struct ixgbe_phy_info { +- struct ixgbe_phy_operations ops; ++ ixgbe_phy_operations_no_const ops; + struct mdio_if_info mdio; + enum ixgbe_phy_type type; + u32 id; +@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations { + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); + }; ++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const; + + struct ixgbe_mbx_stats { + u32 msgs_tx; +@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats { + }; + + struct ixgbe_mbx_info { +- struct ixgbe_mbx_operations ops; ++ ixgbe_mbx_operations_no_const ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; +diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h +index 10306b4..28df758 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/vf.h ++++ b/drivers/net/ethernet/intel/ixgbevf/vf.h +@@ -70,6 +70,7 @@ struct ixgbe_mac_operations { + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + }; ++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const; + + enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, +@@ -79,7 +80,7 @@ enum ixgbe_mac_type { + }; + + struct ixgbe_mac_info { +- struct ixgbe_mac_operations ops; ++ ixgbe_mac_operations_no_const ops; + u8 addr[6]; + u8 perm_addr[6]; + +@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations { + s32 (*check_for_ack)(struct ixgbe_hw *); + s32 (*check_for_rst)(struct ixgbe_hw *); + }; ++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const; + + struct ixgbe_mbx_stats { + u32 msgs_tx; +@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats { + }; + + struct ixgbe_mbx_info { +- struct ixgbe_mbx_operations ops; ++ ixgbe_mbx_operations_no_const ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 udelay; +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c +index 94bbc85..78c12e6 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -40,6 +40,7 @@ + #include <linux/dma-mapping.h> + #include <linux/slab.h> + #include <linux/io-mapping.h> ++#include <linux/sched.h> + + #include <linux/mlx4/device.h> + #include <linux/mlx4/doorbell.h> +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h +index 5046a64..71ca936 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h +@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs { + void (*link_down)(struct __vxge_hw_device *devh); + void (*crit_err)(struct __vxge_hw_device *devh, + enum vxge_hw_event type, u64 ext_data); +-}; ++} __no_const; + + /* + * struct __vxge_hw_blockpool_entry - Block private data structure +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h +index 4a518a3..936b334 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h ++++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h +@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs { + struct vxge_hw_mempool_dma *dma_object, + u32 index, + u32 is_last); +-}; ++} __no_const; + + #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ + ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 0cf2351..56c4cef 100644 +--- 
a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -698,17 +698,17 @@ struct rtl8169_private { + struct mdio_ops { + void (*write)(void __iomem *, int, int); + int (*read)(void __iomem *, int); +- } mdio_ops; ++ } __no_const mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); +- } pll_power_ops; ++ } __no_const pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); +- } jumbo_ops; ++ } __no_const jumbo_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); +diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c +index 1b4658c..a30dabb 100644 +--- a/drivers/net/ethernet/sis/sis190.c ++++ b/drivers/net/ethernet/sis/sis190.c +@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, + static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, + struct net_device *dev) + { +- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; ++ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 }; + struct sis190_private *tp = netdev_priv(dev); + struct pci_dev *isa_bridge; + u8 reg, tmp8; +diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +index 41e6b33..8e89b0f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +@@ -139,8 +139,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) + + writel(value, ioaddr + MMC_CNTRL); + +- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", +- MMC_CNTRL, value); ++// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", ++// MMC_CNTRL, value); + } + + /* To mask all all interrupts.*/ +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 486b404..0d6677d 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; + struct ppp_stats stats; + struct ppp_comp_stats cstats; +- char *vers; + + switch (cmd) { + case SIOCGPPPSTATS: +@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + break; + + case SIOCGPPPVER: +- vers = PPP_VERSION; +- if (copy_to_user(addr, vers, strlen(vers) + 1)) ++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) + break; + err = 0; + break; +diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c +index 515f122..41dd273 100644 +--- a/drivers/net/tokenring/abyss.c ++++ b/drivers/net/tokenring/abyss.c +@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = { + + static int __init abyss_init (void) + { +- abyss_netdev_ops = tms380tr_netdev_ops; ++ pax_open_kernel(); ++ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); + +- abyss_netdev_ops.ndo_open = abyss_open; +- abyss_netdev_ops.ndo_stop = abyss_close; ++ *(void **)&abyss_netdev_ops.ndo_open = abyss_open; ++ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close; ++ pax_close_kernel(); + + return pci_register_driver(&abyss_driver); + } +diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c +index 6153cfd..cf69c1c 100644 +--- a/drivers/net/tokenring/madgemc.c ++++ 
b/drivers/net/tokenring/madgemc.c +@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = { + + static int __init madgemc_init (void) + { +- madgemc_netdev_ops = tms380tr_netdev_ops; +- madgemc_netdev_ops.ndo_open = madgemc_open; +- madgemc_netdev_ops.ndo_stop = madgemc_close; ++ pax_open_kernel(); ++ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open; ++ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close; ++ pax_close_kernel(); + + return mca_register_driver (&madgemc_driver); + } +diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c +index 8d362e6..f91cc52 100644 +--- a/drivers/net/tokenring/proteon.c ++++ b/drivers/net/tokenring/proteon.c +@@ -353,9 +353,11 @@ static int __init proteon_init(void) + struct platform_device *pdev; + int i, num = 0, err = 0; + +- proteon_netdev_ops = tms380tr_netdev_ops; +- proteon_netdev_ops.ndo_open = proteon_open; +- proteon_netdev_ops.ndo_stop = tms380tr_close; ++ pax_open_kernel(); ++ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&proteon_netdev_ops.ndo_open = proteon_open; ++ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close; ++ pax_close_kernel(); + + err = platform_driver_register(&proteon_driver); + if (err) +diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c +index 46db5c5..37c1536 100644 +--- a/drivers/net/tokenring/skisa.c ++++ b/drivers/net/tokenring/skisa.c +@@ -363,9 +363,11 @@ static int __init sk_isa_init(void) + struct platform_device *pdev; + int i, num = 0, err = 0; + +- sk_isa_netdev_ops = tms380tr_netdev_ops; +- sk_isa_netdev_ops.ndo_open = sk_isa_open; +- sk_isa_netdev_ops.ndo_stop = tms380tr_close; ++ pax_open_kernel(); ++ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops)); ++ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open; ++ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close; ++ pax_close_kernel(); + + err = platform_driver_register(&sk_isa_driver); + if (err) +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 7bea9c6..7ef073c 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -359,7 +359,7 @@ static void tun_free_netdev(struct net_device *dev) + { + struct tun_struct *tun = netdev_priv(dev); + +- sock_put(tun->socket.sk); ++ sk_release_kernel(tun->socket.sk); + } + + /* Net device open. 
*/ +@@ -979,10 +979,18 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, + return ret; + } + ++static int tun_release(struct socket *sock) ++{ ++ if (sock->sk) ++ sock_put(sock->sk); ++ return 0; ++} ++ + /* Ops structure to mimic raw sockets with tun */ + static const struct proto_ops tun_socket_ops = { + .sendmsg = tun_sendmsg, + .recvmsg = tun_recvmsg, ++ .release = tun_release, + }; + + static struct proto tun_proto = { +@@ -1109,10 +1117,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) + tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); + + err = -ENOMEM; +- sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto); ++ sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto); + if (!sk) + goto err_free_dev; + ++ sk_change_net(sk, net); + tun->socket.wq = &tun->wq; + init_waitqueue_head(&tun->wq.wait); + tun->socket.ops = &tun_socket_ops; +@@ -1173,7 +1182,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) + return 0; + + err_free_sk: +- sock_put(sk); ++ tun_free_netdev(dev); + err_free_dev: + free_netdev(dev); + failed: +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c +index 304fe78..db112fa 100644 +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -71,7 +71,7 @@ + #include <asm/byteorder.h> + #include <linux/serial_core.h> + #include <linux/serial.h> +- ++#include <asm/local.h> + + #define MOD_AUTHOR "Option Wireless" + #define MOD_DESCRIPTION "USB High Speed Option driver" +@@ -257,7 +257,7 @@ struct hso_serial { + + /* from usb_serial_port */ + struct tty_struct *tty; +- int open_count; ++ local_t open_count; + spinlock_t serial_lock; + + int (*write_data) (struct hso_serial *serial); +@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) + struct urb *urb; + + urb = serial->rx_urb[0]; +- if (serial->open_count > 0) { ++ if (local_read(&serial->open_count) > 0) { + count = put_rxbuf_data(urb, serial); + if (count == -1) + return; +@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) + DUMP1(urb->transfer_buffer, urb->actual_length); + + /* Anyone listening? 
*/ +- if (serial->open_count == 0) ++ if (local_read(&serial->open_count) == 0) + return; + + if (status == 0) { +@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + spin_unlock_irq(&serial->serial_lock); + + /* check for port already opened, if not set the termios */ +- serial->open_count++; +- if (serial->open_count == 1) { ++ if (local_inc_return(&serial->open_count) == 1) { + serial->rx_state = RX_IDLE; + /* Force default termio settings */ + _hso_serial_set_termios(tty, NULL); +@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + result = hso_start_serial_device(serial->parent, GFP_KERNEL); + if (result) { + hso_stop_serial_device(serial->parent); +- serial->open_count--; ++ local_dec(&serial->open_count); + kref_put(&serial->parent->ref, hso_serial_ref_free); + } + } else { +@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) + + /* reset the rts and dtr */ + /* do the actual close */ +- serial->open_count--; ++ local_dec(&serial->open_count); + +- if (serial->open_count <= 0) { +- serial->open_count = 0; ++ if (local_read(&serial->open_count) <= 0) { ++ local_set(&serial->open_count, 0); + spin_lock_irq(&serial->serial_lock); + if (serial->tty == tty) { + serial->tty->driver_data = NULL; +@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) + + /* the actual setup */ + spin_lock_irqsave(&serial->serial_lock, flags); +- if (serial->open_count) ++ if (local_read(&serial->open_count)) + _hso_serial_set_termios(tty, old); + else + tty->termios = old; +@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb) + D1("Pending read interrupt on port %d\n", i); + spin_lock(&serial->serial_lock); + if (serial->rx_state == RX_IDLE && +- serial->open_count > 0) { ++ local_read(&serial->open_count) > 0) { + /* Setup and send a ctrl req read on + * port i */ + if (!serial->rx_urb_filled[0]) { +@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface) + /* Start all serial ports */ + for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { + if (serial_table[i] && (serial_table[i]->interface == iface)) { +- if (dev2ser(serial_table[i])->open_count) { ++ if (local_read(&dev2ser(serial_table[i])->open_count)) { + result = + hso_start_serial_device(serial_table[i], GFP_NOIO); + hso_kick_transmit(dev2ser(serial_table[i])); +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index e662cbc..8d4a102 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, + * Return with error code if any of the queue indices + * is out of range + */ +- if (p->ring_index[i] < 0 || +- p->ring_index[i] >= adapter->num_rx_queues) ++ if (p->ring_index[i] >= adapter->num_rx_queues) + return -EINVAL; + } + +diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h +index 0f9ee46..e2d6e65 100644 +--- a/drivers/net/wireless/ath/ath.h ++++ b/drivers/net/wireless/ath/ath.h +@@ -119,6 +119,7 @@ struct ath_ops { + void (*write_flush) (void *); + u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); + }; ++typedef struct ath_ops __no_const ath_ops_no_const; + + struct ath_common; + struct ath_bus_ops; +diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c +index 8c5ce8b..abf101b 100644 +--- a/drivers/net/wireless/ath/ath5k/debug.c ++++ 
b/drivers/net/wireless/ath/ath5k/debug.c +@@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, + + static ssize_t write_file_debug(struct file *file, + const char __user *userbuf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t write_file_debug(struct file *file, ++ const char __user *userbuf, + size_t count, loff_t *ppos) + { + struct ath5k_hw *ah = file->private_data; +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +index b592016..fe47870 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c ++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + +- ACCESS_ONCE(ads->ds_link) = i->link; +- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->ds_link) = i->link; ++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; + + ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); + ctl6 = SM(i->keytype, AR_EncrType); +@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + + if ((i->is_first || i->is_last) && + i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { +- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ds_ctl2) = 0; +- ACCESS_ONCE(ads->ds_ctl3) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0; + } + + if (!i->is_first) { +- ACCESS_ONCE(ads->ds_ctl0) = 0; +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + return; + } + +@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + break; + } + +- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : + (i->flags & ATH9K_TXDESC_CTSENA ? 
AR_CTSEnable : 0)); + +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + + if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) + return; + +- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c +index f5ae3c6..7936af3 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c ++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c +@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + (i->qcu << AR_TxQcuNum_S) | 0x17; + + checksum += val; +- ACCESS_ONCE(ads->info) = val; ++ ACCESS_ONCE_RW(ads->info) = val; + + checksum += i->link; +- ACCESS_ONCE(ads->link) = i->link; ++ ACCESS_ONCE_RW(ads->link) = i->link; + + checksum += i->buf_addr[0]; +- ACCESS_ONCE(ads->data0) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; + checksum += i->buf_addr[1]; +- ACCESS_ONCE(ads->data1) = i->buf_addr[1]; ++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; + checksum += i->buf_addr[2]; +- ACCESS_ONCE(ads->data2) = i->buf_addr[2]; ++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; + checksum += i->buf_addr[3]; +- ACCESS_ONCE(ads->data3) = i->buf_addr[3]; ++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; + + checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl3) = val; ++ ACCESS_ONCE_RW(ads->ctl3) = val; + checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl5) = val; ++ ACCESS_ONCE_RW(ads->ctl5) = val; + checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl7) = val; ++ ACCESS_ONCE_RW(ads->ctl7) = val; + checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl9) = val; ++ ACCESS_ONCE_RW(ads->ctl9) = val; + + checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); +- ACCESS_ONCE(ads->ctl10) = checksum; ++ ACCESS_ONCE_RW(ads->ctl10) = checksum; + + if (i->is_first || i->is_last) { +- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ctl13) = 0; +- ACCESS_ONCE(ads->ctl14) = 0; ++ ACCESS_ONCE_RW(ads->ctl13) = 0; ++ ACCESS_ONCE_RW(ads->ctl14) = 0; + } + + ads->ctl20 = 0; +@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + + ctl17 = SM(i->keytype, AR_EncrType); + if (!i->is_first) { +- ACCESS_ONCE(ads->ctl11) = 0; +- ACCESS_ONCE(ads->ctl12) = i->is_last ? 
0 : AR_TxMore; +- ACCESS_ONCE(ads->ctl15) = 0; +- ACCESS_ONCE(ads->ctl16) = 0; +- ACCESS_ONCE(ads->ctl17) = ctl17; +- ACCESS_ONCE(ads->ctl18) = 0; +- ACCESS_ONCE(ads->ctl19) = 0; ++ ACCESS_ONCE_RW(ads->ctl11) = 0; ++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore; ++ ACCESS_ONCE_RW(ads->ctl15) = 0; ++ ACCESS_ONCE_RW(ads->ctl16) = 0; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl18) = 0; ++ ACCESS_ONCE_RW(ads->ctl19) = 0; + return; + } + +- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; + ctl12 |= SM(val, AR_PAPRDChainMask); + +- ACCESS_ONCE(ads->ctl12) = ctl12; +- ACCESS_ONCE(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl12) = ctl12; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; + +- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) + | SM(i->rtscts_rate, AR_RTSCTSRate); + +- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; ++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; + } + + static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) +diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c +index 2741203..837a960 100644 +--- a/drivers/net/wireless/ath/ath9k/debug.c ++++ b/drivers/net/wireless/ath/ath9k/debug.c +@@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, + } + + static ssize_t write_file_debug(struct file *file, const char __user *user_buf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t write_file_debug(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) + { + struct ath_softc *sc = file->private_data; +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +index d3ff33c..c98bcda 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +@@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, + } + + static ssize_t write_file_debug(struct file *file, const char __user *user_buf, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t write_file_debug(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) + { + struct ath9k_htc_priv *priv = file->private_data; +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h +index 1bd8edf..10c6d30 100644 +--- a/drivers/net/wireless/ath/ath9k/hw.h ++++ b/drivers/net/wireless/ath/ath9k/hw.h +@@ -605,7 +605,7 @@ struct ath_hw_private_ops { + + /* ANI */ + void (*ani_cache_ini_regs)(struct ath_hw *ah); +-}; ++} __no_const; + + /** + * struct ath_hw_ops - callbacks used by hardware code and driver code +@@ -635,7 +635,7 @@ struct ath_hw_ops { + void 
(*antdiv_comb_conf_set)(struct ath_hw *ah, + struct ath_hw_antcomb_conf *antconf); + +-}; ++} __no_const; + + struct ath_nf_limits { + s16 max; +@@ -655,7 +655,7 @@ enum ath_cal_list { + #define AH_FASTCC 0x4 + + struct ath_hw { +- struct ath_ops reg_ops; ++ ath_ops_no_const reg_ops; + + struct ieee80211_hw *hw; + struct ath_common common; +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +index bea8524..c677c06 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h ++++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +@@ -547,7 +547,7 @@ struct phy_func_ptr { + void (*carrsuppr)(struct brcms_phy *); + s32 (*rxsigpwr)(struct brcms_phy *, s32); + void (*detach)(struct brcms_phy *); +-}; ++} __no_const; + + struct brcms_phy { + struct brcms_phy_pub pubpi_ro; +diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c +index 05f2ad1..ae00eea 100644 +--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c ++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c +@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e + */ + if (iwl3945_mod_params.disable_hw_scan) { + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); +- iwl3945_hw_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&iwl3945_hw_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); +diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h +index 69a77e2..552b42c 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-debug.h ++++ b/drivers/net/wireless/iwlwifi/iwl-debug.h +@@ -71,8 +71,8 @@ do { \ + } while (0) + + #else +-#define IWL_DEBUG(m, level, fmt, args...) +-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) ++#define IWL_DEBUG(m, level, fmt, args...) do {} while (0) ++#define IWL_DEBUG_LIMIT(m, level, fmt, args...) 
do {} while (0) + #define iwl_print_hex_dump(m, level, p, len) + #endif /* CONFIG_IWLWIFI_DEBUG */ + +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 523ad55..f8c5dc5 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void) + return -EINVAL; + + if (fake_hw_scan) { +- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; +- mac80211_hwsim_ops.sw_scan_start = NULL; +- mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_open_kernel(); ++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; ++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL; ++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_close_kernel(); + } + + spin_lock_init(&hwsim_radio_lock); +diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h +index 30f138b..c904585 100644 +--- a/drivers/net/wireless/mwifiex/main.h ++++ b/drivers/net/wireless/mwifiex/main.h +@@ -543,7 +543,7 @@ struct mwifiex_if_ops { + void (*cleanup_mpa_buf) (struct mwifiex_adapter *); + int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); +-}; ++} __no_const; + + struct mwifiex_adapter { + u8 iface_type; +diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c +index 0c13840..a5c3ed6 100644 +--- a/drivers/net/wireless/rndis_wlan.c ++++ b/drivers/net/wireless/rndis_wlan.c +@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h +index a77f1bb..c608b2b 100644 +--- a/drivers/net/wireless/wl1251/wl1251.h ++++ b/drivers/net/wireless/wl1251/wl1251.h +@@ -266,7 +266,7 @@ struct wl1251_if_operations { + void (*reset)(struct wl1251 *wl); + void (*enable_irq)(struct wl1251 *wl); + void (*disable_irq)(struct wl1251 *wl); +-}; ++} __no_const; + + struct wl1251 { + struct ieee80211_hw *hw; +diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c +index f34b5b2..b5abb9f 100644 +--- a/drivers/oprofile/buffer_sync.c ++++ b/drivers/oprofile/buffer_sync.c +@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) + if (cookie == NO_COOKIE) + offset = pc; + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + offset = pc; + } + if (cookie != last_cookie) { +@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) + /* add userspace sample */ + + if (!mm) { +- atomic_inc(&oprofile_stats.sample_lost_no_mm); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); + return 0; + } + + cookie = lookup_dcookie(mm, s->eip, &offset); + + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + return 0; + } + +@@ -563,7 +563,7 @@ void sync_buffer(int cpu) + /* ignore backtraces if failed to add a sample */ + if (state == sb_bt_start) { + state = sb_bt_ignore; +- atomic_inc(&oprofile_stats.bt_lost_no_mapping); ++ 
atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); + } + } + release_mm(mm); +diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c +index c0cc4e7..44d4e54 100644 +--- a/drivers/oprofile/event_buffer.c ++++ b/drivers/oprofile/event_buffer.c +@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) + } + + if (buffer_pos == buffer_size) { +- atomic_inc(&oprofile_stats.event_lost_overflow); ++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); + return; + } + +diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c +index f8c752e..28bf4fc 100644 +--- a/drivers/oprofile/oprof.c ++++ b/drivers/oprofile/oprof.c +@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) + if (oprofile_ops.switch_events()) + return; + +- atomic_inc(&oprofile_stats.multiplex_counter); ++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter); + start_switch_worker(); + } + +diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c +index 84a208d..f07d177 100644 +--- a/drivers/oprofile/oprofile_files.c ++++ b/drivers/oprofile/oprofile_files.c +@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf, + + + static ssize_t timeout_write(struct file *file, char const __user *buf, ++ size_t count, loff_t *offset) __size_overflow(3); ++static ssize_t timeout_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) + { + unsigned long val; +@@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof + } + + ++static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3); + static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) + { + unsigned long val; +@@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = { + }; + + ++static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3); + static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) + { + return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); + } + + ++static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3); + static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) + { + unsigned long val; +diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c +index 917d28e..d62d981 100644 +--- a/drivers/oprofile/oprofile_stats.c ++++ b/drivers/oprofile/oprofile_stats.c +@@ -30,11 +30,11 @@ void oprofile_reset_stats(void) + cpu_buf->sample_invalid_eip = 0; + } + +- atomic_set(&oprofile_stats.sample_lost_no_mm, 0); +- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.event_lost_overflow, 0); +- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.multiplex_counter, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); ++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); + } + + +diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h +index 38b6fc0..b5cbfce 100644 +--- a/drivers/oprofile/oprofile_stats.h ++++ b/drivers/oprofile/oprofile_stats.h 
+@@ -13,11 +13,11 @@ + #include <linux/atomic.h> + + struct oprofile_stat_struct { +- atomic_t sample_lost_no_mm; +- atomic_t sample_lost_no_mapping; +- atomic_t bt_lost_no_mapping; +- atomic_t event_lost_overflow; +- atomic_t multiplex_counter; ++ atomic_unchecked_t sample_lost_no_mm; ++ atomic_unchecked_t sample_lost_no_mapping; ++ atomic_unchecked_t bt_lost_no_mapping; ++ atomic_unchecked_t event_lost_overflow; ++ atomic_unchecked_t multiplex_counter; + }; + + extern struct oprofile_stat_struct oprofile_stats; +diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c +index 2f0aa0f..d5246c3 100644 +--- a/drivers/oprofile/oprofilefs.c ++++ b/drivers/oprofile/oprofilefs.c +@@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count + } + + ++static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3); + static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) + { + unsigned long value; +@@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = { + + + int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, +- char const *name, atomic_t *val) ++ char const *name, atomic_unchecked_t *val) + { + return __oprofilefs_create_file(sb, root, name, + &atomic_ro_fops, 0444, val); +diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c +index 3f56bc0..707d642 100644 +--- a/drivers/parport/procfs.c ++++ b/drivers/parport/procfs.c +@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user(result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0; + } + + #ifdef CONFIG_PARPORT_1284 +@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user (result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0; + } + #endif /* IEEE1284.3 support. */ + +diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h +index 9fff878..ad0ad53 100644 +--- a/drivers/pci/hotplug/cpci_hotplug.h ++++ b/drivers/pci/hotplug/cpci_hotplug.h +@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops { + int (*hardware_test) (struct slot* slot, u32 value); + u8 (*get_power) (struct slot* slot); + int (*set_power) (struct slot* slot, int value); +-}; ++} __no_const; + + struct cpci_hp_controller { + unsigned int irq; +diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c +index 76ba8a1..20ca857 100644 +--- a/drivers/pci/hotplug/cpqphp_nvram.c ++++ b/drivers/pci/hotplug/cpqphp_nvram.c +@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start) + + void compaq_nvram_init (void __iomem *rom_start) + { ++ ++#ifndef CONFIG_PAX_KERNEXEC + if (rom_start) { + compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); + } ++#endif ++ + dbg("int15 entry = %p\n", compaq_int15_entry_point); + + /* initialize our int15 lock */ +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 24f049e..051f66e 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -27,9 +27,9 @@ + #define MODULE_PARAM_PREFIX "pcie_aspm." 
+ + /* Note: those are not register definitions */ +-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ +-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ +-#define ASPM_STATE_L1 (4) /* L1 state */ ++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */ ++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */ ++#define ASPM_STATE_L1 (4U) /* L1 state */ + #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) + #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) + +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index dfee1b3..a454fb6 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + u32 l, sz, mask; + u16 orig_cmd; + +- mask = type ? PCI_ROM_ADDRESS_MASK : ~0; ++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0; + + if (!dev->mmio_always_on) { + pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); +diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c +index 27911b5..5b6db88 100644 +--- a/drivers/pci/proc.c ++++ b/drivers/pci/proc.c +@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { + static int __init pci_proc_init(void) + { + struct pci_dev *dev = NULL; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); ++#endif + proc_create("devices", 0, proc_bus_pci_dir, + &proc_bus_pci_dev_operations); + proc_initialized = 1; +diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c +index d9312b3..59f63f2 100644 +--- a/drivers/platform/x86/asus_acpi.c ++++ b/drivers/platform/x86/asus_acpi.c +@@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file) + } + + static ssize_t lcd_proc_write(struct file *file, const char __user *buffer, ++ size_t count, loff_t *pos) __size_overflow(3); ++static ssize_t lcd_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) + { + int rv, value; +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index 7b82868..b9344c9 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void) + return 0; + } + +-void static hotkey_mask_warn_incomplete_mask(void) ++static void hotkey_mask_warn_incomplete_mask(void) + { + /* log only what the user can fix... 
*/ + const u32 wantedmask = hotkey_driver_mask & +@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m) + } + } + +-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, +- struct tp_nvram_state *newn, +- const u32 event_mask) +-{ +- + #define TPACPI_COMPARE_KEY(__scancode, __member) \ + do { \ + if ((event_mask & (1 << __scancode)) && \ +@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + tpacpi_hotkey_send_key(__scancode); \ + } while (0) + +- void issue_volchange(const unsigned int oldvol, +- const unsigned int newvol) +- { +- unsigned int i = oldvol; ++static void issue_volchange(const unsigned int oldvol, ++ const unsigned int newvol, ++ const u32 event_mask) ++{ ++ unsigned int i = oldvol; + +- while (i > newvol) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); +- i--; +- } +- while (i < newvol) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); +- i++; +- } ++ while (i > newvol) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); ++ i--; + } ++ while (i < newvol) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); ++ i++; ++ } ++} + +- void issue_brightnesschange(const unsigned int oldbrt, +- const unsigned int newbrt) +- { +- unsigned int i = oldbrt; ++static void issue_brightnesschange(const unsigned int oldbrt, ++ const unsigned int newbrt, ++ const u32 event_mask) ++{ ++ unsigned int i = oldbrt; + +- while (i > newbrt) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); +- i--; +- } +- while (i < newbrt) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); +- i++; +- } ++ while (i > newbrt) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); ++ i--; ++ } ++ while (i < newbrt) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); ++ i++; + } ++} + ++static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, ++ struct tp_nvram_state *newn, ++ const u32 event_mask) ++{ + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); +@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + oldn->volume_level != newn->volume_level) { + /* recently muted, or repeated mute keypress, or + * multiple presses ending in mute */ +- issue_volchange(oldn->volume_level, newn->volume_level); ++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE); + } + } else { +@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); + } + if (oldn->volume_level != newn->volume_level) { +- issue_volchange(oldn->volume_level, newn->volume_level); ++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); + } else if (oldn->volume_toggle != newn->volume_toggle) { + /* repeated vol up/down keypress at end of scale ? 
*/ + if (newn->volume_level == 0) +@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + /* handle brightness */ + if (oldn->brightness_level != newn->brightness_level) { + issue_brightnesschange(oldn->brightness_level, +- newn->brightness_level); ++ newn->brightness_level, ++ event_mask); + } else if (oldn->brightness_toggle != newn->brightness_toggle) { + /* repeated key presses that didn't change state */ + if (newn->brightness_level == 0) +@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + && !tp_features.bright_unkfw) + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); + } ++} + + #undef TPACPI_COMPARE_KEY + #undef TPACPI_MAY_SEND_KEY +-} + + /* + * Polling driver +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c +index dcdc1f4..85cee16 100644 +--- a/drivers/platform/x86/toshiba_acpi.c ++++ b/drivers/platform/x86/toshiba_acpi.c +@@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd) + } + + static ssize_t lcd_proc_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *pos) __size_overflow(3); ++static ssize_t lcd_proc_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) + { + struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data; +diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c +index b859d16..5cc6b1a 100644 +--- a/drivers/pnp/pnpbios/bioscalls.c ++++ b/drivers/pnp/pnpbios/bioscalls.c +@@ -59,7 +59,7 @@ do { \ + set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ + } while(0) + +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + /* +@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + + cpu = get_cpu(); + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; ++ ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); +@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + :"memory"); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ +@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) + return status; + } + +-void pnpbios_calls_init(union pnp_bios_install_struct *header) ++void __init pnpbios_calls_init(union pnp_bios_install_struct *header) + { + int i; + +@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + pnp_bios_callpoint.offset = header->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + ++ pax_open_kernel(); ++ + for_each_possible_cpu(i) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) +@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], + (unsigned long)__va(header->fields.pm16dseg)); + } ++ ++ pax_close_kernel(); + } +diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c +index b0ecacb..7c9da2e 100644 +--- a/drivers/pnp/resource.c ++++ b/drivers/pnp/resource.c +@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*irq < 0 || *irq > 15) ++ if (*irq > 15) + return 0; + + /* check if the resource is reserved */ +@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*dma < 0 || *dma == 4 || *dma > 7) ++ if (*dma == 4 || *dma > 7) + return 0; + + /* check if the resource is reserved */ +diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c +index bb16f5b..c751eef 100644 +--- a/drivers/power/bq27x00_battery.c ++++ b/drivers/power/bq27x00_battery.c +@@ -67,7 +67,7 @@ + struct bq27x00_device_info; + struct bq27x00_access_methods { + int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); +-}; ++} __no_const; + + enum bq27x00_chip { BQ27000, BQ27500 }; + +diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c +index 33f5d9a..d957d3f 100644 +--- a/drivers/regulator/max8660.c ++++ b/drivers/regulator/max8660.c +@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client, + max8660->shadow_regs[MAX8660_OVER1] = 5; + } else { + /* Otherwise devices can be toggled via software */ +- max8660_dcdc_ops.enable = max8660_dcdc_enable; +- max8660_dcdc_ops.disable = max8660_dcdc_disable; ++ pax_open_kernel(); ++ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable; ++ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable; ++ pax_close_kernel(); + } + + /* +diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c +index 023d17d..74ef35b 100644 +--- a/drivers/regulator/mc13892-regulator.c ++++ b/drivers/regulator/mc13892-regulator.c +@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev) + } + mc13xxx_unlock(mc13892); + +- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode ++ pax_open_kernel(); ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode + = mc13892_vcam_set_mode; +- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode + = mc13892_vcam_get_mode; ++ pax_close_kernel(); + for (i = 0; i < pdata->num_regulators; i++) { + init_data = &pdata->regulators[i]; + priv->regulators[i] = regulator_register( +diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c +index cace6d3..f623fda 100644 +--- a/drivers/rtc/rtc-dev.c ++++ b/drivers/rtc/rtc-dev.c +@@ -14,6 +14,7 @@ + #include <linux/module.h> + #include <linux/rtc.h> + #include <linux/sched.h> ++#include 
<linux/grsecurity.h> + #include "rtc-core.h" + + static dev_t rtc_devt; +@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file, + if (copy_from_user(&tm, uarg, sizeof(tm))) + return -EFAULT; + ++ gr_log_timechange(); ++ + return rtc_set_time(rtc, &tm); + + case RTC_PIE_ON: +diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h +index ffb5878..e6d785c 100644 +--- a/drivers/scsi/aacraid/aacraid.h ++++ b/drivers/scsi/aacraid/aacraid.h +@@ -492,7 +492,7 @@ struct adapter_ops + int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); + /* Administrative operations */ + int (*adapter_comm)(struct aac_dev * dev, int comm); +-}; ++} __no_const; + + /* + * Define which interrupt handler needs to be installed +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c +index 705e13e..91c873c 100644 +--- a/drivers/scsi/aacraid/linit.c ++++ b/drivers/scsi/aacraid/linit.c +@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = { + #elif defined(__devinitconst) + static const struct pci_device_id aac_pci_tbl[] __devinitconst = { + #else +-static const struct pci_device_id aac_pci_tbl[] __devinitdata = { ++static const struct pci_device_id aac_pci_tbl[] __devinitconst = { + #endif + { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */ + { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */ +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c +index d5ff142..49c0ebb 100644 +--- a/drivers/scsi/aic94xx/aic94xx_init.c ++++ b/drivers/scsi/aic94xx/aic94xx_init.c +@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { + .lldd_control_phy = asd_control_phy, + }; + +-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = { ++static const struct pci_device_id aic94xx_pci_table[] __devinitconst = { + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1}, +diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h +index a796de9..1ef20e1 100644 +--- a/drivers/scsi/bfa/bfa.h ++++ b/drivers/scsi/bfa/bfa.h +@@ -196,7 +196,7 @@ struct bfa_hwif_s { + u32 *end); + int cpe_vec_q0; + int rme_vec_q0; +-}; ++} __no_const; + typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); + + struct bfa_faa_cbfn_s { +diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c +index e07bd47..cd1bbbb 100644 +--- a/drivers/scsi/bfa/bfa_fcpim.c ++++ b/drivers/scsi/bfa/bfa_fcpim.c +@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + + bfa_iotag_attach(fcp); + +- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp); ++ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp); + bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr + + (fcp->num_itns * sizeof(struct bfa_itn_s)); + memset(fcp->itn_arr, 0, +@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)) + { + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); +- struct bfa_itn_s *itn; ++ bfa_itn_s_no_const *itn; + + itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag); + itn->isr = isr; +diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h +index 1080bcb..a3b39e3 100644 +--- a/drivers/scsi/bfa/bfa_fcpim.h ++++ b/drivers/scsi/bfa/bfa_fcpim.h +@@ -37,6 +37,7 @@ struct bfa_iotag_s { + struct bfa_itn_s { + bfa_isr_func_t 
isr; + }; ++typedef struct bfa_itn_s __no_const bfa_itn_s_no_const; + + void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); +@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s { + struct list_head iotag_tio_free_q; /* free IO resources */ + struct list_head iotag_unused_q; /* unused IO resources*/ + struct bfa_iotag_s *iotag_arr; +- struct bfa_itn_s *itn_arr; ++ bfa_itn_s_no_const *itn_arr; + int num_ioim_reqs; + int num_fwtio_reqs; + int num_itns; +diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h +index 546d46b..642fa5b 100644 +--- a/drivers/scsi/bfa/bfa_ioc.h ++++ b/drivers/scsi/bfa/bfa_ioc.h +@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s { + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +-}; ++} __no_const; + + /* + * IOC event notification mechanism. +@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s { + void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); +-}; ++} __no_const; + + /* + * Queue element to wait for room in request queue. FIFO order is +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index 351dc0b..951dc32 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -42,7 +42,7 @@ + #include "scsi_logging.h" + + +-static atomic_t scsi_host_next_hn; /* host_no for next new host */ ++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */ + + + static void scsi_host_cls_release(struct device *dev) +@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + * subtract one because we increment first then return, but we need to + * know what the next host number was before increment + */ +- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; ++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1; + shost->dma_channel = 0xff; + + /* These three are default values which can be overridden */ +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c +index 865d452..e9b7fa7 100644 +--- a/drivers/scsi/hpsa.c ++++ b/drivers/scsi/hpsa.c +@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h) + u32 a; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ +@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h) + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, struct CommandList, list); + /* can't do anything if fifo is full */ +- if ((h->access.fifo_full(h))) { ++ if ((h->access->fifo_full(h))) { + dev_warn(&h->pdev->dev, "fifo full\n"); + break; + } +@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h) + h->Qdepth--; + + /* Tell the controller execute command */ +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + + /* Put job onto the completed Q */ + addQ(&h->cmpQ, c); +@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h) + + static inline unsigned long get_next_completion(struct ctlr_info *h) + { +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + } + + static inline bool interrupt_pending(struct ctlr_info *h) + { +- return h->access.intr_pending(h); ++ return h->access->intr_pending(h); + } + + static 
inline long interrupt_not_for_us(struct ctlr_info *h) + { +- return (h->access.intr_pending(h) == 0) || ++ return (h->access->intr_pending(h) == 0) || + (h->interrupts_enabled == 0); + } + +@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h) + if (prod_index < 0) + return -ENODEV; + h->product_name = products[prod_index].product_name; +- h->access = *(products[prod_index].access); ++ h->access = products[prod_index].access; + + if (hpsa_board_disabled(h->pdev)) { + dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); +@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h) + + assert_spin_locked(&lockup_detector_lock); + remove_ctlr_from_lockup_detector_list(h); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + spin_lock_irqsave(&h->lock, flags); + h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + spin_unlock_irqrestore(&h->lock, flags); +@@ -4340,7 +4340,7 @@ reinit_after_soft_reset: + } + + /* make sure the board interrupts are off */ +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + + if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) + goto clean2; +@@ -4374,7 +4374,7 @@ reinit_after_soft_reset: + * fake ones to scoop up any residual completions. + */ + spin_lock_irqsave(&h->lock, flags); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irq(h->intr[h->intr_mode], h); + rc = hpsa_request_irq(h, hpsa_msix_discard_completions, +@@ -4393,9 +4393,9 @@ reinit_after_soft_reset: + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); +- h->access.set_intr_mask(h, HPSA_INTR_ON); ++ h->access->set_intr_mask(h, HPSA_INTR_ON); + msleep(10000); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) +@@ -4416,7 +4416,7 @@ reinit_after_soft_reset: + } + + /* Turn the interrupts on so we can service requests */ +- h->access.set_intr_mask(h, HPSA_INTR_ON); ++ h->access->set_intr_mask(h, HPSA_INTR_ON); + + hpsa_hba_inquiry(h); + hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ +@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) + * To write all data in the battery backed cache to disks + */ + hpsa_flush_cache(h); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + free_irq(h->intr[h->intr_mode], h); + #ifdef CONFIG_PCI_MSI + if (h->msix_vector) +@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, + return; + } + /* Change the access methods to the performant access methods */ +- h->access = SA5_performant_access; ++ h->access = &SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; + } + +diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h +index 91edafb..a9b88ec 100644 +--- a/drivers/scsi/hpsa.h ++++ b/drivers/scsi/hpsa.h +@@ -73,7 +73,7 @@ struct ctlr_info { + unsigned int msix_vector; + unsigned int msi_vector; + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ +- struct access_method access; ++ struct access_method *access; + + /* queue and queue Info */ + struct list_head reqQ; +diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h +index f2df059..a3a9930 100644 +--- a/drivers/scsi/ips.h ++++ b/drivers/scsi/ips.h +@@ -1027,7 +1027,7 @@ 
typedef struct { + int (*intr)(struct ips_ha *); + void (*enableint)(struct ips_ha *); + uint32_t (*statupd)(struct ips_ha *); +-} ips_hw_func_t; ++} __no_const ips_hw_func_t; + + typedef struct ips_ha { + uint8_t ha_id[IPS_MAX_CHANNELS+1]; +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index 9de9db2..1e09660 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -105,12 +105,12 @@ struct fc_exch_mgr { + * all together if not used XXX + */ + struct { +- atomic_t no_free_exch; +- atomic_t no_free_exch_xid; +- atomic_t xid_not_found; +- atomic_t xid_busy; +- atomic_t seq_not_found; +- atomic_t non_bls_resp; ++ atomic_unchecked_t no_free_exch; ++ atomic_unchecked_t no_free_exch_xid; ++ atomic_unchecked_t xid_not_found; ++ atomic_unchecked_t xid_busy; ++ atomic_unchecked_t seq_not_found; ++ atomic_unchecked_t non_bls_resp; + } stats; + }; + +@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + /* allocate memory for exchange */ + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); + if (!ep) { +- atomic_inc(&mp->stats.no_free_exch); ++ atomic_inc_unchecked(&mp->stats.no_free_exch); + goto out; + } + memset(ep, 0, sizeof(*ep)); +@@ -780,7 +780,7 @@ out: + return ep; + err: + spin_unlock_bh(&pool->lock); +- atomic_inc(&mp->stats.no_free_exch_xid); ++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid); + mempool_free(ep, mp->ep_pool); + return NULL; + } +@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + xid = ntohs(fh->fh_ox_id); /* we originated exch */ + ep = fc_exch_find(mp, xid); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_OX_ID; + goto out; + } +@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + ep = fc_exch_find(mp, xid); + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { + if (ep) { +- atomic_inc(&mp->stats.xid_busy); ++ atomic_inc_unchecked(&mp->stats.xid_busy); + reject = FC_RJT_RX_ID; + goto rel; + } +@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } + xid = ep->xid; /* get our XID */ + } else if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_RX_ID; /* XID not found */ + goto out; + } +@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + if (f_ctl & FC_FC_END_SEQ) { + /* + * Update sequence_id based on incoming last +@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->esb_stat & ESB_ST_COMPLETE) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->did != ntoh24(fh->fh_s_id) && + ep->did != FC_FID_FLOGI) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto 
rel; + } + sof = fr_sof(fp); +@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } else if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + goto rel; + } + +@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ + + if (!sp) +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + else +- atomic_inc(&mp->stats.non_bls_resp); ++ atomic_inc_unchecked(&mp->stats.non_bls_resp); + + fc_frame_free(fp); + } +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index db9238f..4378ed2 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = { + .postreset = ata_std_postreset, + .error_handler = ata_std_error_handler, + .post_internal_cmd = sas_ata_post_internal, +- .qc_defer = ata_std_qc_defer, ++ .qc_defer = ata_std_qc_defer, + .qc_prep = ata_noop_qc_prep, + .qc_issue = sas_ata_qc_issue, + .qc_fill_rtf = sas_ata_qc_fill_rtf, +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h +index bb4c8e0..f33d849 100644 +--- a/drivers/scsi/lpfc/lpfc.h ++++ b/drivers/scsi/lpfc/lpfc.h +@@ -425,7 +425,7 @@ struct lpfc_vport { + struct dentry *debug_nodelist; + struct dentry *vport_debugfs_root; + struct lpfc_debugfs_trc *disc_trc; +- atomic_t disc_trc_cnt; ++ atomic_unchecked_t disc_trc_cnt; + #endif + uint8_t stat_data_enabled; + uint8_t stat_data_blocked; +@@ -835,8 +835,8 @@ struct lpfc_hba { + struct timer_list fabric_block_timer; + unsigned long bit_flags; + #define FABRIC_COMANDS_BLOCKED 0 +- atomic_t num_rsrc_err; +- atomic_t num_cmd_success; ++ atomic_unchecked_t num_rsrc_err; ++ atomic_unchecked_t num_cmd_success; + unsigned long last_rsrc_error_time; + unsigned long last_ramp_down_time; + unsigned long last_ramp_up_time; +@@ -866,7 +866,7 @@ struct lpfc_hba { + + struct dentry *debug_slow_ring_trc; + struct lpfc_debugfs_trc *slow_ring_trc; +- atomic_t slow_ring_trc_cnt; ++ atomic_unchecked_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index 2838259..a07cfb5 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, + + #include <linux/debugfs.h> + +-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); ++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); + static unsigned long lpfc_debugfs_start_time = 0L; + + /* iDiag */ +@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = (atomic_read(&vport->disc_trc_cnt) + 1) & ++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & + (lpfc_debugfs_max_disc_trc - 1); + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { + dtp = vport->disc_trc + i; +@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & ++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & + (lpfc_debugfs_max_slow_ring_trc - 1); + for (i = 
index; i < lpfc_debugfs_max_slow_ring_trc; i++) { + dtp = phba->slow_ring_trc + i; +@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, + !vport || !vport->disc_trc) + return; + +- index = atomic_inc_return(&vport->disc_trc_cnt) & ++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & + (lpfc_debugfs_max_disc_trc - 1); + dtp = vport->disc_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, + !phba || !phba->slow_ring_trc) + return; + +- index = atomic_inc_return(&phba->slow_ring_trc_cnt) & ++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & + (lpfc_debugfs_max_slow_ring_trc - 1); + dtp = phba->slow_ring_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "slow_ring buffer\n"); + goto debug_failed; + } +- atomic_set(&phba->slow_ring_trc_cnt, 0); ++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); + memset(phba->slow_ring_trc, 0, + (sizeof(struct lpfc_debugfs_trc) * + lpfc_debugfs_max_slow_ring_trc)); +@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "buffer\n"); + goto debug_failed; + } +- atomic_set(&vport->disc_trc_cnt, 0); ++ atomic_set_unchecked(&vport->disc_trc_cnt, 0); + + snprintf(name, sizeof(name), "discovery_trace"); + vport->debug_disc_trc = +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 55bc4fc..a2a109c 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -10027,8 +10027,10 @@ lpfc_init(void) + printk(LPFC_COPYRIGHT "\n"); + + if (lpfc_enable_npiv) { +- lpfc_transport_functions.vport_create = lpfc_vport_create; +- lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_open_kernel(); ++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create; ++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_close_kernel(); + } + lpfc_transport_template = + fc_attach_transport(&lpfc_transport_functions); +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index 2e1e54e..1af0a0d 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) + uint32_t evt_posted; + + spin_lock_irqsave(&phba->hbalock, flags); +- atomic_inc(&phba->num_rsrc_err); ++ atomic_inc_unchecked(&phba->num_rsrc_err); + phba->last_rsrc_error_time = jiffies; + + if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { +@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, + unsigned long flags; + struct lpfc_hba *phba = vport->phba; + uint32_t evt_posted; +- atomic_inc(&phba->num_cmd_success); ++ atomic_inc_unchecked(&phba->num_cmd_success); + + if (vport->cfg_lun_queue_depth <= queue_depth) + return; +@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) + unsigned long num_rsrc_err, num_cmd_success; + int i; + +- num_rsrc_err = atomic_read(&phba->num_rsrc_err); +- 
num_cmd_success = atomic_read(&phba->num_cmd_success); ++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); ++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) +@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c +index 5163edb..7b142bc 100644 +--- a/drivers/scsi/pmcraid.c ++++ b/drivers/scsi/pmcraid.c +@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) + res->scsi_dev = scsi_dev; + scsi_dev->hostdata = res; + res->change_detected = 0; +- atomic_set(&res->read_failures, 0); +- atomic_set(&res->write_failures, 0); ++ atomic_set_unchecked(&res->read_failures, 0); ++ atomic_set_unchecked(&res->write_failures, 0); + rc = 0; + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); +@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) + + /* If this was a SCSI read/write command keep count of errors */ + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) +- atomic_inc(&res->read_failures); ++ atomic_inc_unchecked(&res->read_failures); + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) +- atomic_inc(&res->write_failures); ++ atomic_inc_unchecked(&res->write_failures); + + if (!RES_IS_GSCSI(res->cfg_entry) && + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { +@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck( + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses + * hrrq_id assigned here in queuecommand + */ +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % + pinstance->num_hrrq; + cmd->cmd_done = pmcraid_io_done; + +@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough( + * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses + * hrrq_id assigned here in queuecommand + */ +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % + pinstance->num_hrrq; + + if (request_size) { +@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp) + + pinstance = container_of(workp, struct pmcraid_instance, worker_q); + /* add resources only after host is added into system */ +- if (!atomic_read(&pinstance->expose_resources)) ++ if (!atomic_read_unchecked(&pinstance->expose_resources)) + return; + + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); +@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance( + init_waitqueue_head(&pinstance->reset_wait_q); + + atomic_set(&pinstance->outstanding_cmds, 0); +- atomic_set(&pinstance->last_message_id, 0); +- atomic_set(&pinstance->expose_resources, 0); ++ atomic_set_unchecked(&pinstance->last_message_id, 0); ++ atomic_set_unchecked(&pinstance->expose_resources, 0); + + INIT_LIST_HEAD(&pinstance->free_res_q); + INIT_LIST_HEAD(&pinstance->used_res_q); +@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe( + /* Schedule worker thread to handle CCN and take care of adding and + * removing devices to OS + */ +- atomic_set(&pinstance->expose_resources, 1); ++ atomic_set_unchecked(&pinstance->expose_resources, 1); + schedule_work(&pinstance->worker_q); + return rc; + +diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h +index ca496c7..9c791d5 100644 +--- a/drivers/scsi/pmcraid.h ++++ b/drivers/scsi/pmcraid.h +@@ -748,7 +748,7 @@ struct pmcraid_instance { + struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; + + /* Message id as filled in last fired IOARCB, used to identify HRRQ */ +- atomic_t last_message_id; ++ atomic_unchecked_t last_message_id; + + /* configuration table */ + struct pmcraid_config_table *cfg_table; +@@ -777,7 +777,7 @@ struct pmcraid_instance { + atomic_t outstanding_cmds; + + /* should add/delete resources to mid-layer now ?*/ +- atomic_t expose_resources; ++ atomic_unchecked_t expose_resources; + + + +@@ -813,8 +813,8 @@ struct pmcraid_resource_entry { + struct pmcraid_config_table_entry_ext cfg_entry_ext; + }; + struct scsi_device *scsi_dev; /* Link scsi_device structure */ +- atomic_t read_failures; /* count of failed READ commands */ +- atomic_t write_failures; /* count of failed WRITE commands */ ++ atomic_unchecked_t read_failures; /* count of failed READ commands */ ++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */ + + /* To indicate add/delete/modify during CCN */ + u8 change_detected; +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h +index fcf052c..a8025a4 100644 +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -2244,7 +2244,7 @@ struct isp_operations { + int (*get_flash_version) (struct scsi_qla_host *, void *); + int (*start_scsi) (srb_t *); + int (*abort_isp) (struct scsi_qla_host *); +-}; ++} __no_const; + + /* MSI-X Support *************************************************************/ + +diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h +index fd5edc6..4906148 100644 +--- a/drivers/scsi/qla4xxx/ql4_def.h ++++ b/drivers/scsi/qla4xxx/ql4_def.h +@@ -258,7 +258,7 @@ struct ddb_entry { + * (4000 only) */ + atomic_t relogin_timer; /* Max Time to wait for + * relogin to complete */ +- atomic_t relogin_retry_count; /* Num of times relogin has been ++ 
atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been + * retried */ + uint32_t default_time2wait; /* Default Min time between + * relogins (+aens) */ +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 4169c8b..a8b896b 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) + */ + if (!iscsi_is_session_online(cls_sess)) { + /* Reset retry relogin timer */ +- atomic_inc(&ddb_entry->relogin_retry_count); ++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: index[%d] relogin timed out-retrying" + " relogin (%d), retry (%d)\n", __func__, + ddb_entry->fw_ddb_index, +- atomic_read(&ddb_entry->relogin_retry_count), ++ atomic_read_unchecked(&ddb_entry->relogin_retry_count), + ddb_entry->default_time2wait + 4)); + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + atomic_set(&ddb_entry->retry_relogin_timer, +@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, + + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); +- atomic_set(&ddb_entry->relogin_retry_count, 0); ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); + + ddb_entry->default_relogin_timeout = + le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); +diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c +index 2aeb2e9..46e3925 100644 +--- a/drivers/scsi/scsi.c ++++ b/drivers/scsi/scsi.c +@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) + unsigned long timeout; + int rtn = 0; + +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* check if the device is still usable */ + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index f85cfa6..a57c9e8 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) + shost = sdev->host; + scsi_init_cmd_errh(cmd); + cmd->result = DID_NO_CONNECT << 16; +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* + * SCSI request completion path will do scsi_device_unbusy(), +@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq) + + INIT_LIST_HEAD(&cmd->eh_entry); + +- atomic_inc(&cmd->device->iodone_cnt); ++ atomic_inc_unchecked(&cmd->device->iodone_cnt); + if (cmd->result) +- atomic_inc(&cmd->device->ioerr_cnt); ++ atomic_inc_unchecked(&cmd->device->ioerr_cnt); + + disposition = scsi_decide_disposition(cmd); + if (disposition != SUCCESS && +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 04c2a27..9d8bd66 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ + char *buf) \ + { \ + struct scsi_device *sdev = to_scsi_device(dev); \ +- unsigned long long count = atomic_read(&sdev->field); \ ++ unsigned long long count = atomic_read_unchecked(&sdev->field); \ + return snprintf(buf, 20, "0x%llx\n", count); \ + } \ + static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) +diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c +index 84a1fdf..693b0d6 100644 +--- a/drivers/scsi/scsi_tgt_lib.c ++++ b/drivers/scsi/scsi_tgt_lib.c +@@ -362,7 +362,7 @@ 
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, + int err; + + dprintk("%lx %u\n", uaddr, len); +- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); ++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL); + if (err) { + /* + * TODO: need to fixup sg_tablesize, max_segment_size, +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c +index 1b21491..1b7f60e 100644 +--- a/drivers/scsi/scsi_transport_fc.c ++++ b/drivers/scsi/scsi_transport_fc.c +@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class, + * Netlink Infrastructure + */ + +-static atomic_t fc_event_seq; ++static atomic_unchecked_t fc_event_seq; + + /** + * fc_get_event_number - Obtain the next sequential FC event number +@@ -497,7 +497,7 @@ static atomic_t fc_event_seq; + u32 + fc_get_event_number(void) + { +- return atomic_add_return(1, &fc_event_seq); ++ return atomic_add_return_unchecked(1, &fc_event_seq); + } + EXPORT_SYMBOL(fc_get_event_number); + +@@ -645,7 +645,7 @@ static __init int fc_transport_init(void) + { + int error; + +- atomic_set(&fc_event_seq, 0); ++ atomic_set_unchecked(&fc_event_seq, 0); + + error = transport_class_register(&fc_host_class); + if (error) +@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) + char *cp; + + *val = simple_strtoul(buf, &cp, 0); +- if ((*cp && (*cp != '\n')) || (*val < 0)) ++ if (*cp && (*cp != '\n')) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index 96029e6..4d77fa0 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -79,7 +79,7 @@ struct iscsi_internal { + struct transport_container session_cont; + }; + +-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ ++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ + static struct workqueue_struct *iscsi_eh_timer_workq; + + static DEFINE_IDA(iscsi_sess_ida); +@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) + int err; + + ihost = shost->shost_data; +- session->sid = atomic_add_return(1, &iscsi_session_nr); ++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); + + if (target_id == ISCSI_MAX_TARGET) { + id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL); +@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void) + printk(KERN_INFO "Loading iSCSI transport class v%s.\n", + ISCSI_TRANSPORT_VERSION); + +- atomic_set(&iscsi_session_nr, 0); ++ atomic_set_unchecked(&iscsi_session_nr, 0); + + err = class_register(&iscsi_transport_class); + if (err) +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c +index 21a045e..ec89e03 100644 +--- a/drivers/scsi/scsi_transport_srp.c ++++ b/drivers/scsi/scsi_transport_srp.c +@@ -33,7 +33,7 @@ + #include "scsi_transport_srp_internal.h" + + struct srp_host_attrs { +- atomic_t next_port_id; ++ atomic_unchecked_t next_port_id; + }; + #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) + +@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev, + struct Scsi_Host *shost = dev_to_shost(dev); + struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); + +- atomic_set(&srp_host->next_port_id, 0); ++ atomic_set_unchecked(&srp_host->next_port_id, 0); + return 0; + } + +@@ -211,7 +211,7 
@@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, + memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); + rport->roles = ids->roles; + +- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); ++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); + dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); + + transport_setup_device(&rport->dev); +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 441a1c5..07cece7 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + sdp->disk->disk_name, + MKDEV(SCSI_GENERIC_MAJOR, sdp->index), + NULL, +- (char *)arg); ++ (char __user *)arg); + case BLKTRACESTART: + return blk_trace_startstop(sdp->device->request_queue, 1); + case BLKTRACESTOP: +@@ -2312,7 +2312,7 @@ struct sg_proc_leaf { + const struct file_operations * fops; + }; + +-static struct sg_proc_leaf sg_proc_leaf_arr[] = { ++static const struct sg_proc_leaf sg_proc_leaf_arr[] = { + {"allow_dio", &adio_fops}, + {"debug", &debug_fops}, + {"def_reserved_size", &dressz_fops}, +@@ -2327,7 +2327,7 @@ sg_proc_init(void) + { + int k, mask; + int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); +- struct sg_proc_leaf * leaf; ++ const struct sg_proc_leaf * leaf; + + sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); + if (!sg_proc_sgp) +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c +index f64250e..1ee3049 100644 +--- a/drivers/spi/spi-dw-pci.c ++++ b/drivers/spi/spi-dw-pci.c +@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev) + #define spi_resume NULL + #endif + +-static const struct pci_device_id pci_ids[] __devinitdata = { ++static const struct pci_device_id pci_ids[] __devinitconst = { + /* Intel MID platform SPI controller 0 */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, + {}, +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 77eae99..b7cdcc9 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master) + EXPORT_SYMBOL_GPL(spi_bus_unlock); + + /* portable code must never pass more than 32 bytes */ +-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) ++#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES) + + static u8 *buf; + +diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c +index 436fe97..4082570 100644 +--- a/drivers/staging/gma500/power.c ++++ b/drivers/staging/gma500/power.c +@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on) + ret = gma_resume_pci(dev->pdev); + if (ret == 0) { + /* FIXME: we want to defer this for Medfield/Oaktrail */ +- gma_resume_display(dev); ++ gma_resume_display(dev->pdev); + psb_irq_preinstall(dev); + psb_irq_postinstall(dev); + pm_runtime_get(&dev->pdev->dev); +diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c +index bafccb3..e3ac78d 100644 +--- a/drivers/staging/hv/rndis_filter.c ++++ b/drivers/staging/hv/rndis_filter.c +@@ -42,7 +42,7 @@ struct rndis_device { + + enum rndis_device_state state; + bool link_state; +- atomic_t new_req_id; ++ atomic_unchecked_t new_req_id; + + spinlock_t request_lock; + struct list_head req_list; +@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev, + * template + */ + set = &rndis_msg->msg.set_req; +- set->req_id = atomic_inc_return(&dev->new_req_id); ++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id); + + /* Add to the request list */ + 
spin_lock_irqsave(&dev->request_lock, flags); +@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) + + /* Setup the rndis set */ + halt = &request->request_msg.msg.halt_req; +- halt->req_id = atomic_inc_return(&dev->new_req_id); ++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id); + + /* Ignore return since this msg is optional. */ + rndis_filter_send_request(dev, request); +diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h +index 9e8f010..af9efb56 100644 +--- a/drivers/staging/iio/buffer_generic.h ++++ b/drivers/staging/iio/buffer_generic.h +@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs { + + int (*is_enabled)(struct iio_buffer *buffer); + int (*enable)(struct iio_buffer *buffer); +-}; ++} __no_const; + + /** + * struct iio_buffer_setup_ops - buffer setup related callbacks +diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c +index 8b307b4..a97ac91 100644 +--- a/drivers/staging/octeon/ethernet-rx.c ++++ b/drivers/staging/octeon/ethernet-rx.c +@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) + /* Increment RX stats for virtual ports */ + if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); +- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); ++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets); ++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_packets); +- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets); ++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes); + #endif + } + netif_receive_skb(skb); +@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) + dev->name); + */ + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); ++ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped); + #endif + dev_kfree_skb_irq(skb); + } +diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c +index 076f866..2308070 100644 +--- a/drivers/staging/octeon/ethernet.c ++++ b/drivers/staging/octeon/ethernet.c +@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) + * since the RX tasklet also increments it. 
+ */ + #ifdef CONFIG_64BIT +- atomic64_add(rx_status.dropped_packets, +- (atomic64_t *)&priv->stats.rx_dropped); ++ atomic64_add_unchecked(rx_status.dropped_packets, ++ (atomic64_unchecked_t *)&priv->stats.rx_dropped); + #else +- atomic_add(rx_status.dropped_packets, +- (atomic_t *)&priv->stats.rx_dropped); ++ atomic_add_unchecked(rx_status.dropped_packets, ++ (atomic_unchecked_t *)&priv->stats.rx_dropped); + #endif + } + +diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c +index 7a19555..466456d 100644 +--- a/drivers/staging/pohmelfs/inode.c ++++ b/drivers/staging/pohmelfs/inode.c +@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) + mutex_init(&psb->mcache_lock); + psb->mcache_root = RB_ROOT; + psb->mcache_timeout = msecs_to_jiffies(5000); +- atomic_long_set(&psb->mcache_gen, 0); ++ atomic_long_set_unchecked(&psb->mcache_gen, 0); + + psb->trans_max_pages = 100; + +@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) + INIT_LIST_HEAD(&psb->crypto_ready_list); + INIT_LIST_HEAD(&psb->crypto_active_list); + +- atomic_set(&psb->trans_gen, 1); ++ atomic_set_unchecked(&psb->trans_gen, 1); + atomic_long_set(&psb->total_inodes, 0); + + mutex_init(&psb->state_lock); +diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c +index e22665c..a2a9390 100644 +--- a/drivers/staging/pohmelfs/mcache.c ++++ b/drivers/staging/pohmelfs/mcache.c +@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start + m->data = data; + m->start = start; + m->size = size; +- m->gen = atomic_long_inc_return(&psb->mcache_gen); ++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen); + + mutex_lock(&psb->mcache_lock); + err = pohmelfs_mcache_insert(psb, m); +diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h +index 985b6b7..7699e05 100644 +--- a/drivers/staging/pohmelfs/netfs.h ++++ b/drivers/staging/pohmelfs/netfs.h +@@ -571,14 +571,14 @@ struct pohmelfs_config; + struct pohmelfs_sb { + struct rb_root mcache_root; + struct mutex mcache_lock; +- atomic_long_t mcache_gen; ++ atomic_long_unchecked_t mcache_gen; + unsigned long mcache_timeout; + + unsigned int idx; + + unsigned int trans_retries; + +- atomic_t trans_gen; ++ atomic_unchecked_t trans_gen; + + unsigned int crypto_attached_size; + unsigned int crypto_align_size; +diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c +index 06c1a74..866eebc 100644 +--- a/drivers/staging/pohmelfs/trans.c ++++ b/drivers/staging/pohmelfs/trans.c +@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb) + int err; + struct netfs_cmd *cmd = t->iovec.iov_base; + +- t->gen = atomic_inc_return(&psb->trans_gen); ++ t->gen = atomic_inc_return_unchecked(&psb->trans_gen); + + cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + + t->attached_size + t->attached_pages * sizeof(struct netfs_cmd); +diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c +index c36a140..dd27fda 100644 +--- a/drivers/staging/rtl8192e/rtllib_module.c ++++ b/drivers/staging/rtl8192e/rtllib_module.c +@@ -228,6 +228,8 @@ static int show_debug_level(char *page, char **start, off_t offset, + } + + static int store_debug_level(struct file *file, const char __user *buffer, ++ unsigned long count, void *data) __size_overflow(3); ++static int store_debug_level(struct file *file, const char 
__user *buffer, + unsigned long count, void *data) + { + char buf[] = "0x00000000"; +diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c +index e3d47bc..85f4d0d 100644 +--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c ++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c +@@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset, + } + + static int store_debug_level(struct file *file, const char *buffer, ++ unsigned long count, void *data) __size_overflow(3); ++static int store_debug_level(struct file *file, const char *buffer, + unsigned long count, void *data) + { + char buf[] = "0x00000000"; +diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h +index 86308a0..feaa925 100644 +--- a/drivers/staging/rtl8712/rtl871x_io.h ++++ b/drivers/staging/rtl8712/rtl871x_io.h +@@ -108,7 +108,7 @@ struct _io_ops { + u8 *pmem); + u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, + u8 *pmem); +-}; ++} __no_const; + + struct io_req { + struct list_head list; +diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c +index c7b5e8b..783d6cb 100644 +--- a/drivers/staging/sbe-2t3e3/netdev.c ++++ b/drivers/staging/sbe-2t3e3/netdev.c +@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen); + + if (rlen) +- if (copy_to_user(data, &resp, rlen)) ++ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen)) + return -EFAULT; + + return 0; +diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h +index be21617..0954e45 100644 +--- a/drivers/staging/usbip/usbip_common.h ++++ b/drivers/staging/usbip/usbip_common.h +@@ -289,7 +289,7 @@ struct usbip_device { + void (*shutdown)(struct usbip_device *); + void (*reset)(struct usbip_device *); + void (*unusable)(struct usbip_device *); +- } eh_ops; ++ } __no_const eh_ops; + }; + + #if 0 +diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h +index 88b3298..3783eee 100644 +--- a/drivers/staging/usbip/vhci.h ++++ b/drivers/staging/usbip/vhci.h +@@ -88,7 +88,7 @@ struct vhci_hcd { + unsigned resuming:1; + unsigned long re_timeout; + +- atomic_t seqnum; ++ atomic_unchecked_t seqnum; + + /* + * NOTE: +diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c +index 2ee97e2..0420b86 100644 +--- a/drivers/staging/usbip/vhci_hcd.c ++++ b/drivers/staging/usbip/vhci_hcd.c +@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb) + return; + } + +- priv->seqnum = atomic_inc_return(&the_controller->seqnum); ++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); + if (priv->seqnum == 0xffff) + dev_info(&urb->dev->dev, "seqnum max\n"); + +@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + return -ENOMEM; + } + +- unlink->seqnum = atomic_inc_return(&the_controller->seqnum); ++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); + if (unlink->seqnum == 0xffff) + pr_info("seqnum max\n"); + +@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd) + vdev->rhport = rhport; + } + +- atomic_set(&vhci->seqnum, 0); ++ atomic_set_unchecked(&vhci->seqnum, 0); + spin_lock_init(&vhci->lock); + + hcd->power_budget = 0; /* no limit */ +diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c +index 3872b8c..fe6d2f4 100644 +--- 
a/drivers/staging/usbip/vhci_rx.c ++++ b/drivers/staging/usbip/vhci_rx.c +@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, + if (!urb) { + pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); + pr_info("max seqnum %d\n", +- atomic_read(&the_controller->seqnum)); ++ atomic_read_unchecked(&the_controller->seqnum)); + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + return; + } +diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c +index 7735027..30eed13 100644 +--- a/drivers/staging/vt6655/hostap.c ++++ b/drivers/staging/vt6655/hostap.c +@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO; + * + */ + ++static net_device_ops_no_const apdev_netdev_ops; ++ + static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + { + PSDevice apdev_priv; + struct net_device *dev = pDevice->dev; + int ret; +- const struct net_device_ops apdev_netdev_ops = { +- .ndo_start_xmit = pDevice->tx_80211, +- }; + + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); + +@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + *apdev_priv = *pDevice; + memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); + ++ /* only half broken now */ ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; + pDevice->apdev->netdev_ops = &apdev_netdev_ops; + + pDevice->apdev->type = ARPHRD_IEEE80211; +diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c +index 51b5adf..098e320 100644 +--- a/drivers/staging/vt6656/hostap.c ++++ b/drivers/staging/vt6656/hostap.c +@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO; + * + */ + ++static net_device_ops_no_const apdev_netdev_ops; ++ + static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + { + PSDevice apdev_priv; + struct net_device *dev = pDevice->dev; + int ret; +- const struct net_device_ops apdev_netdev_ops = { +- .ndo_start_xmit = pDevice->tx_80211, +- }; + + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); + +@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + *apdev_priv = *pDevice; + memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); + ++ /* only half broken now */ ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; + pDevice->apdev->netdev_ops = &apdev_netdev_ops; + + pDevice->apdev->type = ARPHRD_IEEE80211; +diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c +index 7843dfd..3db105f 100644 +--- a/drivers/staging/wlan-ng/hfa384x_usb.c ++++ b/drivers/staging/wlan-ng/hfa384x_usb.c +@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); + + struct usbctlx_completor { + int (*complete) (struct usbctlx_completor *); +-}; ++} __no_const; + + static int + hfa384x_usbctlx_complete_sync(hfa384x_t *hw, +diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c +index 1ca66ea..76f1343 100644 +--- a/drivers/staging/zcache/tmem.c ++++ b/drivers/staging/zcache/tmem.c +@@ -39,7 +39,7 @@ + * A tmem host implementation must use this function to register callbacks + * for memory allocation. 
+ */ +-static struct tmem_hostops tmem_hostops; ++static tmem_hostops_no_const tmem_hostops; + + static void tmem_objnode_tree_init(void); + +@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m) + * A tmem host implementation must use this function to register + * callbacks for a page-accessible memory (PAM) implementation + */ +-static struct tmem_pamops tmem_pamops; ++static tmem_pamops_no_const tmem_pamops; + + void tmem_register_pamops(struct tmem_pamops *m) + { +diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h +index ed147c4..94fc3c6 100644 +--- a/drivers/staging/zcache/tmem.h ++++ b/drivers/staging/zcache/tmem.h +@@ -180,6 +180,7 @@ struct tmem_pamops { + void (*new_obj)(struct tmem_obj *); + int (*replace_in_obj)(void *, struct tmem_obj *); + }; ++typedef struct tmem_pamops __no_const tmem_pamops_no_const; + extern void tmem_register_pamops(struct tmem_pamops *m); + + /* memory allocation methods provided by the host implementation */ +@@ -189,6 +190,7 @@ struct tmem_hostops { + struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *); + void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *); + }; ++typedef struct tmem_hostops __no_const tmem_hostops_no_const; + extern void tmem_register_hostops(struct tmem_hostops *m); + + /* core tmem accessor functions */ +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 03d3528..6bbe82f 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) + * outstanding_r2ts reaches zero, go ahead and send the delayed + * TASK_ABORTED status. + */ +- if (atomic_read(&se_cmd->t_transport_aborted) != 0) { ++ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) { + if (hdr->flags & ISCSI_FLAG_CMD_FINAL) + if (--cmd->outstanding_r2ts < 1) { + iscsit_stop_dataout_timer(cmd); +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index 6845228..df77141 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list( + cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, + cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), +- atomic_read(&cmd->t_task_cdbs_sent), ++ atomic_read_unchecked(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); +@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list( + pr_debug("LUN_RESET: got t_transport_active = 1 for" + " task: %p, t_fe_count: %d dev: %p\n", task, + fe_count, dev); +- atomic_set(&cmd->t_transport_aborted, 1); ++ atomic_set_unchecked(&cmd->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); +@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list( + } + pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," + " t_fe_count: %d dev: %p\n", task, fe_count, dev); +- atomic_set(&cmd->t_transport_aborted, 1); ++ atomic_set_unchecked(&cmd->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index cdb774b..8753593 100644 +--- a/drivers/target/target_core_transport.c ++++ 
b/drivers/target/target_core_transport.c +@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba( + + dev->queue_depth = dev_limits->queue_depth; + atomic_set(&dev->depth_left, dev->queue_depth); +- atomic_set(&dev->dev_ordered_id, 0); ++ atomic_set_unchecked(&dev->dev_ordered_id, 0); + + se_dev_set_default_attribs(dev, dev_limits); + +@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) + * Used to determine when ORDERED commands should go from + * Dormant to Active status. + */ +- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); ++ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id); + smp_mb__after_atomic_inc(); + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + cmd->se_ordered_id, cmd->sam_task_attr, +@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd) + " t_transport_active: %d t_transport_stop: %d" + " t_transport_sent: %d\n", cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), +- atomic_read(&cmd->t_task_cdbs_sent), ++ atomic_read_unchecked(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_task_cdbs_ex_left), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), +@@ -2089,9 +2089,9 @@ check_depth: + + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags |= (TF_ACTIVE | TF_SENT); +- atomic_inc(&cmd->t_task_cdbs_sent); ++ atomic_inc_unchecked(&cmd->t_task_cdbs_sent); + +- if (atomic_read(&cmd->t_task_cdbs_sent) == ++ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) == + cmd->t_task_list_num) + atomic_set(&cmd->t_transport_sent, 1); + +@@ -4297,7 +4297,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) + atomic_set(&cmd->transport_lun_stop, 0); + } + if (!atomic_read(&cmd->t_transport_active) || +- atomic_read(&cmd->t_transport_aborted)) { ++ atomic_read_unchecked(&cmd->t_transport_aborted)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return false; + } +@@ -4546,7 +4546,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) + { + int ret = 0; + +- if (atomic_read(&cmd->t_transport_aborted) != 0) { ++ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) { + if (!send_status || + (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) + return 1; +@@ -4583,7 +4583,7 @@ void transport_send_task_abort(struct se_cmd *cmd) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_tfo->write_pending_status(cmd) != 0) { +- atomic_inc(&cmd->t_transport_aborted); ++ atomic_inc_unchecked(&cmd->t_transport_aborted); + smp_mb__after_atomic_inc(); + } + } +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c +index b9040be..e3f5aab 100644 +--- a/drivers/tty/hvc/hvcs.c ++++ b/drivers/tty/hvc/hvcs.c +@@ -83,6 +83,7 @@ + #include <asm/hvcserver.h> + #include <asm/uaccess.h> + #include <asm/vio.h> ++#include <asm/local.h> + + /* + * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). +@@ -270,7 +271,7 @@ struct hvcs_struct { + unsigned int index; + + struct tty_struct *tty; +- int open_count; ++ local_t open_count; + + /* + * Used to tell the driver kernel_thread what operations need to take +@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut + + spin_lock_irqsave(&hvcsd->lock, flags); + +- if (hvcsd->open_count > 0) { ++ if (local_read(&hvcsd->open_count) > 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. 
" + "The hvcs device node is still in use.\n"); +@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) + if ((retval = hvcs_partner_connect(hvcsd))) + goto error_release; + +- hvcsd->open_count = 1; ++ local_set(&hvcsd->open_count, 1); + hvcsd->tty = tty; + tty->driver_data = hvcsd; + +@@ -1179,7 +1180,7 @@ fast_open: + + spin_lock_irqsave(&hvcsd->lock, flags); + kref_get(&hvcsd->kref); +- hvcsd->open_count++; ++ local_inc(&hvcsd->open_count); + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); + +@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + hvcsd = tty->driver_data; + + spin_lock_irqsave(&hvcsd->lock, flags); +- if (--hvcsd->open_count == 0) { ++ if (local_dec_and_test(&hvcsd->open_count)) { + + vio_disable_interrupts(hvcsd->vdev); + +@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + free_irq(irq, hvcsd); + kref_put(&hvcsd->kref, destroy_hvcs_struct); + return; +- } else if (hvcsd->open_count < 0) { ++ } else if (local_read(&hvcsd->open_count) < 0) { + printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" + " is missmanaged.\n", +- hvcsd->vdev->unit_address, hvcsd->open_count); ++ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count)); + } + + spin_unlock_irqrestore(&hvcsd->lock, flags); +@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty) + + spin_lock_irqsave(&hvcsd->lock, flags); + /* Preserve this so that we know how many kref refs to put */ +- temp_open_count = hvcsd->open_count; ++ temp_open_count = local_read(&hvcsd->open_count); + + /* + * Don't kref put inside the spinlock because the destruction +@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty) + hvcsd->tty->driver_data = NULL; + hvcsd->tty = NULL; + +- hvcsd->open_count = 0; ++ local_set(&hvcsd->open_count, 0); + + /* This will drop any buffered data on the floor which is OK in a hangup + * scenario. */ +@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty, + * the middle of a write operation? This is a crummy place to do this + * but we want to keep it all in the spinlock. 
+ */ +- if (hvcsd->open_count <= 0) { ++ if (local_read(&hvcsd->open_count) <= 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + return -ENODEV; + } +@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty) + { + struct hvcs_struct *hvcsd = tty->driver_data; + +- if (!hvcsd || hvcsd->open_count <= 0) ++ if (!hvcsd || local_read(&hvcsd->open_count) <= 0) + return 0; + + return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c +index ef92869..f4ebd88 100644 +--- a/drivers/tty/ipwireless/tty.c ++++ b/drivers/tty/ipwireless/tty.c +@@ -29,6 +29,7 @@ + #include <linux/tty_driver.h> + #include <linux/tty_flip.h> + #include <linux/uaccess.h> ++#include <asm/local.h> + + #include "tty.h" + #include "network.h" +@@ -51,7 +52,7 @@ struct ipw_tty { + int tty_type; + struct ipw_network *network; + struct tty_struct *linux_tty; +- int open_count; ++ local_t open_count; + unsigned int control_lines; + struct mutex ipw_tty_mutex; + int tx_bytes_queued; +@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) + mutex_unlock(&tty->ipw_tty_mutex); + return -ENODEV; + } +- if (tty->open_count == 0) ++ if (local_read(&tty->open_count) == 0) + tty->tx_bytes_queued = 0; + +- tty->open_count++; ++ local_inc(&tty->open_count); + + tty->linux_tty = linux_tty; + linux_tty->driver_data = tty; +@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) + + static void do_ipw_close(struct ipw_tty *tty) + { +- tty->open_count--; +- +- if (tty->open_count == 0) { ++ if (local_dec_return(&tty->open_count) == 0) { + struct tty_struct *linux_tty = tty->linux_tty; + + if (linux_tty != NULL) { +@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty) + return; + + mutex_lock(&tty->ipw_tty_mutex); +- if (tty->open_count == 0) { ++ if (local_read(&tty->open_count) == 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, + return; + } + +- if (!tty->open_count) { ++ if (!local_read(&tty->open_count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty, + return -ENODEV; + + mutex_lock(&tty->ipw_tty_mutex); +- if (!tty->open_count) { ++ if (!local_read(&tty->open_count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return -EINVAL; + } +@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty) + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!local_read(&tty->open_count)) + return -EINVAL; + + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; +@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty) + if (!tty) + return 0; + +- if (!tty->open_count) ++ if (!local_read(&tty->open_count)) + return 0; + + return tty->tx_bytes_queued; +@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty) + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!local_read(&tty->open_count)) + return -EINVAL; + + return get_control_lines(tty); +@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!local_read(&tty->open_count)) + return -EINVAL; + + return set_control_lines(tty, set, clear); +@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!local_read(&tty->open_count)) + return 
-EINVAL; + + /* FIXME: Exactly how is the tty object locked here .. */ +@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty) + against a parallel ioctl etc */ + mutex_lock(&ttyj->ipw_tty_mutex); + } +- while (ttyj->open_count) ++ while (local_read(&ttyj->open_count)) + do_ipw_close(ttyj); + ipwireless_disassociate_network_ttys(network, + ttyj->channel_idx); +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index fc7bbba..9527e93 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) + kref_init(&dlci->ref); + mutex_init(&dlci->mutex); + dlci->fifo = &dlci->_fifo; +- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { ++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) { + kfree(dlci); + return NULL; + } +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 39d6ab6..eb97f41 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) + { + *ops = tty_ldisc_N_TTY; + ops->owner = NULL; +- ops->refcount = ops->flags = 0; ++ atomic_set(&ops->refcount, 0); ++ ops->flags = 0; + } + EXPORT_SYMBOL_GPL(n_tty_inherit_ops); +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index e18604b..a7d5a11 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void) + register_sysctl_table(pty_root_table); + + /* Now create the /dev/ptmx special device */ ++ pax_open_kernel(); + tty_default_fops(&ptmx_fops); +- ptmx_fops.open = ptmx_open; ++ *(void **)&ptmx_fops.open = ptmx_open; ++ pax_close_kernel(); + + cdev_init(&ptmx_cdev, &ptmx_fops); + if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c +index 2b42a01..32a2ed3 100644 +--- a/drivers/tty/serial/kgdboc.c ++++ b/drivers/tty/serial/kgdboc.c +@@ -24,8 +24,9 @@ + #define MAX_CONFIG_LEN 40 + + static struct kgdb_io kgdboc_io_ops; ++static struct kgdb_io kgdboc_io_ops_console; + +-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ ++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. 
*/ + static int configured = -1; + + static char config[MAX_CONFIG_LEN]; +@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void) + kgdboc_unregister_kbd(); + if (configured == 1) + kgdb_unregister_io_module(&kgdboc_io_ops); ++ else if (configured == 2) ++ kgdb_unregister_io_module(&kgdboc_io_ops_console); + } + + static int configure_kgdboc(void) +@@ -157,13 +160,13 @@ static int configure_kgdboc(void) + int err; + char *cptr = config; + struct console *cons; ++ int is_console = 0; + + err = kgdboc_option_setup(config); + if (err || !strlen(config) || isspace(config[0])) + goto noconfig; + + err = -ENODEV; +- kgdboc_io_ops.is_console = 0; + kgdb_tty_driver = NULL; + + kgdboc_use_kms = 0; +@@ -184,7 +187,7 @@ static int configure_kgdboc(void) + int idx; + if (cons->device && cons->device(cons, &idx) == p && + idx == tty_line) { +- kgdboc_io_ops.is_console = 1; ++ is_console = 1; + break; + } + cons = cons->next; +@@ -194,12 +197,16 @@ static int configure_kgdboc(void) + kgdb_tty_line = tty_line; + + do_register: +- err = kgdb_register_io_module(&kgdboc_io_ops); ++ if (is_console) { ++ err = kgdb_register_io_module(&kgdboc_io_ops_console); ++ configured = 2; ++ } else { ++ err = kgdb_register_io_module(&kgdboc_io_ops); ++ configured = 1; ++ } + if (err) + goto noconfig; + +- configured = 1; +- + return 0; + + noconfig: +@@ -213,7 +220,7 @@ noconfig: + static int __init init_kgdboc(void) + { + /* Already configured? */ +- if (configured == 1) ++ if (configured >= 1) + return 0; + + return configure_kgdboc(); +@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) + if (config[len - 1] == '\n') + config[len - 1] = '\0'; + +- if (configured == 1) ++ if (configured >= 1) + cleanup_kgdboc(); + + /* Go and configure with the new params. 
*/ +@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = { + .post_exception = kgdboc_post_exp_handler, + }; + ++static struct kgdb_io kgdboc_io_ops_console = { ++ .name = "kgdboc", ++ .read_char = kgdboc_get_char, ++ .write_char = kgdboc_put_char, ++ .pre_exception = kgdboc_pre_exp_handler, ++ .post_exception = kgdboc_post_exp_handler, ++ .is_console = 1 ++}; ++ + #ifdef CONFIG_KGDB_SERIAL_CONSOLE + /* This is only available if kgdboc is a built in for early debugging */ + static int __init kgdboc_early_init(char *opt) +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 05085be..67eadb0 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty); + + void tty_default_fops(struct file_operations *fops) + { +- *fops = tty_fops; ++ memcpy((void *)fops, &tty_fops, sizeof(tty_fops)); + } + + /* +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index 8e0924f..4204eb4 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld) + if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { + struct tty_ldisc_ops *ldo = ld->ops; + +- ldo->refcount--; ++ atomic_dec(&ldo->refcount); + module_put(ldo->owner); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + +@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc) + spin_lock_irqsave(&tty_ldisc_lock, flags); + tty_ldiscs[disc] = new_ldisc; + new_ldisc->num = disc; +- new_ldisc->refcount = 0; ++ atomic_set(&new_ldisc->refcount, 0); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + + return ret; +@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc) + return -EINVAL; + + spin_lock_irqsave(&tty_ldisc_lock, flags); +- if (tty_ldiscs[disc]->refcount) ++ if (atomic_read(&tty_ldiscs[disc]->refcount)) + ret = -EBUSY; + else + tty_ldiscs[disc] = NULL; +@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc) + if (ldops) { + ret = ERR_PTR(-EAGAIN); + if (try_module_get(ldops->owner)) { +- ldops->refcount++; ++ atomic_inc(&ldops->refcount); + ret = ldops; + } + } +@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) + unsigned long flags; + + spin_lock_irqsave(&tty_ldisc_lock, flags); +- ldops->refcount--; ++ atomic_dec(&ldops->refcount); + module_put(ldops->owner); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + } +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index a605549..6bd3c96 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) + kbd->kbdmode == VC_OFF) && + value != KVAL(K_SAK)) + return; /* SAK is allowed even in raw mode */ ++ ++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) ++ { ++ void *func = fn_handler[value]; ++ if (func == fn_show_state || func == fn_show_ptregs || ++ func == fn_show_mem) ++ return; ++ } ++#endif ++ + fn_handler[value](vc); + } + +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c +index 65447c5..0526f0a 100644 +--- a/drivers/tty/vt/vt_ioctl.c ++++ b/drivers/tty/vt/vt_ioctl.c +@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str + if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) + return -EFAULT; + +- if (!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + switch (cmd) { + case KDGKBENT: + key_map = key_maps[s]; +@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct 
kbentry __user *user_kbe, int perm, struct kbd_str + val = (i ? K_HOLE : K_NOSUCHMAP); + return put_user(val, &user_kbe->kb_value); + case KDSKBENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) + return -EPERM; + if (!i && v == K_NOSUCHMAP) { +@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + int i, j, k; + int ret; + +- if (!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); + if (!kbs) { + ret = -ENOMEM; +@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + kfree(kbs); + return ((p && *p) ? -EOVERFLOW : 0); + case KDSKBSENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) { + ret = -EPERM; + goto reterr; +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c +index a783d53..cb30d94 100644 +--- a/drivers/uio/uio.c ++++ b/drivers/uio/uio.c +@@ -25,6 +25,7 @@ + #include <linux/kobject.h> + #include <linux/cdev.h> + #include <linux/uio_driver.h> ++#include <asm/local.h> + + #define UIO_MAX_DEVICES (1U << MINORBITS) + +@@ -32,10 +33,10 @@ struct uio_device { + struct module *owner; + struct device *dev; + int minor; +- atomic_t event; ++ atomic_unchecked_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; +- int vma_count; ++ local_t vma_count; + struct uio_info *info; + struct kobject *map_dir; + struct kobject *portio_dir; +@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct uio_device *idev = dev_get_drvdata(dev); +- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); ++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event)); + } + + static struct device_attribute uio_class_attributes[] = { +@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info) + { + struct uio_device *idev = info->uio_dev; + +- atomic_inc(&idev->event); ++ atomic_inc_unchecked(&idev->event); + wake_up_interruptible(&idev->wait); + kill_fasync(&idev->async_queue, SIGIO, POLL_IN); + } +@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep) + } + + listener->dev = idev; +- listener->event_count = atomic_read(&idev->event); ++ listener->event_count = atomic_read_unchecked(&idev->event); + filep->private_data = listener; + + if (idev->info->open) { +@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait) + return -EIO; + + poll_wait(filep, &idev->wait, wait); +- if (listener->event_count != atomic_read(&idev->event)) ++ if (listener->event_count != atomic_read_unchecked(&idev->event)) + return POLLIN | POLLRDNORM; + return 0; + } +@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf, + do { + set_current_state(TASK_INTERRUPTIBLE); + +- event_count = atomic_read(&idev->event); ++ event_count = atomic_read_unchecked(&idev->event); + if (event_count != listener->event_count) { + if (copy_to_user(buf, &event_count, count)) + retval = -EFAULT; +@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma) + static void uio_vma_open(struct vm_area_struct *vma) + { + struct uio_device *idev = vma->vm_private_data; +- idev->vma_count++; ++ local_inc(&idev->vma_count); + } + + static void uio_vma_close(struct vm_area_struct *vma) + { + struct uio_device *idev = vma->vm_private_data; +- idev->vma_count--; ++ local_dec(&idev->vma_count); + } + + static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +@@ -821,7 +822,7 @@ int 
__uio_register_device(struct module *owner, + idev->owner = owner; + idev->info = info; + init_waitqueue_head(&idev->wait); +- atomic_set(&idev->event, 0); ++ atomic_set_unchecked(&idev->event, 0); + + ret = uio_get_minor(idev); + if (ret) +diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c +index a845f8b..4f54072 100644 +--- a/drivers/usb/atm/cxacru.c ++++ b/drivers/usb/atm/cxacru.c +@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, + ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); + if (ret < 2) + return -EINVAL; +- if (index < 0 || index > 0x7f) ++ if (index > 0x7f) + return -EINVAL; + pos += tmp; + +diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c +index d3448ca..d2864ca 100644 +--- a/drivers/usb/atm/usbatm.c ++++ b/drivers/usb/atm/usbatm.c +@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (printk_ratelimit()) + atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", + __func__, vpi, vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + +@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (length > ATM_MAX_AAL5_PDU) { + atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", + __func__, length, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (sarb->len < pdu_length) { + atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", + __func__, pdu_length, sarb->len, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { + atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", + __func__, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (printk_ratelimit()) + atm_err(instance, "%s: no memory for skb (length: %u)!\n", + __func__, length); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto out; + } + +@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + + vcc->push(vcc, skb); + +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + out: + skb_trim(sarb, 0); + } +@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data) + struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; + + usbatm_pop(vcc, skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + skb = skb_dequeue(&instance->sndqueue); + } +@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag + if (!left--) + return sprintf(page, + "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", +- atomic_read(&atm_dev->stats.aal5.tx), +- atomic_read(&atm_dev->stats.aal5.tx_err), +- atomic_read(&atm_dev->stats.aal5.rx), +- atomic_read(&atm_dev->stats.aal5.rx_err), +- atomic_read(&atm_dev->stats.aal5.rx_drop)); ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), ++ 
atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); + + if (!left--) { + if (instance->disconnected) +diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c +index d956965..4179a77 100644 +--- a/drivers/usb/core/devices.c ++++ b/drivers/usb/core/devices.c +@@ -126,7 +126,7 @@ static const char format_endpt[] = + * time it gets called. + */ + static struct device_connect_event { +- atomic_t count; ++ atomic_unchecked_t count; + wait_queue_head_t wait; + } device_event = { + .count = ATOMIC_INIT(1), +@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = { + + void usbfs_conn_disc_event(void) + { +- atomic_add(2, &device_event.count); ++ atomic_add_unchecked(2, &device_event.count); + wake_up(&device_event.wait); + } + +@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file, + + poll_wait(file, &device_event.wait, wait); + +- event_count = atomic_read(&device_event.count); ++ event_count = atomic_read_unchecked(&device_event.count); + if (file->f_version != event_count) { + file->f_version = event_count; + return POLLIN | POLLRDNORM; +diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c +index 1fc8f12..20647c1 100644 +--- a/drivers/usb/early/ehci-dbgp.c ++++ b/drivers/usb/early/ehci-dbgp.c +@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len) + + #ifdef CONFIG_KGDB + static struct kgdb_io kgdbdbgp_io_ops; +-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops) ++static struct kgdb_io kgdbdbgp_io_ops_console; ++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console) + #else + #define dbgp_kgdb_mode (0) + #endif +@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = { + .write_char = kgdbdbgp_write_char, + }; + ++static struct kgdb_io kgdbdbgp_io_ops_console = { ++ .name = "kgdbdbgp", ++ .read_char = kgdbdbgp_read_char, ++ .write_char = kgdbdbgp_write_char, ++ .is_console = 1 ++}; ++ + static int kgdbdbgp_wait_time; + + static int __init kgdbdbgp_parse_config(char *str) +@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str) + ptr++; + kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); + } +- kgdb_register_io_module(&kgdbdbgp_io_ops); +- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; ++ if (early_dbgp_console.index != -1) ++ kgdb_register_io_module(&kgdbdbgp_io_ops_console); ++ else ++ kgdb_register_io_module(&kgdbdbgp_io_ops); + + return 0; + } +diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h +index d6bea3e..60b250e 100644 +--- a/drivers/usb/wusbcore/wa-hc.h ++++ b/drivers/usb/wusbcore/wa-hc.h +@@ -192,7 +192,7 @@ struct wahc { + struct list_head xfer_delayed_list; + spinlock_t xfer_list_lock; + struct work_struct xfer_work; +- atomic_t xfer_id_count; ++ atomic_unchecked_t xfer_id_count; + }; + + +@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa) + INIT_LIST_HEAD(&wa->xfer_delayed_list); + spin_lock_init(&wa->xfer_list_lock); + INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run); +- atomic_set(&wa->xfer_id_count, 1); ++ atomic_set_unchecked(&wa->xfer_id_count, 1); + } + + /** +diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c +index 57c01ab..8a05959 100644 +--- a/drivers/usb/wusbcore/wa-xfer.c ++++ b/drivers/usb/wusbcore/wa-xfer.c +@@ -296,7 +296,7 @@ out: + */ + static void wa_xfer_id_init(struct wa_xfer *xfer) + { +- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); ++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count); + } + + /* +diff 
--git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index c14c42b..f955cc2 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) + return 0; + } + +-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp) + { + struct file *eventfp, *filep = NULL, + *pollstart = NULL, *pollstop = NULL; +diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c +index b0b2ac3..89a4399 100644 +--- a/drivers/video/aty/aty128fb.c ++++ b/drivers/video/aty/aty128fb.c +@@ -148,7 +148,7 @@ enum { + }; + + /* Must match above enum */ +-static const char *r128_family[] __devinitdata = { ++static const char *r128_family[] __devinitconst = { + "AGP", + "PCI", + "PRO AGP", +diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c +index e132157..516db70 100644 +--- a/drivers/video/backlight/s6e63m0.c ++++ b/drivers/video/backlight/s6e63m0.c +@@ -690,7 +690,7 @@ static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev, + struct backlight_device *bd = NULL; + int brightness, rc; + +- rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode); ++ rc = kstrtouint(buf, 0, &lcd->gamma_mode); + if (rc < 0) + return rc; + +diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c +index 5c3960d..15cf8fc 100644 +--- a/drivers/video/fbcmap.c ++++ b/drivers/video/fbcmap.c +@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) + rc = -ENODEV; + goto out; + } +- if (cmap->start < 0 || (!info->fbops->fb_setcolreg && +- !info->fbops->fb_setcmap)) { ++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) { + rc = -EINVAL; + goto out1; + } +diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c +index ad93629..e020fc3 100644 +--- a/drivers/video/fbmem.c ++++ b/drivers/video/fbmem.c +@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, + image->dx += image->width + 8; + } + } else if (rotate == FB_ROTATE_UD) { +- for (x = 0; x < num && image->dx >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dx -= image->width + 8; + } +@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, + image->dy += image->height + 8; + } + } else if (rotate == FB_ROTATE_CCW) { +- for (x = 0; x < num && image->dy >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dy -= image->height + 8; + } +@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, + return -EFAULT; + if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) + return -EINVAL; +- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) ++ if (con2fb.framebuffer >= FB_MAX) + return -EINVAL; + if (!registered_fb[con2fb.framebuffer]) + request_module("fb%d", con2fb.framebuffer); +diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c +index 5a5d092..265c5ed 100644 +--- a/drivers/video/geode/gx1fb_core.c ++++ b/drivers/video/geode/gx1fb_core.c +@@ -29,7 +29,7 @@ static int crt_option = 1; + static char panel_option[32] = ""; + + /* Modes relevant to the GX1 (taken from modedb.c) */ +-static const struct fb_videomode __devinitdata gx1_modedb[] = { ++static const struct fb_videomode __devinitconst 
gx1_modedb[] = { + /* 640x480-60 VESA */ + { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, +diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c +index 0fad23f..0e9afa4 100644 +--- a/drivers/video/gxt4500.c ++++ b/drivers/video/gxt4500.c +@@ -156,7 +156,7 @@ struct gxt4500_par { + static char *mode_option; + + /* default mode: 1280x1024 @ 60 Hz, 8 bpp */ +-static const struct fb_videomode defaultmode __devinitdata = { ++static const struct fb_videomode defaultmode __devinitconst = { + .refresh = 60, + .xres = 1280, + .yres = 1024, +@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info) + return 0; + } + +-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = { ++static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = { + .id = "IBM GXT4500P", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_PSEUDOCOLOR, +diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c +index 7672d2e..b56437f 100644 +--- a/drivers/video/i810/i810_accel.c ++++ b/drivers/video/i810/i810_accel.c +@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space) + } + } + printk("ringbuffer lockup!!!\n"); ++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space); + i810_report_error(mmio); + par->dev_flags |= LOCKUP; + info->pixmap.scan_align = 1; +diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c +index 318f6fb..9a389c1 100644 +--- a/drivers/video/i810/i810_main.c ++++ b/drivers/video/i810/i810_main.c +@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info); + static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par); + + /* PCI */ +-static const char *i810_pci_list[] __devinitdata = { ++static const char *i810_pci_list[] __devinitconst = { + "Intel(R) 810 Framebuffer Device" , + "Intel(R) 810-DC100 Framebuffer Device" , + "Intel(R) 810E Framebuffer Device" , +diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c +index de36693..3c63fc2 100644 +--- a/drivers/video/jz4740_fb.c ++++ b/drivers/video/jz4740_fb.c +@@ -136,7 +136,7 @@ struct jzfb { + uint32_t pseudo_palette[16]; + }; + +-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = { ++static const struct fb_fix_screeninfo jzfb_fix __devinitconst = { + .id = "JZ4740 FB", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, +diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm +index 3c14e43..eafa544 100644 +--- a/drivers/video/logo/logo_linux_clut224.ppm ++++ b/drivers/video/logo/logo_linux_clut224.ppm +@@ -1,1604 +1,1123 @@ + P3 +-# Standard 224-color Linux logo + 80 80 + 255 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 6 6 6 10 10 10 10 10 10 +- 10 10 10 6 6 6 6 6 6 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 
[remaining logo_linux_clut224.ppm content, roughly 1,600 lines of raw PPM colour triplets for the old and replacement Linux logo images, trimmed; the summary is truncated at this point]
38 +- 86 86 86 50 50 50 22 22 22 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 14 14 14 38 38 38 82 82 82 +- 34 34 34 2 2 6 2 2 6 2 2 6 +- 42 42 42 195 195 195 246 246 246 253 253 253 +-253 253 253 253 253 253 253 253 253 250 250 250 +-242 242 242 242 242 242 250 250 250 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 250 250 250 246 246 246 238 238 238 +-226 226 226 231 231 231 101 101 101 6 6 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 38 38 38 82 82 82 42 42 42 14 14 14 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 10 10 10 26 26 26 62 62 62 66 66 66 +- 2 2 6 2 2 6 2 2 6 6 6 6 +- 70 70 70 170 170 170 206 206 206 234 234 234 +-246 246 246 250 250 250 250 250 250 238 238 238 +-226 226 226 231 231 231 238 238 238 250 250 250 +-250 250 250 250 250 250 246 246 246 231 231 231 +-214 214 214 206 206 206 202 202 202 202 202 202 +-198 198 198 202 202 202 182 182 182 18 18 18 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 62 62 62 66 66 66 30 30 30 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 14 14 14 42 42 42 82 82 82 18 18 18 +- 2 2 6 2 2 6 2 2 6 10 10 10 +- 94 94 94 182 182 182 218 218 218 242 242 242 +-250 250 250 253 253 253 253 253 253 250 250 250 +-234 234 234 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 246 246 246 +-238 238 238 226 226 226 210 210 210 202 202 202 +-195 195 195 195 195 195 210 210 210 158 158 158 +- 6 6 6 14 14 14 50 50 50 14 14 14 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 86 86 86 46 46 46 +- 18 18 18 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 22 22 22 54 54 54 70 70 70 2 2 6 +- 2 2 6 10 10 10 2 2 6 22 22 22 +-166 166 166 231 231 231 250 250 250 253 253 253 +-253 253 253 253 253 253 253 253 253 250 250 250 +-242 242 242 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 246 246 246 +-231 231 231 206 206 206 198 198 198 226 226 226 +- 94 94 94 2 2 6 6 6 6 38 38 38 +- 30 30 30 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 62 62 62 66 66 66 +- 26 26 26 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 74 74 74 50 50 50 2 2 6 +- 26 26 26 26 26 26 2 2 6 106 106 106 +-238 238 238 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 246 246 246 218 218 218 202 202 202 +-210 210 210 14 14 14 2 2 6 2 2 6 +- 30 30 30 22 22 22 2 2 6 2 2 6 +- 2 2 6 2 2 
6 18 18 18 86 86 86 +- 42 42 42 14 14 14 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 14 14 14 +- 42 42 42 90 90 90 22 22 22 2 2 6 +- 42 42 42 2 2 6 18 18 18 218 218 218 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 250 250 250 221 221 221 +-218 218 218 101 101 101 2 2 6 14 14 14 +- 18 18 18 38 38 38 10 10 10 2 2 6 +- 2 2 6 2 2 6 2 2 6 78 78 78 +- 58 58 58 22 22 22 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 54 54 54 82 82 82 2 2 6 26 26 26 +- 22 22 22 2 2 6 123 123 123 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 250 250 250 +-238 238 238 198 198 198 6 6 6 38 38 38 +- 58 58 58 26 26 26 38 38 38 2 2 6 +- 2 2 6 2 2 6 2 2 6 46 46 46 +- 78 78 78 30 30 30 10 10 10 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 10 10 10 30 30 30 +- 74 74 74 58 58 58 2 2 6 42 42 42 +- 2 2 6 22 22 22 231 231 231 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 250 250 250 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 246 246 246 46 46 46 38 38 38 +- 42 42 42 14 14 14 38 38 38 14 14 14 +- 2 2 6 2 2 6 2 2 6 6 6 6 +- 86 86 86 46 46 46 14 14 14 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 14 14 14 42 42 42 +- 90 90 90 18 18 18 18 18 18 26 26 26 +- 2 2 6 116 116 116 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 250 250 250 238 238 238 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 94 94 94 6 6 6 +- 2 2 6 2 2 6 10 10 10 34 34 34 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 74 74 74 58 58 58 22 22 22 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 10 10 10 26 26 26 66 66 66 +- 82 82 82 2 2 6 38 38 38 6 6 6 +- 14 14 14 210 210 210 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 246 246 246 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 
253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 144 144 144 2 2 6 +- 2 2 6 2 2 6 2 2 6 46 46 46 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 42 42 42 74 74 74 30 30 30 10 10 10 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 14 14 14 42 42 42 90 90 90 +- 26 26 26 6 6 6 42 42 42 2 2 6 +- 74 74 74 250 250 250 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 242 242 242 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 182 182 182 2 2 6 +- 2 2 6 2 2 6 2 2 6 46 46 46 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 10 10 10 86 86 86 38 38 38 10 10 10 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 10 10 10 26 26 26 66 66 66 82 82 82 +- 2 2 6 22 22 22 18 18 18 2 2 6 +-149 149 149 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 206 206 206 2 2 6 +- 2 2 6 2 2 6 2 2 6 38 38 38 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 6 6 6 86 86 86 46 46 46 14 14 14 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 18 18 18 46 46 46 86 86 86 18 18 18 +- 2 2 6 34 34 34 10 10 10 6 6 6 +-210 210 210 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 221 221 221 6 6 6 +- 2 2 6 2 2 6 6 6 6 30 30 30 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 82 82 82 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 26 26 26 66 66 66 62 62 62 2 2 6 +- 2 2 6 38 38 38 10 10 10 26 26 26 +-238 238 238 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 238 238 238 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 6 6 6 +- 2 2 6 2 2 6 10 10 10 30 30 30 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 58 58 58 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 38 38 38 78 78 78 6 6 6 2 2 6 +- 2 2 6 46 46 46 14 14 14 42 42 42 +-246 246 246 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 
253 253 253 +-253 253 253 253 253 253 231 231 231 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 10 10 10 +- 2 2 6 2 2 6 22 22 22 14 14 14 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 62 62 62 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 74 74 74 2 2 6 2 2 6 +- 14 14 14 70 70 70 34 34 34 62 62 62 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 14 14 14 +- 2 2 6 2 2 6 30 30 30 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 62 62 62 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 54 54 54 62 62 62 2 2 6 2 2 6 +- 2 2 6 30 30 30 46 46 46 70 70 70 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 226 226 226 10 10 10 +- 2 2 6 6 6 6 30 30 30 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 66 66 66 58 58 58 22 22 22 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 58 58 58 62 62 62 2 2 6 2 2 6 +- 2 2 6 2 2 6 30 30 30 78 78 78 +-250 250 250 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 206 206 206 2 2 6 +- 22 22 22 34 34 34 18 14 6 22 22 22 +- 26 26 26 18 18 18 6 6 6 2 2 6 +- 2 2 6 82 82 82 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 26 26 26 +- 62 62 62 106 106 106 74 54 14 185 133 11 +-210 162 10 121 92 8 6 6 6 62 62 62 +-238 238 238 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 246 246 246 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 158 158 158 18 18 18 +- 14 14 14 2 2 6 2 2 6 2 2 6 +- 6 6 6 18 18 18 66 66 66 38 38 38 +- 6 6 6 94 94 94 50 50 50 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 
0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 10 10 10 10 10 10 18 18 18 38 38 38 +- 78 78 78 142 134 106 216 158 10 242 186 14 +-246 190 14 246 190 14 156 118 10 10 10 10 +- 90 90 90 238 238 238 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 250 250 250 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 246 230 190 +-238 204 91 238 204 91 181 142 44 37 26 9 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 38 38 38 46 46 46 +- 26 26 26 106 106 106 54 54 54 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 14 14 14 22 22 22 +- 30 30 30 38 38 38 50 50 50 70 70 70 +-106 106 106 190 142 34 226 170 11 242 186 14 +-246 190 14 246 190 14 246 190 14 154 114 10 +- 6 6 6 74 74 74 226 226 226 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 231 231 231 250 250 250 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 228 184 62 +-241 196 14 241 208 19 232 195 16 38 30 10 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 30 30 30 26 26 26 +-203 166 17 154 142 90 66 66 66 26 26 26 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 18 18 18 38 38 38 58 58 58 +- 78 78 78 86 86 86 101 101 101 123 123 123 +-175 146 61 210 150 10 234 174 13 246 186 14 +-246 190 14 246 190 14 246 190 14 238 190 10 +-102 78 10 2 2 6 46 46 46 198 198 198 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 234 234 234 242 242 242 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 224 178 62 +-242 186 14 241 196 14 210 166 10 22 18 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 6 6 6 121 92 8 +-238 202 15 232 195 16 82 82 82 34 34 34 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 14 14 14 38 38 38 70 70 70 154 122 46 +-190 142 34 200 144 11 197 138 11 197 138 11 +-213 154 11 226 170 11 242 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-225 175 15 46 32 6 2 2 6 22 22 22 +-158 158 158 250 250 250 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 250 250 250 242 242 242 224 178 62 +-239 182 13 236 186 11 213 154 11 46 32 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 61 42 6 225 175 15 +-238 190 10 236 186 11 112 100 78 42 42 42 +- 14 14 14 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 22 22 22 54 54 54 154 122 46 213 154 11 +-226 170 11 230 174 11 226 170 11 226 170 11 +-236 178 12 242 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-241 196 14 184 144 12 10 10 10 2 2 6 +- 6 6 6 116 116 116 242 242 242 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 
253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 231 231 231 198 198 198 214 170 54 +-236 178 12 236 178 12 210 150 10 137 92 6 +- 18 14 6 2 2 6 2 2 6 2 2 6 +- 6 6 6 70 47 6 200 144 11 236 178 12 +-239 182 13 239 182 13 124 112 88 58 58 58 +- 22 22 22 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 70 70 70 180 133 36 226 170 11 +-239 182 13 242 186 14 242 186 14 246 186 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 232 195 16 98 70 6 2 2 6 +- 2 2 6 2 2 6 66 66 66 221 221 221 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 206 206 206 198 198 198 214 166 58 +-230 174 11 230 174 11 216 158 10 192 133 9 +-163 110 8 116 81 8 102 78 10 116 81 8 +-167 114 7 197 138 11 226 170 11 239 182 13 +-242 186 14 242 186 14 162 146 94 78 78 78 +- 34 34 34 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 30 30 30 78 78 78 190 142 34 226 170 11 +-239 182 13 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 241 196 14 203 166 17 22 18 6 +- 2 2 6 2 2 6 2 2 6 38 38 38 +-218 218 218 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-250 250 250 206 206 206 198 198 198 202 162 69 +-226 170 11 236 178 12 224 166 10 210 150 10 +-200 144 11 197 138 11 192 133 9 197 138 11 +-210 150 10 226 170 11 242 186 14 246 190 14 +-246 190 14 246 186 14 225 175 15 124 112 88 +- 62 62 62 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 78 78 78 174 135 50 224 166 10 +-239 182 13 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 241 196 14 139 102 15 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 78 78 78 250 250 250 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-250 250 250 214 214 214 198 198 198 190 150 46 +-219 162 10 236 178 12 234 174 13 224 166 10 +-216 158 10 213 154 11 213 154 11 216 158 10 +-226 170 11 239 182 13 246 190 14 246 190 14 +-246 190 14 246 190 14 242 186 14 206 162 42 +-101 101 101 58 58 58 30 30 30 14 14 14 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 74 74 74 174 135 50 216 158 10 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 241 196 14 226 184 13 +- 61 42 6 2 2 6 2 2 6 2 2 6 +- 22 22 22 238 238 238 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 226 226 226 187 187 187 180 133 36 +-216 158 10 236 178 12 239 182 13 236 178 12 +-230 174 11 226 170 11 226 170 11 230 174 11 +-236 178 12 242 186 14 246 
190 14 246 190 14 +-246 190 14 246 190 14 246 186 14 239 182 13 +-206 162 42 106 106 106 66 66 66 34 34 34 +- 14 14 14 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 26 26 26 70 70 70 163 133 67 213 154 11 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 241 196 14 +-190 146 13 18 14 6 2 2 6 2 2 6 +- 46 46 46 246 246 246 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 221 221 221 86 86 86 156 107 11 +-216 158 10 236 178 12 242 186 14 246 186 14 +-242 186 14 239 182 13 239 182 13 242 186 14 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-242 186 14 225 175 15 142 122 72 66 66 66 +- 30 30 30 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 26 26 26 70 70 70 163 133 67 210 150 10 +-236 178 12 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-232 195 16 121 92 8 34 34 34 106 106 106 +-221 221 221 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-242 242 242 82 82 82 18 14 6 163 110 8 +-216 158 10 236 178 12 242 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 242 186 14 163 133 67 +- 46 46 46 18 18 18 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 10 10 10 +- 30 30 30 78 78 78 163 133 67 210 150 10 +-236 178 12 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-241 196 14 215 174 15 190 178 144 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 218 218 218 +- 58 58 58 2 2 6 22 18 6 167 114 7 +-216 158 10 236 178 12 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 186 14 242 186 14 190 150 46 +- 54 54 54 22 22 22 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 14 14 14 +- 38 38 38 86 86 86 180 133 36 213 154 11 +-236 178 12 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 232 195 16 190 146 13 214 214 214 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 250 250 250 170 170 170 26 26 26 +- 2 2 6 2 2 6 37 26 9 163 110 8 +-219 162 10 239 182 13 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 236 178 12 224 166 10 142 122 72 +- 46 46 46 18 18 18 6 6 6 0 0 0 +- 0 0 
0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 109 106 95 192 133 9 224 166 10 +-242 186 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-242 186 14 226 184 13 210 162 10 142 110 46 +-226 226 226 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-253 253 253 253 253 253 253 253 253 253 253 253 +-198 198 198 66 66 66 2 2 6 2 2 6 +- 2 2 6 2 2 6 50 34 6 156 107 11 +-219 162 10 239 182 13 246 186 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 242 186 14 +-234 174 13 213 154 11 154 122 46 66 66 66 +- 30 30 30 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 58 58 58 154 121 60 206 145 10 234 174 13 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 236 178 12 210 162 10 163 110 8 +- 61 42 6 138 138 138 218 218 218 250 250 250 +-253 253 253 253 253 253 253 253 253 250 250 250 +-242 242 242 210 210 210 144 144 144 66 66 66 +- 6 6 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 61 42 6 163 110 8 +-216 158 10 236 178 12 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 239 182 13 230 174 11 216 158 10 +-190 142 34 124 112 88 70 70 70 38 38 38 +- 18 18 18 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 22 22 22 +- 62 62 62 168 124 44 206 145 10 224 166 10 +-236 178 12 239 182 13 242 186 14 242 186 14 +-246 186 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 236 178 12 216 158 10 175 118 6 +- 80 54 7 2 2 6 6 6 6 30 30 30 +- 54 54 54 62 62 62 50 50 50 38 38 38 +- 14 14 14 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 80 54 7 167 114 7 +-213 154 11 236 178 12 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 190 14 242 186 14 239 182 13 239 182 13 +-230 174 11 210 150 10 174 135 50 124 112 88 +- 82 82 82 54 54 54 34 34 34 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 18 18 18 +- 50 50 50 158 118 36 192 133 9 200 144 11 +-216 158 10 219 162 10 224 166 10 226 170 11 +-230 174 11 236 178 12 239 182 13 239 182 13 +-242 186 14 246 186 14 246 190 14 246 190 14 +-246 190 14 246 190 14 246 190 14 246 190 14 +-246 186 14 230 174 11 210 150 10 163 110 8 +-104 69 6 10 10 10 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 91 60 6 167 114 7 +-206 145 10 230 174 11 242 186 14 246 190 14 +-246 190 14 246 190 14 246 186 14 242 186 14 +-239 182 13 230 174 11 224 166 10 213 154 11 +-180 133 36 124 112 88 86 86 86 58 58 58 +- 38 38 38 22 22 22 10 10 10 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 14 14 14 +- 34 34 34 70 70 70 138 110 50 158 118 36 +-167 114 7 180 123 7 192 133 9 197 138 11 +-200 144 11 206 145 10 213 154 11 219 162 10 +-224 166 10 230 174 11 239 182 13 242 186 14 +-246 186 14 246 186 14 246 186 14 246 186 14 +-239 182 13 216 158 10 185 133 11 152 99 6 +-104 69 6 18 14 6 2 2 6 2 2 6 +- 2 2 
6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 2 2 6 2 2 6 2 2 6 +- 2 2 6 6 6 6 80 54 7 152 99 6 +-192 133 9 219 162 10 236 178 12 239 182 13 +-246 186 14 242 186 14 239 182 13 236 178 12 +-224 166 10 206 145 10 192 133 9 154 121 60 +- 94 94 94 62 62 62 42 42 42 22 22 22 +- 14 14 14 6 6 6 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 18 18 18 34 34 34 58 58 58 78 78 78 +-101 98 89 124 112 88 142 110 46 156 107 11 +-163 110 8 167 114 7 175 118 6 180 123 7 +-185 133 11 197 138 11 210 150 10 219 162 10 +-226 170 11 236 178 12 236 178 12 234 174 13 +-219 162 10 197 138 11 163 110 8 130 83 6 +- 91 60 6 10 10 10 2 2 6 2 2 6 +- 18 18 18 38 38 38 38 38 38 38 38 38 +- 38 38 38 38 38 38 38 38 38 38 38 38 +- 38 38 38 38 38 38 26 26 26 2 2 6 +- 2 2 6 6 6 6 70 47 6 137 92 6 +-175 118 6 200 144 11 219 162 10 230 174 11 +-234 174 13 230 174 11 219 162 10 210 150 10 +-192 133 9 163 110 8 124 112 88 82 82 82 +- 50 50 50 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 14 14 14 22 22 22 34 34 34 +- 42 42 42 58 58 58 74 74 74 86 86 86 +-101 98 89 122 102 70 130 98 46 121 87 25 +-137 92 6 152 99 6 163 110 8 180 123 7 +-185 133 11 197 138 11 206 145 10 200 144 11 +-180 123 7 156 107 11 130 83 6 104 69 6 +- 50 34 6 54 54 54 110 110 110 101 98 89 +- 86 86 86 82 82 82 78 78 78 78 78 78 +- 78 78 78 78 78 78 78 78 78 78 78 78 +- 78 78 78 82 82 82 86 86 86 94 94 94 +-106 106 106 101 101 101 86 66 34 124 80 6 +-156 107 11 180 123 7 192 133 9 200 144 11 +-206 145 10 200 144 11 192 133 9 175 118 6 +-139 102 15 109 106 95 70 70 70 42 42 42 +- 22 22 22 10 10 10 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 6 6 6 10 10 10 +- 14 14 14 22 22 22 30 30 30 38 38 38 +- 50 50 50 62 62 62 74 74 74 90 90 90 +-101 98 89 112 100 78 121 87 25 124 80 6 +-137 92 6 152 99 6 152 99 6 152 99 6 +-138 86 6 124 80 6 98 70 6 86 66 30 +-101 98 89 82 82 82 58 58 58 46 46 46 +- 38 38 38 34 34 34 34 34 34 34 34 34 +- 34 34 34 34 34 34 34 34 34 34 34 34 +- 34 34 34 34 34 34 38 38 38 42 42 42 +- 54 54 54 82 82 82 94 86 76 91 60 6 +-134 86 6 156 107 11 167 114 7 175 118 6 +-175 118 6 167 114 7 152 99 6 121 87 25 +-101 98 89 62 62 62 34 34 34 18 18 18 +- 6 6 6 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 6 6 6 10 10 10 +- 18 18 18 22 22 22 30 30 30 42 42 42 +- 50 50 50 66 66 66 86 86 86 101 98 89 +-106 86 58 98 70 6 104 69 6 104 69 6 +-104 69 6 91 60 6 82 62 34 90 90 90 +- 62 62 62 38 38 38 22 22 22 14 14 14 +- 10 10 10 10 10 10 10 10 10 10 10 10 +- 10 10 10 10 10 10 6 6 6 10 10 10 +- 10 10 10 10 10 10 10 10 10 14 14 14 +- 22 22 22 42 42 42 70 70 70 89 81 66 +- 80 54 7 104 69 6 124 80 6 137 92 6 +-134 86 6 116 81 8 100 82 52 86 86 86 +- 58 58 58 30 30 30 14 14 14 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 10 10 10 14 14 14 +- 18 18 18 26 26 26 38 38 38 54 54 54 +- 70 70 70 86 86 86 94 86 76 89 81 66 +- 89 81 66 86 86 86 74 74 74 50 50 50 +- 30 30 30 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 
+- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 18 18 18 34 34 34 58 58 58 +- 82 82 82 89 81 66 89 81 66 89 81 66 +- 94 86 66 94 86 76 74 74 74 50 50 50 +- 26 26 26 14 14 14 6 6 6 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 6 6 6 6 6 6 14 14 14 18 18 18 +- 30 30 30 38 38 38 46 46 46 54 54 54 +- 50 50 50 42 42 42 30 30 30 18 18 18 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 6 6 6 14 14 14 26 26 26 +- 38 38 38 50 50 50 58 58 58 58 58 58 +- 54 54 54 42 42 42 30 30 30 18 18 18 +- 10 10 10 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 6 6 6 10 10 10 14 14 14 18 18 18 +- 18 18 18 14 14 14 10 10 10 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 6 6 6 +- 14 14 14 18 18 18 22 22 22 22 22 22 +- 18 18 18 14 14 14 10 10 10 6 6 6 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 +- 0 0 0 0 0 0 0 0 0 0 0 0 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 
4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0 ++0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 ++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28 ++37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6 ++2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 ++4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0 ++1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137 ++153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0 ++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125 ++60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4 ++4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35 ++2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0 ++4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167 ++165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63 ++1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 ++3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167 ++163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5 ++0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159 ++37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 ++37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158 ++156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166 ++125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4 ++5 5 5 5 5 5 4 4 4 4 
4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 ++0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158 ++174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1 ++0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196 ++64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 ++5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134 ++156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157 ++156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167 ++174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0 ++1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0 ++13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153 ++174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2 ++22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193 ++90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3 ++0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174 ++174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155 ++156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153 ++163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17 ++4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 ++5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63 ++131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174 ++190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103 ++90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196 ++31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0 ++4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163 ++155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165 ++167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155 ++153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131 ++41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4 ++1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174 ++177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137 ++125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209 ++136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122 ++7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37 ++125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155 ++156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155 ++137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156 ++156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174 ++167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0 ++0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174 ++166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6 ++6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196 ++90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14 ++1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153 ++167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156 ++157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68 ++26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 
166 ++158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158 ++165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17 ++60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165 ++137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21 ++52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146 ++13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0 ++4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 ++0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166 ++158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158 ++167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0 ++4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158 ++174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156 ++155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125 ++137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125 ++16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188 ++136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14 ++2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5 ++4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0 ++37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157 ++157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174 ++153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0 ++4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37 ++125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154 ++156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163 ++174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0 ++4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211 ++136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2 ++1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4 ++2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0 ++0 0 0 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 ++4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127 ++158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156 ++153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125 ++37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4 ++4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0 ++4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165 ++154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174 ++174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3 ++32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193 ++28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5 ++50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1 ++0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81 ++2 0 0 0 0 0 ++4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2 ++0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174 ++174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153 ++165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6 ++4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3 ++4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174 ++174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158 ++60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148 ++136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13 ++22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132 ++136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0 ++26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165 ++37 38 37 0 0 0 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 ++13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165 ++153 152 153 153 
[... raw RGB pixel-value data (an image file carried inside the patch) omitted ...]
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c +index 3473e75..c930142 100644 +--- a/drivers/video/udlfb.c ++++ b/drivers/video/udlfb.c +@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y, + dlfb_urb_completion(urb); + + error: +- atomic_add(bytes_sent, &dev->bytes_sent); +- atomic_add(bytes_identical, &dev->bytes_identical); +- atomic_add(width*height*2, &dev->bytes_rendered); ++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent); ++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical); ++ atomic_add_unchecked(width*height*2, &dev->bytes_rendered); + end_cycles = get_cycles(); +- atomic_add(((unsigned int) ((end_cycles - start_cycles) ++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles) + >> 10)), /* Kcycles */ + &dev->cpu_kcycles_used); + +@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info, + dlfb_urb_completion(urb); + + error: +- atomic_add(bytes_sent, &dev->bytes_sent); +- atomic_add(bytes_identical, &dev->bytes_identical); +- atomic_add(bytes_rendered, &dev->bytes_rendered); ++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent); ++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical); ++ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered); + end_cycles = get_cycles(); +- atomic_add(((unsigned int) ((end_cycles - start_cycles) ++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles) + >> 10)), /* Kcycles */ + &dev->cpu_kcycles_used); + } +@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + return snprintf(buf, PAGE_SIZE, "%u\n", +- atomic_read(&dev->bytes_rendered)); ++ atomic_read_unchecked(&dev->bytes_rendered)); + } + + static ssize_t metrics_bytes_identical_show(struct device *fbdev, +@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + return snprintf(buf, PAGE_SIZE, "%u\n", +- atomic_read(&dev->bytes_identical)); ++ atomic_read_unchecked(&dev->bytes_identical)); + } + + static ssize_t metrics_bytes_sent_show(struct device *fbdev, +@@ -1384,7 +1384,7 @@ static ssize_t
metrics_bytes_sent_show(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + return snprintf(buf, PAGE_SIZE, "%u\n", +- atomic_read(&dev->bytes_sent)); ++ atomic_read_unchecked(&dev->bytes_sent)); + } + + static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, +@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + return snprintf(buf, PAGE_SIZE, "%u\n", +- atomic_read(&dev->cpu_kcycles_used)); ++ atomic_read_unchecked(&dev->cpu_kcycles_used)); + } + + static ssize_t edid_show( +@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + +- atomic_set(&dev->bytes_rendered, 0); +- atomic_set(&dev->bytes_identical, 0); +- atomic_set(&dev->bytes_sent, 0); +- atomic_set(&dev->cpu_kcycles_used, 0); ++ atomic_set_unchecked(&dev->bytes_rendered, 0); ++ atomic_set_unchecked(&dev->bytes_identical, 0); ++ atomic_set_unchecked(&dev->bytes_sent, 0); ++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0); + + return count; + } +diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c +index 7f8472c..9842e87 100644 +--- a/drivers/video/uvesafb.c ++++ b/drivers/video/uvesafb.c +@@ -19,6 +19,7 @@ + #include <linux/io.h> + #include <linux/mutex.h> + #include <linux/slab.h> ++#include <linux/moduleloader.h> + #include <video/edid.h> + #include <video/uvesafb.h> + #ifdef CONFIG_X86 +@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void) + NULL, + }; + +- return call_usermodehelper(v86d_path, argv, envp, 1); ++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC); + } + + /* +@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task, + if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { + par->pmi_setpal = par->ypan = 0; + } else { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_MODULES ++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx); ++#endif ++ if (!par->pmi_code) { ++ par->pmi_setpal = par->ypan = 0; ++ return 0; ++ } ++#endif ++ + par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4) + + task->t.regs.edi); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx); ++ pax_close_kernel(); ++ ++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]); ++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]); ++#else + par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1]; + par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2]; ++#endif ++ + printk(KERN_INFO "uvesafb: protected mode interface info at " + "%04x:%04x\n", + (u16)task->t.regs.es, (u16)task->t.regs.edi); +@@ -1821,6 +1844,11 @@ out: + if (par->vbe_modes) + kfree(par->vbe_modes); + ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + framebuffer_release(info); + return err; + } +@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev) + kfree(par->vbe_state_orig); + if (par->vbe_state_saved) + kfree(par->vbe_state_saved); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + } + + framebuffer_release(info); +diff --git a/drivers/video/vesafb.c 
b/drivers/video/vesafb.c +index 501b340..86bd4cf 100644 +--- a/drivers/video/vesafb.c ++++ b/drivers/video/vesafb.c +@@ -9,6 +9,7 @@ + */ + + #include <linux/module.h> ++#include <linux/moduleloader.h> + #include <linux/kernel.h> + #include <linux/errno.h> + #include <linux/string.h> +@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */ + static int vram_total __initdata; /* Set total amount of memory */ + static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */ + static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */ +-static void (*pmi_start)(void) __read_mostly; +-static void (*pmi_pal) (void) __read_mostly; ++static void (*pmi_start)(void) __read_only; ++static void (*pmi_pal) (void) __read_only; + static int depth __read_mostly; + static int vga_compat __read_mostly; + /* --------------------------------------------------------------------- */ +@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev) + unsigned int size_vmode; + unsigned int size_remap; + unsigned int size_total; ++ void *pmi_code = NULL; + + if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) + return -ENODEV; +@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev) + size_remap = size_total; + vesafb_fix.smem_len = size_remap; + +-#ifndef __i386__ +- screen_info.vesapm_seg = 0; +-#endif +- + if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { + printk(KERN_WARNING + "vesafb: cannot reserve video memory at 0x%lx\n", +@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev) + printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", + vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); + ++#ifdef __i386__ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_code = module_alloc_exec(screen_info.vesapm_size); ++ if (!pmi_code) ++#elif !defined(CONFIG_PAX_KERNEXEC) ++ if (0) ++#endif ++ ++#endif ++ screen_info.vesapm_seg = 0; ++ + if (screen_info.vesapm_seg) { +- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", +- screen_info.vesapm_seg,screen_info.vesapm_off); ++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n", ++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size); + } + + if (screen_info.vesapm_seg < 0xc000) +@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev) + + if (ypan || pmi_setpal) { + unsigned short *pmi_base; ++ + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); +- pmi_start = (void*)((char*)pmi_base + pmi_base[1]); +- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size); ++#else ++ pmi_code = pmi_base; ++#endif ++ ++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]); ++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_start = ktva_ktla(pmi_start); ++ pmi_pal = ktva_ktla(pmi_pal); ++ pax_close_kernel(); ++#endif ++ + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); + if (pmi_base[3]) { + printk(KERN_INFO "vesafb: pmi: ports = "); +@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev) + info->node, 
info->fix.id); + return 0; + err: ++ ++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ module_free_exec(NULL, pmi_code); ++#endif ++ + if (info->screen_base) + iounmap(info->screen_base); + framebuffer_release(info); +diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h +index 88714ae..16c2e11 100644 +--- a/drivers/video/via/via_clock.h ++++ b/drivers/video/via/via_clock.h +@@ -56,7 +56,7 @@ struct via_clock { + + void (*set_engine_pll_state)(u8 state); + void (*set_engine_pll)(struct via_pll_config config); +-}; ++} __no_const; + + + static inline u32 get_pll_internal_frequency(u32 ref_freq, +diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h +index e56c934..fc22f4b 100644 +--- a/drivers/xen/xen-pciback/conf_space.h ++++ b/drivers/xen/xen-pciback/conf_space.h +@@ -44,15 +44,15 @@ struct config_field { + struct { + conf_dword_write write; + conf_dword_read read; +- } dw; ++ } __no_const dw; + struct { + conf_word_write write; + conf_word_read read; +- } w; ++ } __no_const w; + struct { + conf_byte_write write; + conf_byte_read read; +- } b; ++ } __no_const b; + } u; + struct list_head list; + }; +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c +index 879ed88..bc03a01 100644 +--- a/fs/9p/vfs_inode.c ++++ b/fs/9p/vfs_inode.c +@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) + void + v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, + IS_ERR(s) ? "<error>" : s); +diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt +index 79e2ca7..5828ad1 100644 +--- a/fs/Kconfig.binfmt ++++ b/fs/Kconfig.binfmt +@@ -86,7 +86,7 @@ config HAVE_AOUT + + config BINFMT_AOUT + tristate "Kernel support for a.out and ECOFF binaries" +- depends on HAVE_AOUT ++ depends on HAVE_AOUT && BROKEN + ---help--- + A.out (Assembler.OUTput) is a set of formats for libraries and + executables used in the earliest versions of UNIX. 
Linux used +diff --git a/fs/aio.c b/fs/aio.c +index b9d64d8..86cb1d5 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx) + size += sizeof(struct io_event) * nr_events; + nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; + +- if (nr_pages < 0) ++ if (nr_pages <= 0) + return -EINVAL; + + nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); +@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb) + static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) + { + ssize_t ret; ++ struct iovec iovstack; + + #ifdef CONFIG_COMPAT + if (compat) + ret = compat_rw_copy_check_uvector(type, + (struct compat_iovec __user *)kiocb->ki_buf, +- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, ++ kiocb->ki_nbytes, 1, &iovstack, + &kiocb->ki_iovec, 1); + else + #endif + ret = rw_copy_check_uvector(type, + (struct iovec __user *)kiocb->ki_buf, +- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, ++ kiocb->ki_nbytes, 1, &iovstack, + &kiocb->ki_iovec, 1); + if (ret < 0) + goto out; + ++ if (kiocb->ki_iovec == &iovstack) { ++ kiocb->ki_inline_vec = iovstack; ++ kiocb->ki_iovec = &kiocb->ki_inline_vec; ++ } + kiocb->ki_nr_segs = kiocb->ki_nbytes; + kiocb->ki_cur_seg = 0; + /* ki_nbytes/left now reflect bytes instead of segs */ +diff --git a/fs/attr.c b/fs/attr.c +index 7ee7ba4..0c61a60 100644 +--- a/fs/attr.c ++++ b/fs/attr.c +@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset) + unsigned long limit; + + limit = rlimit(RLIMIT_FSIZE); ++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1); + if (limit != RLIM_INFINITY && offset > limit) + goto out_sig; + if (offset > inode->i_sb->s_maxbytes) +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c +index 6861f61..a25f010 100644 +--- a/fs/autofs4/waitq.c ++++ b/fs/autofs4/waitq.c +@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes) + { + unsigned long sigpipe, flags; + mm_segment_t fs; +- const char *data = (const char *)addr; ++ const char __user *data = (const char __force_user *)addr; + ssize_t wr = 0; + + /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/ +diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c +index 8342ca6..82fd192 100644 +--- a/fs/befs/linuxvfs.c ++++ b/fs/befs/linuxvfs.c +@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { + befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); + if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { +- char *link = nd_get_link(nd); ++ const char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); + } +diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c +index a6395bd..f1e376a 100644 +--- a/fs/binfmt_aout.c ++++ b/fs/binfmt_aout.c +@@ -16,6 +16,7 @@ + #include <linux/string.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/stat.h> + #include <linux/fcntl.h> + #include <linux/ptrace.h> +@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm) + #endif + # define START_STACK(u) ((void __user *)u.start_stack) + ++ memset(&dump, 0, sizeof(dump)); ++ + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; +@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm) + + /* If the size of the dump file exceeds the rlimit, then see what would happen + if we wrote the stack, but not the data area. 
*/ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1); + if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) + dump.u_dsize = 0; + + /* Make sure we have enough room to write the stack and data areas. */ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1); + if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) + dump.u_ssize = 0; + +@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) + rlim = rlimit(RLIMIT_DATA); + if (rlim >= RLIM_INFINITY) + rlim = ~0; ++ ++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); + if (ex.a_data + ex.a_bss > rlim) + return -ENOMEM; + +@@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) + current->mm->free_area_cache = current->mm->mmap_base; + current->mm->cached_hole_size = 0; + ++ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); ++ if (retval < 0) { ++ /* Someone check-me: is this error path enough? */ ++ send_sig(SIGKILL, current, 0); ++ return retval; ++ } ++ + install_exec_creds(bprm); + current->flags &= ~PF_FORKNOEXEC; + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { ++ current->mm->pax_flags |= MF_PAX_PAGEEXEC; ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP) ++ current->mm->pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) ++ current->mm->pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++ } ++#endif ++ + if (N_MAGIC(ex) == OMAGIC) { + unsigned long text_addr, map_size; + loff_t pos; +@@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) + + down_write(¤t->mm->mmap_sem); + error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, +- PROT_READ | PROT_WRITE | PROT_EXEC, ++ PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, + fd_offset + ex.a_text); + up_write(¤t->mm->mmap_sem); +@@ -352,13 +387,6 @@ beyond_if: + return retval; + } + +- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); +- if (retval < 0) { +- /* Someone check-me: is this error path enough? 
*/ +- send_sig(SIGKILL, current, 0); +- return retval; +- } +- + current->mm->start_stack = + (unsigned long) create_aout_tables((char __user *) bprm->p, bprm); + #ifdef __alpha__ +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 6ff96c6..b5fb43a 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -32,6 +32,7 @@ + #include <linux/elf.h> + #include <linux/utsname.h> + #include <linux/coredump.h> ++#include <linux/xattr.h> + #include <asm/uaccess.h> + #include <asm/param.h> + #include <asm/page.h> +@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm); + #define elf_core_dump NULL + #endif + ++#ifdef CONFIG_PAX_MPROTECT ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags); ++#endif ++ + #if ELF_EXEC_PAGESIZE > PAGE_SIZE + #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE + #else +@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = { + .load_binary = load_elf_binary, + .load_shlib = load_elf_library, + .core_dump = elf_core_dump, ++ ++#ifdef CONFIG_PAX_MPROTECT ++ .handle_mprotect= elf_handle_mprotect, ++#endif ++ + .min_coredump = ELF_EXEC_PAGESIZE, + }; + +@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = { + + static int set_brk(unsigned long start, unsigned long end) + { ++ unsigned long e = end; ++ + start = ELF_PAGEALIGN(start); + end = ELF_PAGEALIGN(end); + if (end > start) { +@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end) + if (BAD_ADDR(addr)) + return addr; + } +- current->mm->start_brk = current->mm->brk = end; ++ current->mm->start_brk = current->mm->brk = e; + return 0; + } + +@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + elf_addr_t __user *u_rand_bytes; + const char *k_platform = ELF_PLATFORM; + const char *k_base_platform = ELF_BASE_PLATFORM; +- unsigned char k_rand_bytes[16]; ++ u32 k_rand_bytes[4]; + int items; + elf_addr_t *elf_info; + int ei_index = 0; + const struct cred *cred = current_cred(); + struct vm_area_struct *vma; ++ unsigned long saved_auxv[AT_VECTOR_SIZE]; + + /* + * In some cases (e.g. Hyper-Threading), we want to avoid L1 +@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + * Generate 16 random bytes for userspace PRNG seeding. + */ + get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); +- u_rand_bytes = (elf_addr_t __user *) +- STACK_ALLOC(p, sizeof(k_rand_bytes)); ++ srandom32(k_rand_bytes[0] ^ random32()); ++ srandom32(k_rand_bytes[1] ^ random32()); ++ srandom32(k_rand_bytes[2] ^ random32()); ++ srandom32(k_rand_bytes[3] ^ random32()); ++ p = STACK_ROUND(p, sizeof(k_rand_bytes)); ++ u_rand_bytes = (elf_addr_t __user *) p; + if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) + return -EFAULT; + +@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + return -EFAULT; + current->mm->env_end = p; + ++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t)); ++ + /* Put the elf_info on the stack in the right place. 
*/ + sp = (elf_addr_t __user *)envp + 1; +- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t))) ++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t))) + return -EFAULT; + return 0; + } +@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + { + struct elf_phdr *elf_phdata; + struct elf_phdr *eppnt; +- unsigned long load_addr = 0; ++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE; + int load_addr_set = 0; + unsigned long last_bss = 0, elf_bss = 0; +- unsigned long error = ~0UL; ++ unsigned long error = -EINVAL; + unsigned long total_size; + int retval, i, size; + +@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + goto out_close; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ + eppnt = elf_phdata; + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { + if (eppnt->p_type == PT_LOAD) { +@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + k = load_addr + eppnt->p_vaddr; + if (BAD_ADDR(k) || + eppnt->p_filesz > eppnt->p_memsz || +- eppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - eppnt->p_memsz < k) { ++ eppnt->p_memsz > pax_task_size || ++ pax_task_size - eppnt->p_memsz < k) { + error = -ENOMEM; + goto out_close; + } +@@ -528,6 +552,351 @@ out: + return error; + } + ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) ++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (elf_phdata->p_flags & PF_PAGEEXEC) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (elf_phdata->p_flags & PF_SEGMEXEC) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (elf_phdata->p_flags & PF_EMUTRAMP) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (elf_phdata->p_flags & PF_MPROTECT) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++#endif ++ ++ return pax_flags; ++} ++ ++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if 
(!(elf_phdata->p_flags & PF_NOMPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++#endif ++ ++ return pax_flags; ++} ++ ++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_EI_PAX ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++#else ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) { ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ pax_flags |= MF_PAX_SEGMEXEC; ++ } ++#endif ++ ++#endif ++ ++ return pax_flags; ++} ++ ++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) ++{ ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ unsigned long i; ++ ++ for (i = 0UL; i < elf_ex->e_phnum; i++) ++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) { ++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || ++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || ++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || ++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || ++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP))) ++ return ~0UL; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_parse_pt_pax_softmode(&elf_phdata[i]); ++ else ++#endif ++ ++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]); ++ break; ++ } ++#endif ++ ++ return ~0UL; ++} ++ ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (pax_flags_softmode & MF_PAX_PAGEEXEC) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_flags_softmode & MF_PAX_SEGMEXEC) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ 
pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (pax_flags_softmode & MF_PAX_EMUTRAMP) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (pax_flags_softmode & MF_PAX_MPROTECT) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++ ++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++static unsigned long pax_parse_xattr_pax(struct file * const file) ++{ ++ ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ ssize_t xattr_size, i; ++ unsigned char xattr_value[5]; ++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL; ++ ++ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value); ++ if (xattr_size <= 0) ++ return ~0UL; ++ ++ for (i = 0; i < xattr_size; i++) ++ switch (xattr_value[i]) { ++ default: ++ return ~0UL; ++ ++#define parse_flag(option1, option2, flag) \ ++ case option1: \ ++ pax_flags_hardmode |= MF_PAX_##flag; \ ++ break; \ ++ case option2: \ ++ pax_flags_softmode |= MF_PAX_##flag; \ ++ break; ++ ++ parse_flag('p', 'P', PAGEEXEC); ++ parse_flag('e', 'E', EMUTRAMP); ++ parse_flag('m', 'M', MPROTECT); ++ parse_flag('r', 'R', RANDMMAP); ++ parse_flag('s', 'S', SEGMEXEC); ++ ++#undef parse_flag ++ } ++ ++ if (pax_flags_hardmode & pax_flags_softmode) ++ return ~0UL; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_parse_xattr_pax_softmode(pax_flags_softmode); ++ else ++#endif ++ ++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode); ++#else ++ return ~0UL; ++#endif ++ ++} ++ ++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file) ++{ ++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags; ++ ++ pax_flags = pax_parse_ei_pax(elf_ex); ++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata); ++ xattr_pax_flags = pax_parse_xattr_pax(file); ++ ++ if (pt_pax_flags == ~0UL) ++ pt_pax_flags = xattr_pax_flags; ++ else if (xattr_pax_flags == ~0UL) ++ xattr_pax_flags = pt_pax_flags; ++ if (pt_pax_flags != xattr_pax_flags) ++ return -EINVAL; ++ if (pt_pax_flags != ~0UL) ++ pax_flags = pt_pax_flags; ++ ++ if (0 > pax_check_flags(&pax_flags)) ++ return -EINVAL; ++ ++ 
current->mm->pax_flags = pax_flags; ++ return 0; ++} ++#endif ++ + /* + * These are the functions used to load ELF style executables and shared + * libraries. There is no binary dependent code anywhere else. +@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top) + { + unsigned int random_variable = 0; + ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ return stack_top - current->mm->delta_stack; ++#endif ++ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { + random_variable = get_random_int() & STACK_RND_MASK; +@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + unsigned long load_addr = 0, load_bias = 0; + int load_addr_set = 0; + char * elf_interpreter = NULL; +- unsigned long error; ++ unsigned long error = 0; + struct elf_phdr *elf_ppnt, *elf_phdata; + unsigned long elf_bss, elf_brk; + int retval, i; +@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + unsigned long start_code, end_code, start_data, end_data; + unsigned long reloc_func_desc __maybe_unused = 0; + int executable_stack = EXSTACK_DEFAULT; +- unsigned long def_flags = 0; + struct { + struct elfhdr elf_ex; + struct elfhdr interp_elf_ex; + } *loc; ++ unsigned long pax_task_size = TASK_SIZE; + + loc = kmalloc(sizeof(*loc), GFP_KERNEL); + if (!loc) { +@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + + /* OK, This is the point of no return */ + current->flags &= ~PF_FORKNOEXEC; +- current->mm->def_flags = def_flags; ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ current->mm->call_dl_resolve = 0UL; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ current->mm->call_syscall = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ current->mm->delta_mmap = 0UL; ++ current->mm->delta_stack = 0UL; ++#endif ++ ++ current->mm->def_flags = 0; ++ ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) ++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) { ++ send_sig(SIGKILL, current, 0); ++ goto out_free_dentry; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++ pax_set_initial_flags(bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++ if (pax_set_initial_flags_func) ++ (pax_set_initial_flags_func)(bprm); ++#endif ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) { ++ current->mm->context.user_cs_limit = PAGE_SIZE; ++ current->mm->def_flags |= VM_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE; ++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE; ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++ current->mm->def_flags |= VM_NOHUGEPAGE; ++ } ++#endif ++ ++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu()); ++ put_cpu(); ++ } ++#endif + + /* Do this immediately, since STACK_TOP as used in setup_arg_pages + may depend on the personality. 
*/ + SET_PERSONALITY(loc->elf_ex); ++ ++#ifdef CONFIG_PAX_ASLR ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) { ++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT; ++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ executable_stack = EXSTACK_DISABLE_X; ++ current->personality &= ~READ_IMPLIES_EXEC; ++ } else ++#endif ++ + if (elf_read_implies_exec(loc->elf_ex, executable_stack)) + current->personality |= READ_IMPLIES_EXEC; + +@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + #else + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ /* PaX: randomize base address at the default exe base if requested */ ++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) { ++#ifdef CONFIG_SPARC64 ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1); ++#else ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT; ++#endif ++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias); ++ elf_flags |= MAP_FIXED; ++ } ++#endif ++ + } + + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, +@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + * allowed task size. Note that p_filesz must always be + * <= p_memsz so it is only necessary to check p_memsz. + */ +- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || +- elf_ppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - elf_ppnt->p_memsz < k) { ++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz || ++ elf_ppnt->p_memsz > pax_task_size || ++ pax_task_size - elf_ppnt->p_memsz < k) { + /* set_brk can never work. Avoid overflows. */ + send_sig(SIGKILL, current, 0); + retval = -EINVAL; +@@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + start_data += load_bias; + end_data += load_bias; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) ++ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4); ++#endif ++ + /* Calling set_brk effectively mmaps the pages that we need + * for the bss and break sections. We must do this before + * mapping in the interpreter, to make sure it doesn't wind +@@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + goto out_free_dentry; + } + if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { +- send_sig(SIGSEGV, current, 0); +- retval = -EFAULT; /* Nobody gets to see this, but.. */ +- goto out_free_dentry; ++ /* ++ * This bss-zeroing can fail if the ELF ++ * file specifies odd protections. So ++ * we don't check the return value ++ */ + } + + if (elf_interpreter) { +@@ -1098,7 +1563,7 @@ out: + * Decide what to dump of a segment, part, all or none. 
+ */ + static unsigned long vma_dump_size(struct vm_area_struct *vma, +- unsigned long mm_flags) ++ unsigned long mm_flags, long signr) + { + #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) + +@@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, + if (vma->vm_file == NULL) + return 0; + +- if (FILTER(MAPPED_PRIVATE)) ++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE)) + goto whole; + + /* +@@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) + { + elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; + int i = 0; +- do ++ do { + i += 2; +- while (auxv[i - 2] != AT_NULL); ++ } while (auxv[i - 2] != AT_NULL); + fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); + } + +@@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, + } + + static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma, +- unsigned long mm_flags) ++ struct coredump_params *cprm) + { + struct vm_area_struct *vma; + size_t size = 0; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) +- size += vma_dump_size(vma, mm_flags); ++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr); + return size; + } + +@@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm) + + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); + +- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags); ++ offset += elf_core_vma_data_size(gate_vma, cprm); + offset += elf_core_extra_data_size(); + e_shoff = offset; + +@@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm) + offset = dataoff; + + size += sizeof(*elf); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf))) + goto end_coredump; + + size += sizeof(*phdr4note); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note))) + goto end_coredump; +@@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm) + phdr.p_offset = offset; + phdr.p_vaddr = vma->vm_start; + phdr.p_paddr = 0; +- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags); ++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr); + phdr.p_memsz = vma->vm_end - vma->vm_start; + offset += phdr.p_filesz; + phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; +@@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm) + phdr.p_align = ELF_EXEC_PAGESIZE; + + size += sizeof(phdr); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, &phdr, sizeof(phdr))) + goto end_coredump; +@@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm) + unsigned long addr; + unsigned long end; + +- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags); ++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr); + + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { + struct page *page; +@@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm) + page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); ++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1); + stop = ((size += PAGE_SIZE) > cprm->limit) || + !dump_write(cprm->file, kaddr, + PAGE_SIZE); +@@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm) + + if (e_phnum == PN_XNUM) { + size += sizeof(*shdr4extnum); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, shdr4extnum, + sizeof(*shdr4extnum))) +@@ -2075,6 +2545,97 @@ out: + + #endif /* CONFIG_ELF_CORE */ + ++#ifdef CONFIG_PAX_MPROTECT ++/* PaX: non-PIC ELF libraries need relocations on their executable segments ++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly ++ * we'll remove VM_MAYWRITE for good on RELRO segments. ++ * ++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment ++ * basis because we want to allow the common case and not the special ones. ++ */ ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags) ++{ ++ struct elfhdr elf_h; ++ struct elf_phdr elf_p; ++ unsigned long i; ++ unsigned long oldflags; ++ bool is_textrel_rw, is_textrel_rx, is_relro; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT)) ++ return; ++ ++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ); ++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ; ++ ++#ifdef CONFIG_PAX_ELFRELOCS ++ /* possible TEXTREL */ ++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ); ++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ); ++#else ++ is_textrel_rw = false; ++ is_textrel_rx = false; ++#endif ++ ++ /* possible RELRO */ ++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ); ++ ++ if (!is_textrel_rw && !is_textrel_rx && !is_relro) ++ return; ++ ++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) || ++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || ++ ++#ifdef CONFIG_PAX_ETEXECRELOCS ++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++#else ++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) || ++#endif ++ ++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++ !elf_check_arch(&elf_h) || ++ elf_h.e_phentsize != sizeof(struct elf_phdr) || ++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr)) ++ return; ++ ++ for (i = 0UL; i < elf_h.e_phnum; i++) { ++ if (sizeof(elf_p) != 
kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p))) ++ return; ++ switch (elf_p.p_type) { ++ case PT_DYNAMIC: ++ if (!is_textrel_rw && !is_textrel_rx) ++ continue; ++ i = 0UL; ++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) { ++ elf_dyn dyn; ++ ++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn))) ++ return; ++ if (dyn.d_tag == DT_NULL) ++ return; ++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { ++ gr_log_textrel(vma); ++ if (is_textrel_rw) ++ vma->vm_flags |= VM_MAYWRITE; ++ else ++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */ ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ i++; ++ } ++ return; ++ ++ case PT_GNU_RELRO: ++ if (!is_relro) ++ continue; ++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start) ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ } ++} ++#endif ++ + static int __init init_elf_binfmt(void) + { + return register_binfmt(&elf_format); +diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c +index 1bffbe0..c8c283e 100644 +--- a/fs/binfmt_flat.c ++++ b/fs/binfmt_flat.c +@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm, + realdatastart = (unsigned long) -ENOMEM; + printk("Unable to allocate RAM for process data, errno %d\n", + (int)-realdatastart); ++ down_write(&current->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); ++ up_write(&current->mm->mmap_sem); + ret = realdatastart; + goto err; + } +@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm, + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read data+bss, errno %d\n", (int)-result); ++ down_write(&current->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); + do_munmap(current->mm, realdatastart, len); ++ up_write(&current->mm->mmap_sem); + ret = result; + goto err; + } +@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm, + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read code+data+bss, errno %d\n",(int)-result); ++ down_write(&current->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(unsigned long)); ++ up_write(&current->mm->mmap_sem); + ret = result; + goto err; + } +diff --git a/fs/bio.c b/fs/bio.c +index b1fe82c..84da0a9 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err) + const int read = bio_data_dir(bio) == READ; + struct bio_map_data *bmd = bio->bi_private; + int i; +- char *p = bmd->sgvecs[0].iov_base; ++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base; + + __bio_for_each_segment(bvec, bio, i, 0) { + char *addr = page_address(bvec->bv_page); +diff --git a/fs/block_dev.c b/fs/block_dev.c +index abe9b48..5df59e8 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, + else if (bdev->bd_contains == bdev) + return true; /* is a whole device which isn't held */ + +- else if (whole->bd_holder == bd_may_claim) ++ else if (whole->bd_holder == (void *)bd_may_claim) + return true; /* is a partition of a device that is being partitioned */ + else if (whole->bd_holder != NULL) + return false; /* is a partition of a held device */ +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index dede441..f2a2507 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -488,9 +488,12 @@ 
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + free_extent_buffer(buf); + add_root_to_dirty_list(root); + } else { +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) +- parent_start = parent->start; +- else ++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { ++ if (parent) ++ parent_start = parent->start; ++ else ++ parent_start = 0; ++ } else + parent_start = 0; + + WARN_ON(trans->transid != btrfs_header_generation(parent)); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index fd1a06d..6e9033d 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6895,7 +6895,7 @@ fail: + return -ENOMEM; + } + +-static int btrfs_getattr(struct vfsmount *mnt, ++int btrfs_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) + { + struct inode *inode = dentry->d_inode; +@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt, + return 0; + } + ++EXPORT_SYMBOL(btrfs_getattr); ++ ++dev_t get_btrfs_dev_from_inode(struct inode *inode) ++{ ++ return BTRFS_I(inode)->root->anon_dev; ++} ++EXPORT_SYMBOL(get_btrfs_dev_from_inode); ++ + /* + * If a file is moved, it will inherit the cow and compression flags of the new + * directory. +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index c04f02c..f5c9e2e 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) + for (i = 0; i < num_types; i++) { + struct btrfs_space_info *tmp; + ++ /* Don't copy in more than we allocated */ + if (!slot_count) + break; + ++ slot_count--; ++ + info = NULL; + rcu_read_lock(); + list_for_each_entry_rcu(tmp, &root->fs_info->space_info, +@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) + memcpy(dest, &space, sizeof(space)); + dest++; + space_args.total_spaces++; +- slot_count--; + } +- if (!slot_count) +- break; + } + up_read(&info->groups_sem); + } + +- user_dest = (struct btrfs_ioctl_space_info *) ++ user_dest = (struct btrfs_ioctl_space_info __user *) + (arg + sizeof(struct btrfs_ioctl_space_args)); + + if (copy_to_user(user_dest, dest_orig, alloc_size)) +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index cfb5543..1ae7347 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del) + } + spin_unlock(&rc->reloc_root_tree.lock); + +- BUG_ON((struct btrfs_root *)node->data != root); ++ BUG_ON(!node || (struct btrfs_root *)node->data != root); + + if (!del) { + spin_lock(&rc->reloc_root_tree.lock); +diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c +index 622f469..e8d2d55 100644 +--- a/fs/cachefiles/bind.c ++++ b/fs/cachefiles/bind.c +@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) + args); + + /* start by checking things over */ +- ASSERT(cache->fstop_percent >= 0 && +- cache->fstop_percent < cache->fcull_percent && ++ ASSERT(cache->fstop_percent < cache->fcull_percent && + cache->fcull_percent < cache->frun_percent && + cache->frun_percent < 100); + +- ASSERT(cache->bstop_percent >= 0 && +- cache->bstop_percent < cache->bcull_percent && ++ ASSERT(cache->bstop_percent < cache->bcull_percent && + cache->bcull_percent < cache->brun_percent && + cache->brun_percent < 100); + +diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c +index 0a1467b..6a53245 100644 +--- a/fs/cachefiles/daemon.c ++++ b/fs/cachefiles/daemon.c +@@ -196,7 +196,7 @@ static ssize_t 
cachefiles_daemon_read(struct file *file, char __user *_buffer, + if (n > buflen) + return -EMSGSIZE; + +- if (copy_to_user(_buffer, buffer, n) != 0) ++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0) + return -EFAULT; + + return n; +@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file, + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + +- if (datalen < 0 || datalen > PAGE_SIZE - 1) ++ if (datalen > PAGE_SIZE - 1) + return -EOPNOTSUPP; + + /* drag the command string into the kernel so we can parse it */ +@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (fstop < 0 || fstop >= cache->fcull_percent) ++ if (fstop >= cache->fcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->fstop_percent = fstop; +@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (bstop < 0 || bstop >= cache->bcull_percent) ++ if (bstop >= cache->bcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->bstop_percent = bstop; +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index bd6bc1b..b627b53 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -57,7 +57,7 @@ struct cachefiles_cache { + wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */ + struct rb_root active_nodes; /* active nodes (can't be culled) */ + rwlock_t active_lock; /* lock for active_nodes */ +- atomic_t gravecounter; /* graveyard uniquifier */ ++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */ + unsigned frun_percent; /* when to stop culling (% files) */ + unsigned fcull_percent; /* when to start culling (% files) */ + unsigned fstop_percent; /* when to stop allocating (% files) */ +@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache, + * proc.c + */ + #ifdef CONFIG_CACHEFILES_HISTOGRAM +-extern atomic_t cachefiles_lookup_histogram[HZ]; +-extern atomic_t cachefiles_mkdir_histogram[HZ]; +-extern atomic_t cachefiles_create_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_create_histogram[HZ]; + + extern int __init cachefiles_proc_init(void); + extern void cachefiles_proc_cleanup(void); + static inline +-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) ++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif) + { + unsigned long jif = jiffies - start_jif; + if (jif >= HZ) + jif = HZ - 1; +- atomic_inc(&histogram[jif]); ++ atomic_inc_unchecked(&histogram[jif]); + } + + #else +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index a0358c2..d6137f2 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -318,7 +318,7 @@ try_again: + /* first step is to make up a grave dentry in the graveyard */ + sprintf(nbuffer, "%08x%08x", + (uint32_t) get_seconds(), +- (uint32_t) atomic_inc_return(&cache->gravecounter)); ++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter)); + + /* do the multiway lock magic */ + trap = lock_rename(cache->graveyard, dir); +diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c +index eccd339..4c1d995 100644 +--- a/fs/cachefiles/proc.c ++++ b/fs/cachefiles/proc.c +@@ -14,9 +14,9 @@ + #include <linux/seq_file.h> + 
#include "internal.h" + +-atomic_t cachefiles_lookup_histogram[HZ]; +-atomic_t cachefiles_mkdir_histogram[HZ]; +-atomic_t cachefiles_create_histogram[HZ]; ++atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++atomic_unchecked_t cachefiles_create_histogram[HZ]; + + /* + * display the latency histogram +@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v) + return 0; + default: + index = (unsigned long) v - 3; +- x = atomic_read(&cachefiles_lookup_histogram[index]); +- y = atomic_read(&cachefiles_mkdir_histogram[index]); +- z = atomic_read(&cachefiles_create_histogram[index]); ++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]); ++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]); ++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]); + if (x == 0 && y == 0 && z == 0) + return 0; + +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index 0e3c092..818480e 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = file->f_op->write( +- file, (const void __user *) data, len, &pos); ++ file, (const void __force_user *) data, len, &pos); + set_fs(old_fs); + kunmap(page); + if (ret != len) +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c +index 9895400..78a67e7 100644 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) + struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_mds_client *mdsc = fsc->mdsc; + unsigned frag = fpos_frag(filp->f_pos); +- int off = fpos_off(filp->f_pos); ++ unsigned int off = fpos_off(filp->f_pos); + int err; + u32 ftype; + struct ceph_mds_reply_info_parsed *rinfo; +@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, + if (nd && + (nd->flags & LOOKUP_OPEN) && + !(nd->intent.open.flags & O_CREAT)) { +- int mode = nd->intent.open.create_mode & ~current->fs->umask; ++ int mode = nd->intent.open.create_mode & ~current_umask(); + return ceph_lookup_open(dir, dentry, nd, mode, 1); + } + +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c +index cfd1ce3..6b13a74 100644 +--- a/fs/cifs/asn1.c ++++ b/fs/cifs/asn1.c +@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid) + + static int + asn1_oid_decode(struct asn1_ctx *ctx, ++ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2); ++static int ++asn1_oid_decode(struct asn1_ctx *ctx, + unsigned char *eoc, unsigned long **oid, unsigned int *len) + { + unsigned long subid; +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index 84e8c07..6170d31 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, + + if (c == '1' || c == 'y' || c == 'Y' || c == '0') { + #ifdef CONFIG_CIFS_STATS2 +- atomic_set(&totBufAllocCount, 0); +- atomic_set(&totSmBufAllocCount, 0); ++ atomic_set_unchecked(&totBufAllocCount, 0); ++ atomic_set_unchecked(&totSmBufAllocCount, 0); + #endif /* CONFIG_CIFS_STATS2 */ + spin_lock(&cifs_tcp_ses_lock); + list_for_each(tmp1, &cifs_tcp_ses_list) { +@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file, + tcon = list_entry(tmp3, + struct cifs_tcon, + tcon_list); +- atomic_set(&tcon->num_smbs_sent, 0); +- atomic_set(&tcon->num_writes, 0); +- 
atomic_set(&tcon->num_reads, 0); +- atomic_set(&tcon->num_oplock_brks, 0); +- atomic_set(&tcon->num_opens, 0); +- atomic_set(&tcon->num_posixopens, 0); +- atomic_set(&tcon->num_posixmkdirs, 0); +- atomic_set(&tcon->num_closes, 0); +- atomic_set(&tcon->num_deletes, 0); +- atomic_set(&tcon->num_mkdirs, 0); +- atomic_set(&tcon->num_rmdirs, 0); +- atomic_set(&tcon->num_renames, 0); +- atomic_set(&tcon->num_t2renames, 0); +- atomic_set(&tcon->num_ffirst, 0); +- atomic_set(&tcon->num_fnext, 0); +- atomic_set(&tcon->num_fclose, 0); +- atomic_set(&tcon->num_hardlinks, 0); +- atomic_set(&tcon->num_symlinks, 0); +- atomic_set(&tcon->num_locks, 0); ++ atomic_set_unchecked(&tcon->num_smbs_sent, 0); ++ atomic_set_unchecked(&tcon->num_writes, 0); ++ atomic_set_unchecked(&tcon->num_reads, 0); ++ atomic_set_unchecked(&tcon->num_oplock_brks, 0); ++ atomic_set_unchecked(&tcon->num_opens, 0); ++ atomic_set_unchecked(&tcon->num_posixopens, 0); ++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0); ++ atomic_set_unchecked(&tcon->num_closes, 0); ++ atomic_set_unchecked(&tcon->num_deletes, 0); ++ atomic_set_unchecked(&tcon->num_mkdirs, 0); ++ atomic_set_unchecked(&tcon->num_rmdirs, 0); ++ atomic_set_unchecked(&tcon->num_renames, 0); ++ atomic_set_unchecked(&tcon->num_t2renames, 0); ++ atomic_set_unchecked(&tcon->num_ffirst, 0); ++ atomic_set_unchecked(&tcon->num_fnext, 0); ++ atomic_set_unchecked(&tcon->num_fclose, 0); ++ atomic_set_unchecked(&tcon->num_hardlinks, 0); ++ atomic_set_unchecked(&tcon->num_symlinks, 0); ++ atomic_set_unchecked(&tcon->num_locks, 0); + } + } + } +@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) + smBufAllocCount.counter, cifs_min_small); + #ifdef CONFIG_CIFS_STATS2 + seq_printf(m, "Total Large %d Small %d Allocations\n", +- atomic_read(&totBufAllocCount), +- atomic_read(&totSmBufAllocCount)); ++ atomic_read_unchecked(&totBufAllocCount), ++ atomic_read_unchecked(&totSmBufAllocCount)); + #endif /* CONFIG_CIFS_STATS2 */ + + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); +@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) + if (tcon->need_reconnect) + seq_puts(m, "\tDISCONNECTED "); + seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", +- atomic_read(&tcon->num_smbs_sent), +- atomic_read(&tcon->num_oplock_brks)); ++ atomic_read_unchecked(&tcon->num_smbs_sent), ++ atomic_read_unchecked(&tcon->num_oplock_brks)); + seq_printf(m, "\nReads: %d Bytes: %lld", +- atomic_read(&tcon->num_reads), ++ atomic_read_unchecked(&tcon->num_reads), + (long long)(tcon->bytes_read)); + seq_printf(m, "\nWrites: %d Bytes: %lld", +- atomic_read(&tcon->num_writes), ++ atomic_read_unchecked(&tcon->num_writes), + (long long)(tcon->bytes_written)); + seq_printf(m, "\nFlushes: %d", +- atomic_read(&tcon->num_flushes)); ++ atomic_read_unchecked(&tcon->num_flushes)); + seq_printf(m, "\nLocks: %d HardLinks: %d " + "Symlinks: %d", +- atomic_read(&tcon->num_locks), +- atomic_read(&tcon->num_hardlinks), +- atomic_read(&tcon->num_symlinks)); ++ atomic_read_unchecked(&tcon->num_locks), ++ atomic_read_unchecked(&tcon->num_hardlinks), ++ atomic_read_unchecked(&tcon->num_symlinks)); + seq_printf(m, "\nOpens: %d Closes: %d " + "Deletes: %d", +- atomic_read(&tcon->num_opens), +- atomic_read(&tcon->num_closes), +- atomic_read(&tcon->num_deletes)); ++ atomic_read_unchecked(&tcon->num_opens), ++ atomic_read_unchecked(&tcon->num_closes), ++ atomic_read_unchecked(&tcon->num_deletes)); + seq_printf(m, "\nPosix Opens: %d " + "Posix Mkdirs: %d", +- 
atomic_read(&tcon->num_posixopens), +- atomic_read(&tcon->num_posixmkdirs)); ++ atomic_read_unchecked(&tcon->num_posixopens), ++ atomic_read_unchecked(&tcon->num_posixmkdirs)); + seq_printf(m, "\nMkdirs: %d Rmdirs: %d", +- atomic_read(&tcon->num_mkdirs), +- atomic_read(&tcon->num_rmdirs)); ++ atomic_read_unchecked(&tcon->num_mkdirs), ++ atomic_read_unchecked(&tcon->num_rmdirs)); + seq_printf(m, "\nRenames: %d T2 Renames %d", +- atomic_read(&tcon->num_renames), +- atomic_read(&tcon->num_t2renames)); ++ atomic_read_unchecked(&tcon->num_renames), ++ atomic_read_unchecked(&tcon->num_t2renames)); + seq_printf(m, "\nFindFirst: %d FNext %d " + "FClose %d", +- atomic_read(&tcon->num_ffirst), +- atomic_read(&tcon->num_fnext), +- atomic_read(&tcon->num_fclose)); ++ atomic_read_unchecked(&tcon->num_ffirst), ++ atomic_read_unchecked(&tcon->num_fnext), ++ atomic_read_unchecked(&tcon->num_fclose)); + } + } + } +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 8f1fe32..38f9e27 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -989,7 +989,7 @@ cifs_init_request_bufs(void) + cifs_req_cachep = kmem_cache_create("cifs_request", + CIFSMaxBufSize + + MAX_CIFS_HDR_SIZE, 0, +- SLAB_HWCACHE_ALIGN, NULL); ++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL); + if (cifs_req_cachep == NULL) + return -ENOMEM; + +@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void) + efficient to alloc 1 per page off the slab compared to 17K (5page) + alloc of large cifs buffers even when page debugging is on */ + cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", +- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, ++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, + NULL); + if (cifs_sm_req_cachep == NULL) { + mempool_destroy(cifs_req_poolp); +@@ -1101,8 +1101,8 @@ init_cifs(void) + atomic_set(&bufAllocCount, 0); + atomic_set(&smBufAllocCount, 0); + #ifdef CONFIG_CIFS_STATS2 +- atomic_set(&totBufAllocCount, 0); +- atomic_set(&totSmBufAllocCount, 0); ++ atomic_set_unchecked(&totBufAllocCount, 0); ++ atomic_set_unchecked(&totSmBufAllocCount, 0); + #endif /* CONFIG_CIFS_STATS2 */ + + atomic_set(&midCount, 0); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 8238aa1..0347196 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -392,28 +392,28 @@ struct cifs_tcon { + __u16 Flags; /* optional support bits */ + enum statusEnum tidStatus; + #ifdef CONFIG_CIFS_STATS +- atomic_t num_smbs_sent; +- atomic_t num_writes; +- atomic_t num_reads; +- atomic_t num_flushes; +- atomic_t num_oplock_brks; +- atomic_t num_opens; +- atomic_t num_closes; +- atomic_t num_deletes; +- atomic_t num_mkdirs; +- atomic_t num_posixopens; +- atomic_t num_posixmkdirs; +- atomic_t num_rmdirs; +- atomic_t num_renames; +- atomic_t num_t2renames; +- atomic_t num_ffirst; +- atomic_t num_fnext; +- atomic_t num_fclose; +- atomic_t num_hardlinks; +- atomic_t num_symlinks; +- atomic_t num_locks; +- atomic_t num_acl_get; +- atomic_t num_acl_set; ++ atomic_unchecked_t num_smbs_sent; ++ atomic_unchecked_t num_writes; ++ atomic_unchecked_t num_reads; ++ atomic_unchecked_t num_flushes; ++ atomic_unchecked_t num_oplock_brks; ++ atomic_unchecked_t num_opens; ++ atomic_unchecked_t num_closes; ++ atomic_unchecked_t num_deletes; ++ atomic_unchecked_t num_mkdirs; ++ atomic_unchecked_t num_posixopens; ++ atomic_unchecked_t num_posixmkdirs; ++ atomic_unchecked_t num_rmdirs; ++ atomic_unchecked_t num_renames; ++ atomic_unchecked_t num_t2renames; ++ atomic_unchecked_t num_ffirst; ++ atomic_unchecked_t num_fnext; ++ atomic_unchecked_t 
num_fclose; ++ atomic_unchecked_t num_hardlinks; ++ atomic_unchecked_t num_symlinks; ++ atomic_unchecked_t num_locks; ++ atomic_unchecked_t num_acl_get; ++ atomic_unchecked_t num_acl_set; + #ifdef CONFIG_CIFS_STATS2 + unsigned long long time_writes; + unsigned long long time_reads; +@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim) + } + + #ifdef CONFIG_CIFS_STATS +-#define cifs_stats_inc atomic_inc ++#define cifs_stats_inc atomic_inc_unchecked + + static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon, + unsigned int bytes) +@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount; + /* Various Debug counters */ + GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ + #ifdef CONFIG_CIFS_STATS2 +-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ +-GLOBAL_EXTERN atomic_t totSmBufAllocCount; ++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */ ++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount; + #endif + GLOBAL_EXTERN atomic_t smBufAllocCount; + GLOBAL_EXTERN atomic_t midCount; +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index 6b0e064..94e6c3c 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -600,7 +600,7 @@ symlink_exit: + + void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie) + { +- char *p = nd_get_link(nd); ++ const char *p = nd_get_link(nd); + if (!IS_ERR(p)) + kfree(p); + } +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c +index 703ef5c..2a44ed5 100644 +--- a/fs/cifs/misc.c ++++ b/fs/cifs/misc.c +@@ -156,7 +156,7 @@ cifs_buf_get(void) + memset(ret_buf, 0, sizeof(struct smb_hdr) + 3); + atomic_inc(&bufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totBufAllocCount); ++ atomic_inc_unchecked(&totBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + } + +@@ -191,7 +191,7 @@ cifs_small_buf_get(void) + /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ + atomic_inc(&smBufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totSmBufAllocCount); ++ atomic_inc_unchecked(&totSmBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + + } +diff --git a/fs/coda/cache.c b/fs/coda/cache.c +index 6901578..d402eb5 100644 +--- a/fs/coda/cache.c ++++ b/fs/coda/cache.c +@@ -24,7 +24,7 @@ + #include "coda_linux.h" + #include "coda_cache.h" + +-static atomic_t permission_epoch = ATOMIC_INIT(0); ++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0); + + /* replace or extend an acl cache hit */ + void coda_cache_enter(struct inode *inode, int mask) +@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask) + struct coda_inode_info *cii = ITOC(inode); + + spin_lock(&cii->c_lock); +- cii->c_cached_epoch = atomic_read(&permission_epoch); ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch); + if (cii->c_uid != current_fsuid()) { + cii->c_uid = current_fsuid(); + cii->c_cached_perm = mask; +@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode) + { + struct coda_inode_info *cii = ITOC(inode); + spin_lock(&cii->c_lock); +- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1; + spin_unlock(&cii->c_lock); + } + + /* remove all acl caches */ + void coda_cache_clear_all(struct super_block *sb) + { +- atomic_inc(&permission_epoch); ++ atomic_inc_unchecked(&permission_epoch); + } + + +@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask) + spin_lock(&cii->c_lock); + hit = (mask & cii->c_cached_perm) == mask && + cii->c_uid == 
current_fsuid() && +- cii->c_cached_epoch == atomic_read(&permission_epoch); ++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch); + spin_unlock(&cii->c_lock); + + return hit; +diff --git a/fs/compat.c b/fs/compat.c +index c987875..08771ca 100644 +--- a/fs/compat.c ++++ b/fs/compat.c +@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim + static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) + { + compat_ino_t ino = stat->ino; +- typeof(ubuf->st_uid) uid = 0; +- typeof(ubuf->st_gid) gid = 0; ++ typeof(((struct compat_stat *)0)->st_uid) uid = 0; ++ typeof(((struct compat_stat *)0)->st_gid) gid = 0; + int err; + + SET_UID(uid, stat->uid); +@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) + + set_fs(KERNEL_DS); + /* The __user pointer cast is valid because of the set_fs() */ +- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64); ++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64); + set_fs(oldfs); + /* truncating is ok because it's a user address */ + if (!ret) +@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type, + goto out; + + ret = -EINVAL; +- if (nr_segs > UIO_MAXIOV || nr_segs < 0) ++ if (nr_segs > UIO_MAXIOV) + goto out; + if (nr_segs > fast_segs) { + ret = -ENOMEM; +@@ -845,6 +845,7 @@ struct compat_old_linux_dirent { + + struct compat_readdir_callback { + struct compat_old_linux_dirent __user *dirent; ++ struct file * file; + int result; + }; + +@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, + + buf.result = 0; + buf.dirent = dirent; ++ buf.file = file; + + error = vfs_readdir(file, compat_fillonedir, &buf); + if (buf.result) +@@ -914,6 +920,7 @@ struct compat_linux_dirent { + struct compat_getdents_callback { + struct compat_linux_dirent __user *current_dir; + struct compat_linux_dirent __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, compat_filldir, &buf); + if (error >= 0) +@@ -1003,6 +1015,7 @@ out: + struct compat_getdents_callback64 { + struct linux_dirent64 __user *current_dir; + struct linux_dirent64 __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + + if (dirent) { +@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, compat_filldir64, &buf); + if (error >= 0) + error = buf.error; + lastdirent = buf.previous; + if (lastdirent) { +- typeof(lastdirent->d_off) d_off = file->f_pos; ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; + if (__put_user_unaligned(d_off, &lastdirent->d_off)) + error = -EFAULT; + else +diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c +index 112e45a..b59845b 100644 +--- a/fs/compat_binfmt_elf.c ++++ b/fs/compat_binfmt_elf.c +@@ -30,11 +30,13 @@ + #undef elf_phdr + #undef elf_shdr + #undef elf_note ++#undef elf_dyn + #undef elf_addr_t + #define elfhdr elf32_hdr + #define elf_phdr elf32_phdr + #define elf_shdr elf32_shdr + #define elf_note elf32_note ++#define elf_dyn Elf32_Dyn + #define elf_addr_t Elf32_Addr + + /* +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c +index 51352de..93292ff 100644 +--- a/fs/compat_ioctl.c ++++ b/fs/compat_ioctl.c +@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, + + err = get_user(palp, &up->palette); + err |= get_user(length, &up->length); ++ if (err) ++ return -EFAULT; + + up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); + err = put_user(compat_ptr(palp), &up_native->palette); +@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, + return -EFAULT; + if (__get_user(udata, &ss32->iomem_base)) + return -EFAULT; +- ss.iomem_base = compat_ptr(udata); ++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata); + if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || + __get_user(ss.port_high, &ss32->port_high)) + return -EFAULT; +@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file, + copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || + copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || + copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || +- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) ++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32))) + return -EFAULT; + + return ioctl_preallocate(file, p); +@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + static int __init init_sys32_ioctl_cmp(const void *p, const void *q) + { + unsigned int a, b; +- a = *(unsigned int *)p; +- b = *(unsigned int *)q; ++ a = *(const unsigned int *)p; ++ b = *(const unsigned int *)q; + if (a > b) + return 1; + if (a < b) +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index 9a37a9b..35792b6 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir + } + for (p=q->next; p!= &parent_sd->s_children; p=p->next) { + struct configfs_dirent *next; +- const char * name; ++ const unsigned char * name; ++ char d_name[sizeof(next->s_dentry->d_iname)]; + int len; + struct inode *inode = NULL; + +@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir + continue; + + name = configfs_get_name(next); +- len = strlen(name); ++ if (next->s_dentry && name == next->s_dentry->d_iname) { ++ len = next->s_dentry->d_name.len; ++ memcpy(d_name, name, len); ++ name = d_name; ++ } else 
++ len = strlen(name); + + /* + * We'll have a dentry and an inode for +diff --git a/fs/dcache.c b/fs/dcache.c +index f7908ae..920a680 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages) + mempages -= reserve; + + names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL); + + dcache_init(); + inode_init(); +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index f3a257d..715ac0f 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); + struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) + { + return debugfs_create_file(name, ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ S_IFDIR | S_IRWXU, ++#else + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, ++#endif + parent, NULL, NULL); + } + EXPORT_SYMBOL_GPL(debugfs_create_dir); +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index af11098..81e3bbe 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, + old_fs = get_fs(); + set_fs(get_ds()); + rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, +- (char __user *)lower_buf, ++ (char __force_user *)lower_buf, + lower_bufsiz); + set_fs(old_fs); + if (rc < 0) +@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) + } + old_fs = get_fs(); + set_fs(get_ds()); +- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); ++ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len); + set_fs(old_fs); + if (rc < 0) { + kfree(buf); +@@ -752,7 +752,7 @@ out: + static void + ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) + { +- char *buf = nd_get_link(nd); ++ const char *buf = nd_get_link(nd); + if (!IS_ERR(buf)) { + /* Free the char* */ + kfree(buf); +diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c +index 0dc5a3d..d3cdeea 100644 +--- a/fs/ecryptfs/miscdev.c ++++ b/fs/ecryptfs/miscdev.c +@@ -328,7 +328,7 @@ check_list: + goto out_unlock_msg_ctx; + i = 5; + if (msg_ctx->msg) { +- if (copy_to_user(&buf[i], packet_length, packet_length_size)) ++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size)) + goto out_unlock_msg_ctx; + i += packet_length_size; + if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) +diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c +index 608c1c3..7d040a8 100644 +--- a/fs/ecryptfs/read_write.c ++++ b/fs/ecryptfs/read_write.c +@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, + return -EIO; + fs_save = get_fs(); + set_fs(get_ds()); +- rc = vfs_write(lower_file, data, size, &offset); ++ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset); + set_fs(fs_save); + mark_inode_dirty_sync(ecryptfs_inode); + return rc; +@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, + return -EIO; + fs_save = get_fs(); + set_fs(get_ds()); +- rc = vfs_read(lower_file, data, size, &offset); ++ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset); + set_fs(fs_save); + return rc; + } +diff --git a/fs/exec.c b/fs/exec.c +index 3625464..ff895b9 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -55,12 +55,28 @@ + #include <linux/pipe_fs_i.h> + #include <linux/oom.h> + #include 
<linux/compat.h> ++#include <linux/random.h> ++#include <linux/seq_file.h> ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#include <linux/kallsyms.h> ++#include <linux/kdebug.h> ++#endif + + #include <asm/uaccess.h> + #include <asm/mmu_context.h> + #include <asm/tlb.h> + #include "internal.h" + ++#ifndef CONFIG_PAX_HAVE_ACL_FLAGS ++void __weak pax_set_initial_flags(struct linux_binprm *bprm) {} ++#endif ++ ++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS ++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++EXPORT_SYMBOL(pax_set_initial_flags_func); ++#endif ++ + int core_uses_pid; + char core_pattern[CORENAME_MAX_SIZE] = "core"; + unsigned int core_pipe_limit; +@@ -70,7 +86,7 @@ struct core_name { + char *corename; + int used, size; + }; +-static atomic_t call_count = ATOMIC_INIT(1); ++static atomic_unchecked_t call_count = ATOMIC_INIT(1); + + /* The maximal length of core_pattern is also specified in sysctl.c */ + +@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + int write) + { + struct page *page; +- int ret; + +-#ifdef CONFIG_STACK_GROWSUP +- if (write) { +- ret = expand_downwards(bprm->vma, pos); +- if (ret < 0) +- return NULL; +- } +-#endif +- ret = get_user_pages(current, bprm->mm, pos, +- 1, write, 1, &page, NULL); +- if (ret <= 0) ++ if (0 > expand_downwards(bprm->vma, pos)) ++ return NULL; ++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL)) + return NULL; + + if (write) { +@@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + if (size <= ARG_MAX) + return page; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ // only allow 512KB for argv+env on suid/sgid binaries ++ // to prevent easy ASLR exhaustion ++ if (((bprm->cred->euid != current_euid()) || ++ (bprm->cred->egid != current_egid())) && ++ (size > (512 * 1024))) { ++ put_page(page); ++ return NULL; ++ } ++#endif ++ + /* + * Limit to 1/4-th the stack size for the argv+env strings. 
+ * This ensures that: +@@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + vma->vm_end = STACK_TOP_MAX; + vma->vm_start = vma->vm_end - PAGE_SIZE; + vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + INIT_LIST_HEAD(&vma->anon_vma_chain); + +@@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + mm->stack_vm = mm->total_vm = 1; + up_write(&mm->mmap_sem); + bprm->p = vma->vm_end - sizeof(void *); ++ ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ bprm->p ^= random32() & ~PAGE_MASK; ++#endif ++ + return 0; + err: + up_write(&mm->mmap_sem); +@@ -396,19 +426,7 @@ err: + return err; + } + +-struct user_arg_ptr { +-#ifdef CONFIG_COMPAT +- bool is_compat; +-#endif +- union { +- const char __user *const __user *native; +-#ifdef CONFIG_COMPAT +- compat_uptr_t __user *compat; +-#endif +- } ptr; +-}; +- +-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) ++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) + { + const char __user *native; + +@@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) + compat_uptr_t compat; + + if (get_user(compat, argv.ptr.compat + nr)) +- return ERR_PTR(-EFAULT); ++ return (const char __force_user *)ERR_PTR(-EFAULT); + + return compat_ptr(compat); + } + #endif + + if (get_user(native, argv.ptr.native + nr)) +- return ERR_PTR(-EFAULT); ++ return (const char __force_user *)ERR_PTR(-EFAULT); + + return native; + } +@@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max) + if (!p) + break; + +- if (IS_ERR(p)) ++ if (IS_ERR((const char __force_kernel *)p)) + return -EFAULT; + + if (i++ >= max) +@@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, + + ret = -EFAULT; + str = get_user_arg_ptr(argv, argc); +- if (IS_ERR(str)) ++ if (IS_ERR((const char __force_kernel *)str)) + goto out; + + len = strnlen_user(str, MAX_ARG_STRLEN); +@@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, + int r; + mm_segment_t oldfs = get_fs(); + struct user_arg_ptr argv = { +- .ptr.native = (const char __user *const __user *)__argv, ++ .ptr.native = (const char __force_user *const __force_user *)__argv, + }; + + set_fs(KERNEL_DS); +@@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + unsigned long new_end = old_end - shift; + struct mmu_gather tlb; + +- BUG_ON(new_start > new_end); ++ if (new_start >= new_end || new_start < mmap_min_addr) ++ return -ENOMEM; + + /* + * ensure there are no vmas between where we want to go +@@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + if (vma != find_vma(mm, new_start)) + return -EFAULT; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ BUG_ON(pax_find_mirror_vma(vma)); ++#endif ++ + /* + * cover the whole range: [new_start, old_end) + */ +@@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm, + stack_top = arch_align_stack(stack_top); + stack_top = PAGE_ALIGN(stack_top); + +- if (unlikely(stack_top < mmap_min_addr) || +- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) +- return -ENOMEM; +- + stack_shift = vma->vm_end - stack_top; + + bprm->p -= stack_shift; +@@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm, + bprm->exec -= stack_shift; + + down_write(&mm->mmap_sem); ++ 
++ /* Move stack pages down in memory. */ ++ if (stack_shift) { ++ ret = shift_arg_pages(vma, stack_shift); ++ if (ret) ++ goto out_unlock; ++ } ++ + vm_flags = VM_STACK_FLAGS; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + /* + * Adjust stack execute permissions; explicitly enable for + * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone +@@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm, + goto out_unlock; + BUG_ON(prev != vma); + +- /* Move stack pages down in memory. */ +- if (stack_shift) { +- ret = shift_arg_pages(vma, stack_shift); +- if (ret) +- goto out_unlock; +- } +- + /* mprotect_fixup is overkill to remove the temporary stack flags */ + vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; + +@@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- result = vfs_read(file, (void __user *)addr, count, &pos); ++ result = vfs_read(file, (void __force_user *)addr, count, &pos); + set_fs(old_fs); + return result; + } +@@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) + perf_event_comm(tsk); + } + ++static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len) ++{ ++ int i, ch; ++ ++ /* Copies the binary name from after last slash */ ++ for (i = 0; (ch = *(fn++)) != '\0';) { ++ if (ch == '/') ++ i = 0; /* overwrite what we wrote */ ++ else ++ if (i < len - 1) ++ tcomm[i++] = ch; ++ } ++ tcomm[i] = '\0'; ++} ++ + int flush_old_exec(struct linux_binprm * bprm) + { + int retval; +@@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm) + + set_mm_exe_file(bprm->mm, bprm->file); + ++ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm)); + /* + * Release all of the old mmap stuff + */ +@@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump); + + void setup_new_exec(struct linux_binprm * bprm) + { +- int i, ch; +- const char *name; +- char tcomm[sizeof(current->comm)]; +- + arch_pick_mmap_layout(current->mm); + + /* This is the point of no return */ +@@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm) + else + set_dumpable(current->mm, suid_dumpable); + +- name = bprm->filename; +- +- /* Copies the binary name from after last slash */ +- for (i=0; (ch = *(name++)) != '\0';) { +- if (ch == '/') +- i = 0; /* overwrite what we wrote */ +- else +- if (i < (sizeof(tcomm) - 1)) +- tcomm[i++] = ch; +- } +- tcomm[i] = '\0'; +- set_task_comm(current, tcomm); ++ set_task_comm(current, bprm->tcomm); + + /* Set the new mm task size. 
We have to do that late because it may + * depend on TIF_32BIT which is only updated in flush_thread() on +@@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) + } + rcu_read_unlock(); + +- if (p->fs->users > n_fs) { ++ if (atomic_read(&p->fs->users) > n_fs) { + bprm->unsafe |= LSM_UNSAFE_SHARE; + } else { + res = -EAGAIN; +@@ -1442,6 +1475,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) + + EXPORT_SYMBOL(search_binary_handler); + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++static DEFINE_PER_CPU(u64, exec_counter); ++static int __init init_exec_counters(void) ++{ ++ unsigned int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ per_cpu(exec_counter, cpu) = (u64)cpu; ++ } ++ ++ return 0; ++} ++early_initcall(init_exec_counters); ++static inline void increment_exec_counter(void) ++{ ++ BUILD_BUG_ON(NR_CPUS > (1 << 16)); ++ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16); ++} ++#else ++static inline void increment_exec_counter(void) {} ++#endif ++ + /* + * sys_execve() executes a new program. + */ +@@ -1450,6 +1505,11 @@ static int do_execve_common(const char *filename, + struct user_arg_ptr envp, + struct pt_regs *regs) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; +@@ -1457,6 +1517,8 @@ static int do_execve_common(const char *filename, + int retval; + const struct cred *cred = current_cred(); + ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1); ++ + /* + * We move the actual failure in case of RLIMIT_NPROC excess from + * set*uid() to execve() because too many poorly written programs +@@ -1497,12 +1559,27 @@ static int do_execve_common(const char *filename, + if (IS_ERR(file)) + goto out_unmark; + ++ if (gr_ptrace_readexec(file, bprm->unsafe)) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ + sched_exec(); + + bprm->file = file; + bprm->filename = filename; + bprm->interp = filename; + ++ if (gr_process_user_ban()) { ++ retval = -EPERM; ++ goto out_file; ++ } ++ ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { ++ retval = -EACCES; ++ goto out_file; ++ } ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_file; +@@ -1519,24 +1596,65 @@ static int do_execve_common(const char *filename, + if (retval < 0) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* limit suid stack to 8MB ++ we saved the old limits above and will restore them if this exec fails ++ */ ++ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) && ++ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024))) ++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; ++#endif ++ ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, ++ bprm->unsafe); ++ if (retval < 0) ++ goto out_fail; ++ + retval = copy_strings_kernel(1, &bprm->filename, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + bprm->exec = bprm->p; + retval = copy_strings(bprm->envc, envp, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + 
retval = copy_strings(bprm->argc, argv, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; ++ ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); ++ ++ gr_handle_exec_args(bprm, argv); + + retval = search_binary_handler(bprm,regs); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ ++ ++ increment_exec_counter(); + current->fs->in_exec = 0; + current->in_execve = 0; + acct_update_integrals(current); +@@ -1545,6 +1663,14 @@ static int do_execve_common(const char *filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) { + acct_arg_size(bprm, 0); +@@ -1618,7 +1744,7 @@ static int expand_corename(struct core_name *cn) + { + char *old_corename = cn->corename; + +- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); ++ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count); + cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); + + if (!cn->corename) { +@@ -1715,7 +1841,7 @@ static int format_corename(struct core_name *cn, long signr) + int pid_in_pattern = 0; + int err = 0; + +- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); ++ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count); + cn->corename = kmalloc(cn->size, GFP_KERNEL); + cn->used = 0; + +@@ -1812,6 +1938,228 @@ out: + return ispipe; + } + ++int pax_check_flags(unsigned long *flags) ++{ ++ int retval = 0; ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) ++ if (*flags & MF_PAX_SEGMEXEC) ++ { ++ *flags &= ~MF_PAX_SEGMEXEC; ++ retval = -EINVAL; ++ } ++#endif ++ ++ if ((*flags & MF_PAX_PAGEEXEC) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ && (*flags & MF_PAX_SEGMEXEC) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_PAGEEXEC; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_MPROTECT) ++ ++#ifdef CONFIG_PAX_MPROTECT ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_MPROTECT; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_EMUTRAMP) ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_EMUTRAMP; ++ retval = -EINVAL; ++ } ++ ++ return retval; ++} ++ ++EXPORT_SYMBOL(pax_check_flags); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) ++{ ++ struct task_struct *tsk = current; ++ struct mm_struct *mm = current->mm; ++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL); ++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL); ++ char *path_exec = NULL; ++ char *path_fault = NULL; ++ unsigned long start = 0UL, end = 0UL, offset = 0UL; ++ ++ if (buffer_exec && buffer_fault) { ++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; ++ ++ down_read(&mm->mmap_sem); ++ vma = mm->mmap; ++ while (vma && (!vma_exec || !vma_fault)) { ++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) ++ vma_exec = vma; ++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) ++ vma_fault = vma; ++ vma = vma->vm_next; ++ } ++ if (vma_exec) { ++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE); ++ if (IS_ERR(path_exec)) ++ path_exec = "<path too long>"; ++ else { ++ path_exec = 
mangle_path(buffer_exec, path_exec, "\t\n\\"); ++ if (path_exec) { ++ *path_exec = 0; ++ path_exec = buffer_exec; ++ } else ++ path_exec = "<path too long>"; ++ } ++ } ++ if (vma_fault) { ++ start = vma_fault->vm_start; ++ end = vma_fault->vm_end; ++ offset = vma_fault->vm_pgoff << PAGE_SHIFT; ++ if (vma_fault->vm_file) { ++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE); ++ if (IS_ERR(path_fault)) ++ path_fault = "<path too long>"; ++ else { ++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\"); ++ if (path_fault) { ++ *path_fault = 0; ++ path_fault = buffer_fault; ++ } else ++ path_fault = "<path too long>"; ++ } ++ } else ++ path_fault = "<anonymous mapping>"; ++ } ++ up_read(&mm->mmap_sem); ++ } ++ if (tsk->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); ++ else ++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); ++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " ++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), ++ task_uid(tsk), task_euid(tsk), pc, sp); ++ free_page((unsigned long)buffer_exec); ++ free_page((unsigned long)buffer_fault); ++ pax_report_insns(regs, pc, sp); ++ do_coredump(SIGKILL, SIGKILL, regs); ++} ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++void pax_report_refcount_overflow(struct pt_regs *regs) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ else ++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); ++ show_regs(regs); ++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current); ++} ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */ ++int object_is_on_stack(const void *obj, unsigned long len) ++{ ++ const void * const stack = task_stack_page(current); ++ const void * const stackend = stack + THREAD_SIZE; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ const void *frame = NULL; ++ const void *oldframe; ++#endif ++ ++ if (obj + len < obj) ++ return -1; ++ ++ if (obj + len <= stack || stackend <= obj) ++ return 0; ++ ++ if (obj < stack || stackend < obj + len) ++ return -1; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ oldframe = __builtin_frame_address(1); ++ if (oldframe) ++ frame = __builtin_frame_address(2); ++ /* ++ low ----------------------------------------------> high ++ [saved bp][saved ip][args][local vars][saved bp][saved ip] ++ ^----------------^ ++ allow copies only within here ++ */ ++ while (stack <= frame && frame < stackend) { ++ /* if obj + len extends past the last frame, this ++ check won't pass and the next frame will be 0, ++ causing us to bail out and correctly report ++ the copy as invalid ++ */ ++ if (obj + len <= frame) ++ return obj >= oldframe + 2 * sizeof(void *) ? 
2 : -1; ++ oldframe = frame; ++ frame = *(const void * const *)frame; ++ } ++ return -1; ++#else ++ return 1; ++#endif ++} ++ ++__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len); ++ else ++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len); ++ dump_stack(); ++ gr_handle_kernel_exploit(); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_track_stack(void) ++{ ++ unsigned long sp = (unsigned long)&sp; ++ if (sp < current_thread_info()->lowest_stack && ++ sp > (unsigned long)task_stack_page(current)) ++ current_thread_info()->lowest_stack = sp; ++} ++EXPORT_SYMBOL(pax_track_stack); ++#endif ++ ++#ifdef CONFIG_PAX_SIZE_OVERFLOW ++void report_size_overflow(const char *file, unsigned int line, const char *func) ++{ ++ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line); ++ dump_stack(); ++ do_group_exit(SIGKILL); ++} ++EXPORT_SYMBOL(report_size_overflow); ++#endif ++ + static int zap_process(struct task_struct *start, int exit_code) + { + struct task_struct *t; +@@ -2023,17 +2371,17 @@ static void wait_for_dump_helpers(struct file *file) + pipe = file->f_path.dentry->d_inode->i_pipe; + + pipe_lock(pipe); +- pipe->readers++; +- pipe->writers--; ++ atomic_inc(&pipe->readers); ++ atomic_dec(&pipe->writers); + +- while ((pipe->readers > 1) && (!signal_pending(current))) { ++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) { + wake_up_interruptible_sync(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + pipe_wait(pipe); + } + +- pipe->readers--; +- pipe->writers++; ++ atomic_dec(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe_unlock(pipe); + + } +@@ -2094,7 +2442,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + int retval = 0; + int flag = 0; + int ispipe; +- static atomic_t core_dump_count = ATOMIC_INIT(0); ++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0); + struct coredump_params cprm = { + .signr = signr, + .regs = regs, +@@ -2109,6 +2457,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + + audit_core_dumps(signr); + ++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) ++ gr_handle_brute_attach(current, cprm.mm_flags); ++ + binfmt = mm->binfmt; + if (!binfmt || !binfmt->core_dump) + goto fail; +@@ -2176,7 +2527,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + } + cprm.limit = RLIM_INFINITY; + +- dump_count = atomic_inc_return(&core_dump_count); ++ dump_count = atomic_inc_return_unchecked(&core_dump_count); + if (core_pipe_limit && (core_pipe_limit < dump_count)) { + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", + task_tgid_vnr(current), current->comm); +@@ -2203,6 +2554,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) + } else { + struct inode *inode; + ++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); ++ + if (cprm.limit < binfmt->min_coredump) + goto fail_unlock; + +@@ -2246,7 +2599,7 @@ close_fail: + filp_close(cprm.file, NULL); + fail_dropcount: + if (ispipe) +- 
atomic_dec_unchecked(&core_dump_count); + fail_unlock: + kfree(cn.corename); + fail_corename: +@@ -2265,7 +2618,7 @@ fail: + */ + int dump_write(struct file *file, const void *addr, int nr) + { +- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; ++ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr; + } + EXPORT_SYMBOL(dump_write); + +diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c +index a8cbe1b..fed04cb 100644 +--- a/fs/ext2/balloc.c ++++ b/fs/ext2/balloc.c +@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && + sbi->s_resuid != current_fsuid() && + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { + return 0; +diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c +index a203892..4e64db5 100644 +--- a/fs/ext3/balloc.c ++++ b/fs/ext3/balloc.c +@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && + !use_reservation && sbi->s_resuid != current_fsuid() && +- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { ++ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) && ++ !capable_nolog(CAP_SYS_RESOURCE)) { + return 0; + } + return 1; +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 12ccacd..a6035fce0 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, + /* Hm, nope. Are (enough) root reserved clusters available? 
*/ + if (sbi->s_resuid == current_fsuid() || + ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || +- capable(CAP_SYS_RESOURCE) || +- (flags & EXT4_MB_USE_ROOT_BLOCKS)) { ++ (flags & EXT4_MB_USE_ROOT_BLOCKS) || ++ capable_nolog(CAP_SYS_RESOURCE)) { + + if (free_clusters >= (nclusters + dirty_clusters)) + return 1; +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 5b0e26a..0aa002d 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -1208,19 +1208,19 @@ struct ext4_sb_info { + unsigned long s_mb_last_start; + + /* stats for buddy allocator */ +- atomic_t s_bal_reqs; /* number of reqs with len > 1 */ +- atomic_t s_bal_success; /* we found long enough chunks */ +- atomic_t s_bal_allocated; /* in blocks */ +- atomic_t s_bal_ex_scanned; /* total extents scanned */ +- atomic_t s_bal_goals; /* goal hits */ +- atomic_t s_bal_breaks; /* too long searches */ +- atomic_t s_bal_2orders; /* 2^order hits */ ++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */ ++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */ ++ atomic_unchecked_t s_bal_allocated; /* in blocks */ ++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */ ++ atomic_unchecked_t s_bal_goals; /* goal hits */ ++ atomic_unchecked_t s_bal_breaks; /* too long searches */ ++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */ + spinlock_t s_bal_lock; + unsigned long s_mb_buddies_generated; + unsigned long long s_mb_generation_time; +- atomic_t s_mb_lost_chunks; +- atomic_t s_mb_preallocated; +- atomic_t s_mb_discarded; ++ atomic_unchecked_t s_mb_lost_chunks; ++ atomic_unchecked_t s_mb_preallocated; ++ atomic_unchecked_t s_mb_discarded; + atomic_t s_lock_busy; + + /* locality groups */ +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index e2d8be8..c7f0ce9 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); + + if (EXT4_SB(sb)->s_mb_stats) +- atomic_inc(&EXT4_SB(sb)->s_bal_2orders); ++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders); + + break; + } +@@ -2088,7 +2088,7 @@ repeat: + ac->ac_status = AC_STATUS_CONTINUE; + ac->ac_flags |= EXT4_MB_HINT_FIRST; + cr = 3; +- atomic_inc(&sbi->s_mb_lost_chunks); ++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks); + goto repeat; + } + } +@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb) + if (sbi->s_mb_stats) { + ext4_msg(sb, KERN_INFO, + "mballoc: %u blocks %u reqs (%u success)", +- atomic_read(&sbi->s_bal_allocated), +- atomic_read(&sbi->s_bal_reqs), +- atomic_read(&sbi->s_bal_success)); ++ atomic_read_unchecked(&sbi->s_bal_allocated), ++ atomic_read_unchecked(&sbi->s_bal_reqs), ++ atomic_read_unchecked(&sbi->s_bal_success)); + ext4_msg(sb, KERN_INFO, + "mballoc: %u extents scanned, %u goal hits, " + "%u 2^N hits, %u breaks, %u lost", +- atomic_read(&sbi->s_bal_ex_scanned), +- atomic_read(&sbi->s_bal_goals), +- atomic_read(&sbi->s_bal_2orders), +- atomic_read(&sbi->s_bal_breaks), +- atomic_read(&sbi->s_mb_lost_chunks)); ++ atomic_read_unchecked(&sbi->s_bal_ex_scanned), ++ atomic_read_unchecked(&sbi->s_bal_goals), ++ atomic_read_unchecked(&sbi->s_bal_2orders), ++ atomic_read_unchecked(&sbi->s_bal_breaks), ++ atomic_read_unchecked(&sbi->s_mb_lost_chunks)); + ext4_msg(sb, KERN_INFO, + "mballoc: %lu generated and it took %Lu", + sbi->s_mb_buddies_generated, + sbi->s_mb_generation_time); + ext4_msg(sb, KERN_INFO, + "mballoc: %u preallocated, %u discarded", +- 
atomic_read(&sbi->s_mb_preallocated), +- atomic_read(&sbi->s_mb_discarded)); ++ atomic_read_unchecked(&sbi->s_mb_preallocated), ++ atomic_read_unchecked(&sbi->s_mb_discarded)); + } + + free_percpu(sbi->s_locality_groups); +@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); + + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { +- atomic_inc(&sbi->s_bal_reqs); +- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); ++ atomic_inc_unchecked(&sbi->s_bal_reqs); ++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); + if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) +- atomic_inc(&sbi->s_bal_success); +- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); ++ atomic_inc_unchecked(&sbi->s_bal_success); ++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned); + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) +- atomic_inc(&sbi->s_bal_goals); ++ atomic_inc_unchecked(&sbi->s_bal_goals); + if (ac->ac_found > sbi->s_mb_max_to_scan) +- atomic_inc(&sbi->s_bal_breaks); ++ atomic_inc_unchecked(&sbi->s_bal_breaks); + } + + if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) +@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_inode_pa(ac, pa); + + ext4_mb_use_inode_pa(ac, pa); +- atomic_add(pa->pa_free, &sbi->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated); + + ei = EXT4_I(ac->ac_inode); + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); +@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_group_pa(ac, pa); + + ext4_mb_use_group_pa(ac, pa); +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + lg = ac->ac_lg; +@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, + * from the bitmap and continue. 
+ */ + } +- atomic_add(free, &sbi->s_mb_discarded); ++ atomic_add_unchecked(free, &sbi->s_mb_discarded); + + return err; + } +@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, + ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); + BUG_ON(group != e4b->bd_group && pa->pa_len != 0); + mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); +- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); ++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); + trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); + + return 0; +diff --git a/fs/fcntl.c b/fs/fcntl.c +index 22764c7..86372c9 100644 +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, + if (err) + return err; + ++ if (gr_handle_chroot_fowner(pid, type)) ++ return -ENOENT; ++ if (gr_check_protected_task_fowner(pid, type)) ++ return -EACCES; ++ + f_modown(filp, pid, type, force); + return 0; + } +@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp) + + static int f_setown_ex(struct file *filp, unsigned long arg) + { +- struct f_owner_ex * __user owner_p = (void * __user)arg; ++ struct f_owner_ex __user *owner_p = (void __user *)arg; + struct f_owner_ex owner; + struct pid *pid; + int type; +@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg) + + static int f_getown_ex(struct file *filp, unsigned long arg) + { +- struct f_owner_ex * __user owner_p = (void * __user)arg; ++ struct f_owner_ex __user *owner_p = (void __user *)arg; + struct f_owner_ex owner; + int ret = 0; + +@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, + switch (cmd) { + case F_DUPFD: + case F_DUPFD_CLOEXEC: ++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0); + if (arg >= rlimit(RLIMIT_NOFILE)) + break; + err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0); +diff --git a/fs/fifo.c b/fs/fifo.c +index b1a524d..4ee270e 100644 +--- a/fs/fifo.c ++++ b/fs/fifo.c +@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp) + */ + filp->f_op = &read_pipefifo_fops; + pipe->r_counter++; +- if (pipe->readers++ == 0) ++ if (atomic_inc_return(&pipe->readers) == 1) + wake_up_partner(inode); + +- if (!pipe->writers) { ++ if (!atomic_read(&pipe->writers)) { + if ((filp->f_flags & O_NONBLOCK)) { + /* suppress POLLHUP until we have + * seen a writer */ +@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp) + * errno=ENXIO when there is no process reading the FIFO. 
+ */ + ret = -ENXIO; +- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) ++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers)) + goto err; + + filp->f_op = &write_pipefifo_fops; + pipe->w_counter++; +- if (!pipe->writers++) ++ if (atomic_inc_return(&pipe->writers) == 1) + wake_up_partner(inode); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + wait_for_partner(inode, &pipe->r_counter); + if (signal_pending(current)) + goto err_wr; +@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp) + */ + filp->f_op = &rdwr_pipefifo_fops; + +- pipe->readers++; +- pipe->writers++; ++ atomic_inc(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe->r_counter++; + pipe->w_counter++; +- if (pipe->readers == 1 || pipe->writers == 1) ++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1) + wake_up_partner(inode); + break; + +@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp) + return 0; + + err_rd: +- if (!--pipe->readers) ++ if (atomic_dec_and_test(&pipe->readers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err_wr: +- if (!--pipe->writers) ++ if (atomic_dec_and_test(&pipe->writers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err: +- if (!pipe->readers && !pipe->writers) ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) + free_pipe_info(inode); + + err_nocleanup: +diff --git a/fs/file.c b/fs/file.c +index 4c6992d..104cdea 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -15,6 +15,7 @@ + #include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/fdtable.h> + #include <linux/bitops.h> + #include <linux/interrupt.h> +@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr) + * N.B. For clone tasks sharing a files structure, this test + * will limit the total number of files that can be opened. + */ ++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0); + if (nr >= rlimit(RLIMIT_NOFILE)) + return -EMFILE; + +diff --git a/fs/filesystems.c b/fs/filesystems.c +index 0845f84..7b4ebef 100644 +--- a/fs/filesystems.c ++++ b/fs/filesystems.c +@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name) + int len = dot ? 
dot - name : strlen(name); + + fs = __get_fs_type(name, len); ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0)) ++#else + if (!fs && (request_module("%.*s", len, name) == 0)) ++#endif + fs = __get_fs_type(name, len); + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { +diff --git a/fs/fs_struct.c b/fs/fs_struct.c +index 78b519c..a8b4979 100644 +--- a/fs/fs_struct.c ++++ b/fs/fs_struct.c +@@ -4,6 +4,7 @@ + #include <linux/path.h> + #include <linux/slab.h> + #include <linux/fs_struct.h> ++#include <linux/grsecurity.h> + #include "internal.h" + + static inline void path_get_longterm(struct path *path) +@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path) + old_root = fs->root; + fs->root = *path; + path_get_longterm(path); ++ gr_set_chroot_entries(current, path); + write_seqcount_end(&fs->seq); + spin_unlock(&fs->lock); + if (old_root.dentry) +@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) + && fs->root.mnt == old_root->mnt) { + path_get_longterm(new_root); + fs->root = *new_root; ++ gr_set_chroot_entries(p, new_root); + count++; + } + if (fs->pwd.dentry == old_root->dentry +@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk) + spin_lock(&fs->lock); + write_seqcount_begin(&fs->seq); + tsk->fs = NULL; +- kill = !--fs->users; ++ gr_clear_chroot_entries(tsk); ++ kill = !atomic_dec_return(&fs->users); + write_seqcount_end(&fs->seq); + spin_unlock(&fs->lock); + task_unlock(tsk); +@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); + /* We don't need to lock fs - think why ;-) */ + if (fs) { +- fs->users = 1; ++ atomic_set(&fs->users, 1); + fs->in_exec = 0; + spin_lock_init(&fs->lock); + seqcount_init(&fs->seq); +@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) + spin_lock(&old->lock); + fs->root = old->root; + path_get_longterm(&fs->root); ++ /* instead of calling gr_set_chroot_entries here, ++ we call it from every caller of this function ++ */ + fs->pwd = old->pwd; + path_get_longterm(&fs->pwd); + spin_unlock(&old->lock); +@@ -150,8 +157,9 @@ int unshare_fs_struct(void) + + task_lock(current); + spin_lock(&fs->lock); +- kill = !--fs->users; ++ kill = !atomic_dec_return(&fs->users); + current->fs = new_fs; ++ gr_set_chroot_entries(current, &new_fs->root); + spin_unlock(&fs->lock); + task_unlock(current); + +@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct); + + int current_umask(void) + { +- return current->fs->umask; ++ return current->fs->umask | gr_acl_umask(); + } + EXPORT_SYMBOL(current_umask); + + /* to be mentioned only in INIT_TASK */ + struct fs_struct init_fs = { +- .users = 1, ++ .users = ATOMIC_INIT(1), + .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), + .seq = SEQCNT_ZERO, + .umask = 0022, +@@ -186,12 +194,13 @@ void daemonize_fs_struct(void) + task_lock(current); + + spin_lock(&init_fs.lock); +- init_fs.users++; ++ atomic_inc(&init_fs.users); + spin_unlock(&init_fs.lock); + + spin_lock(&fs->lock); + current->fs = &init_fs; +- kill = !--fs->users; ++ gr_set_chroot_entries(current, &current->fs->root); ++ kill = !atomic_dec_return(&fs->users); + spin_unlock(&fs->lock); + + task_unlock(current); +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index 9905350..02eaec4 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie( + parent ? 
(char *) parent->def->name : "<no-parent>", + def->name, netfs_data); + +- fscache_stat(&fscache_n_acquires); ++ fscache_stat_unchecked(&fscache_n_acquires); + + /* if there's no parent cookie, then we don't create one here either */ + if (!parent) { +- fscache_stat(&fscache_n_acquires_null); ++ fscache_stat_unchecked(&fscache_n_acquires_null); + _leave(" [no parent]"); + return NULL; + } +@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie( + /* allocate and initialise a cookie */ + cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); + if (!cookie) { +- fscache_stat(&fscache_n_acquires_oom); ++ fscache_stat_unchecked(&fscache_n_acquires_oom); + _leave(" [ENOMEM]"); + return NULL; + } +@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie( + + switch (cookie->def->type) { + case FSCACHE_COOKIE_TYPE_INDEX: +- fscache_stat(&fscache_n_cookie_index); ++ fscache_stat_unchecked(&fscache_n_cookie_index); + break; + case FSCACHE_COOKIE_TYPE_DATAFILE: +- fscache_stat(&fscache_n_cookie_data); ++ fscache_stat_unchecked(&fscache_n_cookie_data); + break; + default: +- fscache_stat(&fscache_n_cookie_special); ++ fscache_stat_unchecked(&fscache_n_cookie_special); + break; + } + +@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie( + if (fscache_acquire_non_index_cookie(cookie) < 0) { + atomic_dec(&parent->n_children); + __fscache_cookie_put(cookie); +- fscache_stat(&fscache_n_acquires_nobufs); ++ fscache_stat_unchecked(&fscache_n_acquires_nobufs); + _leave(" = NULL"); + return NULL; + } + } + +- fscache_stat(&fscache_n_acquires_ok); ++ fscache_stat_unchecked(&fscache_n_acquires_ok); + _leave(" = %p", cookie); + return cookie; + } +@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) + cache = fscache_select_cache_for_object(cookie->parent); + if (!cache) { + up_read(&fscache_addremove_sem); +- fscache_stat(&fscache_n_acquires_no_cache); ++ fscache_stat_unchecked(&fscache_n_acquires_no_cache); + _leave(" = -ENOMEDIUM [no cache]"); + return -ENOMEDIUM; + } +@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache, + object = cache->ops->alloc_object(cache, cookie); + fscache_stat_d(&fscache_n_cop_alloc_object); + if (IS_ERR(object)) { +- fscache_stat(&fscache_n_object_no_alloc); ++ fscache_stat_unchecked(&fscache_n_object_no_alloc); + ret = PTR_ERR(object); + goto error; + } + +- fscache_stat(&fscache_n_object_alloc); ++ fscache_stat_unchecked(&fscache_n_object_alloc); + + object->debug_id = atomic_inc_return(&fscache_object_debug_id); + +@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie) + struct fscache_object *object; + struct hlist_node *_p; + +- fscache_stat(&fscache_n_updates); ++ fscache_stat_unchecked(&fscache_n_updates); + + if (!cookie) { +- fscache_stat(&fscache_n_updates_null); ++ fscache_stat_unchecked(&fscache_n_updates_null); + _leave(" [no cookie]"); + return; + } +@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) + struct fscache_object *object; + unsigned long event; + +- fscache_stat(&fscache_n_relinquishes); ++ fscache_stat_unchecked(&fscache_n_relinquishes); + if (retire) +- fscache_stat(&fscache_n_relinquishes_retire); ++ fscache_stat_unchecked(&fscache_n_relinquishes_retire); + + if (!cookie) { +- fscache_stat(&fscache_n_relinquishes_null); ++ fscache_stat_unchecked(&fscache_n_relinquishes_null); + _leave(" [no cookie]"); + return; + } +@@ -435,7 +435,7 @@ void 
__fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) + + /* wait for the cookie to finish being instantiated (or to fail) */ + if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { +- fscache_stat(&fscache_n_relinquishes_waitcrt); ++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt); + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h +index f6aad48..88dcf26 100644 +--- a/fs/fscache/internal.h ++++ b/fs/fscache/internal.h +@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void); + extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; + extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; + +-extern atomic_t fscache_n_op_pend; +-extern atomic_t fscache_n_op_run; +-extern atomic_t fscache_n_op_enqueue; +-extern atomic_t fscache_n_op_deferred_release; +-extern atomic_t fscache_n_op_release; +-extern atomic_t fscache_n_op_gc; +-extern atomic_t fscache_n_op_cancelled; +-extern atomic_t fscache_n_op_rejected; ++extern atomic_unchecked_t fscache_n_op_pend; ++extern atomic_unchecked_t fscache_n_op_run; ++extern atomic_unchecked_t fscache_n_op_enqueue; ++extern atomic_unchecked_t fscache_n_op_deferred_release; ++extern atomic_unchecked_t fscache_n_op_release; ++extern atomic_unchecked_t fscache_n_op_gc; ++extern atomic_unchecked_t fscache_n_op_cancelled; ++extern atomic_unchecked_t fscache_n_op_rejected; + +-extern atomic_t fscache_n_attr_changed; +-extern atomic_t fscache_n_attr_changed_ok; +-extern atomic_t fscache_n_attr_changed_nobufs; +-extern atomic_t fscache_n_attr_changed_nomem; +-extern atomic_t fscache_n_attr_changed_calls; ++extern atomic_unchecked_t fscache_n_attr_changed; ++extern atomic_unchecked_t fscache_n_attr_changed_ok; ++extern atomic_unchecked_t fscache_n_attr_changed_nobufs; ++extern atomic_unchecked_t fscache_n_attr_changed_nomem; ++extern atomic_unchecked_t fscache_n_attr_changed_calls; + +-extern atomic_t fscache_n_allocs; +-extern atomic_t fscache_n_allocs_ok; +-extern atomic_t fscache_n_allocs_wait; +-extern atomic_t fscache_n_allocs_nobufs; +-extern atomic_t fscache_n_allocs_intr; +-extern atomic_t fscache_n_allocs_object_dead; +-extern atomic_t fscache_n_alloc_ops; +-extern atomic_t fscache_n_alloc_op_waits; ++extern atomic_unchecked_t fscache_n_allocs; ++extern atomic_unchecked_t fscache_n_allocs_ok; ++extern atomic_unchecked_t fscache_n_allocs_wait; ++extern atomic_unchecked_t fscache_n_allocs_nobufs; ++extern atomic_unchecked_t fscache_n_allocs_intr; ++extern atomic_unchecked_t fscache_n_allocs_object_dead; ++extern atomic_unchecked_t fscache_n_alloc_ops; ++extern atomic_unchecked_t fscache_n_alloc_op_waits; + +-extern atomic_t fscache_n_retrievals; +-extern atomic_t fscache_n_retrievals_ok; +-extern atomic_t fscache_n_retrievals_wait; +-extern atomic_t fscache_n_retrievals_nodata; +-extern atomic_t fscache_n_retrievals_nobufs; +-extern atomic_t fscache_n_retrievals_intr; +-extern atomic_t fscache_n_retrievals_nomem; +-extern atomic_t fscache_n_retrievals_object_dead; +-extern atomic_t fscache_n_retrieval_ops; +-extern atomic_t fscache_n_retrieval_op_waits; ++extern atomic_unchecked_t fscache_n_retrievals; ++extern atomic_unchecked_t fscache_n_retrievals_ok; ++extern atomic_unchecked_t fscache_n_retrievals_wait; ++extern atomic_unchecked_t fscache_n_retrievals_nodata; ++extern atomic_unchecked_t fscache_n_retrievals_nobufs; ++extern atomic_unchecked_t fscache_n_retrievals_intr; ++extern atomic_unchecked_t 
fscache_n_retrievals_nomem; ++extern atomic_unchecked_t fscache_n_retrievals_object_dead; ++extern atomic_unchecked_t fscache_n_retrieval_ops; ++extern atomic_unchecked_t fscache_n_retrieval_op_waits; + +-extern atomic_t fscache_n_stores; +-extern atomic_t fscache_n_stores_ok; +-extern atomic_t fscache_n_stores_again; +-extern atomic_t fscache_n_stores_nobufs; +-extern atomic_t fscache_n_stores_oom; +-extern atomic_t fscache_n_store_ops; +-extern atomic_t fscache_n_store_calls; +-extern atomic_t fscache_n_store_pages; +-extern atomic_t fscache_n_store_radix_deletes; +-extern atomic_t fscache_n_store_pages_over_limit; ++extern atomic_unchecked_t fscache_n_stores; ++extern atomic_unchecked_t fscache_n_stores_ok; ++extern atomic_unchecked_t fscache_n_stores_again; ++extern atomic_unchecked_t fscache_n_stores_nobufs; ++extern atomic_unchecked_t fscache_n_stores_oom; ++extern atomic_unchecked_t fscache_n_store_ops; ++extern atomic_unchecked_t fscache_n_store_calls; ++extern atomic_unchecked_t fscache_n_store_pages; ++extern atomic_unchecked_t fscache_n_store_radix_deletes; ++extern atomic_unchecked_t fscache_n_store_pages_over_limit; + +-extern atomic_t fscache_n_store_vmscan_not_storing; +-extern atomic_t fscache_n_store_vmscan_gone; +-extern atomic_t fscache_n_store_vmscan_busy; +-extern atomic_t fscache_n_store_vmscan_cancelled; ++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing; ++extern atomic_unchecked_t fscache_n_store_vmscan_gone; ++extern atomic_unchecked_t fscache_n_store_vmscan_busy; ++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled; + +-extern atomic_t fscache_n_marks; +-extern atomic_t fscache_n_uncaches; ++extern atomic_unchecked_t fscache_n_marks; ++extern atomic_unchecked_t fscache_n_uncaches; + +-extern atomic_t fscache_n_acquires; +-extern atomic_t fscache_n_acquires_null; +-extern atomic_t fscache_n_acquires_no_cache; +-extern atomic_t fscache_n_acquires_ok; +-extern atomic_t fscache_n_acquires_nobufs; +-extern atomic_t fscache_n_acquires_oom; ++extern atomic_unchecked_t fscache_n_acquires; ++extern atomic_unchecked_t fscache_n_acquires_null; ++extern atomic_unchecked_t fscache_n_acquires_no_cache; ++extern atomic_unchecked_t fscache_n_acquires_ok; ++extern atomic_unchecked_t fscache_n_acquires_nobufs; ++extern atomic_unchecked_t fscache_n_acquires_oom; + +-extern atomic_t fscache_n_updates; +-extern atomic_t fscache_n_updates_null; +-extern atomic_t fscache_n_updates_run; ++extern atomic_unchecked_t fscache_n_updates; ++extern atomic_unchecked_t fscache_n_updates_null; ++extern atomic_unchecked_t fscache_n_updates_run; + +-extern atomic_t fscache_n_relinquishes; +-extern atomic_t fscache_n_relinquishes_null; +-extern atomic_t fscache_n_relinquishes_waitcrt; +-extern atomic_t fscache_n_relinquishes_retire; ++extern atomic_unchecked_t fscache_n_relinquishes; ++extern atomic_unchecked_t fscache_n_relinquishes_null; ++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt; ++extern atomic_unchecked_t fscache_n_relinquishes_retire; + +-extern atomic_t fscache_n_cookie_index; +-extern atomic_t fscache_n_cookie_data; +-extern atomic_t fscache_n_cookie_special; ++extern atomic_unchecked_t fscache_n_cookie_index; ++extern atomic_unchecked_t fscache_n_cookie_data; ++extern atomic_unchecked_t fscache_n_cookie_special; + +-extern atomic_t fscache_n_object_alloc; +-extern atomic_t fscache_n_object_no_alloc; +-extern atomic_t fscache_n_object_lookups; +-extern atomic_t fscache_n_object_lookups_negative; +-extern atomic_t fscache_n_object_lookups_positive; +-extern 
atomic_t fscache_n_object_lookups_timed_out; +-extern atomic_t fscache_n_object_created; +-extern atomic_t fscache_n_object_avail; +-extern atomic_t fscache_n_object_dead; ++extern atomic_unchecked_t fscache_n_object_alloc; ++extern atomic_unchecked_t fscache_n_object_no_alloc; ++extern atomic_unchecked_t fscache_n_object_lookups; ++extern atomic_unchecked_t fscache_n_object_lookups_negative; ++extern atomic_unchecked_t fscache_n_object_lookups_positive; ++extern atomic_unchecked_t fscache_n_object_lookups_timed_out; ++extern atomic_unchecked_t fscache_n_object_created; ++extern atomic_unchecked_t fscache_n_object_avail; ++extern atomic_unchecked_t fscache_n_object_dead; + +-extern atomic_t fscache_n_checkaux_none; +-extern atomic_t fscache_n_checkaux_okay; +-extern atomic_t fscache_n_checkaux_update; +-extern atomic_t fscache_n_checkaux_obsolete; ++extern atomic_unchecked_t fscache_n_checkaux_none; ++extern atomic_unchecked_t fscache_n_checkaux_okay; ++extern atomic_unchecked_t fscache_n_checkaux_update; ++extern atomic_unchecked_t fscache_n_checkaux_obsolete; + + extern atomic_t fscache_n_cop_alloc_object; + extern atomic_t fscache_n_cop_lookup_object; +@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat) + atomic_inc(stat); + } + ++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat) ++{ ++ atomic_inc_unchecked(stat); ++} ++ + static inline void fscache_stat_d(atomic_t *stat) + { + atomic_dec(stat); +@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops; + + #define __fscache_stat(stat) (NULL) + #define fscache_stat(stat) do {} while (0) ++#define fscache_stat_unchecked(stat) do {} while (0) + #define fscache_stat_d(stat) do {} while (0) + #endif + +diff --git a/fs/fscache/object.c b/fs/fscache/object.c +index b6b897c..0ffff9c 100644 +--- a/fs/fscache/object.c ++++ b/fs/fscache/object.c +@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + /* update the object metadata on disk */ + case FSCACHE_OBJECT_UPDATING: + clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); +- fscache_stat(&fscache_n_updates_run); ++ fscache_stat_unchecked(&fscache_n_updates_run); + fscache_stat(&fscache_n_cop_update_object); + object->cache->ops->update_object(object); + fscache_stat_d(&fscache_n_cop_update_object); +@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); +- fscache_stat(&fscache_n_object_dead); ++ fscache_stat_unchecked(&fscache_n_object_dead); + goto terminal_transit; + + /* handle the parent cache of this object being withdrawn from +@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object) + spin_lock(&object->lock); + object->state = FSCACHE_OBJECT_DEAD; + spin_unlock(&object->lock); +- fscache_stat(&fscache_n_object_dead); ++ fscache_stat_unchecked(&fscache_n_object_dead); + goto terminal_transit; + + /* complain about the object being woken up once it is +@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object) + parent->cookie->def->name, cookie->def->name, + object->cache->tag->name); + +- fscache_stat(&fscache_n_object_lookups); ++ fscache_stat_unchecked(&fscache_n_object_lookups); + fscache_stat(&fscache_n_cop_lookup_object); + ret = object->cache->ops->lookup_object(object); + fscache_stat_d(&fscache_n_cop_lookup_object); +@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object 
*object) + if (ret == -ETIMEDOUT) { + /* probably stuck behind another object, so move this one to + * the back of the queue */ +- fscache_stat(&fscache_n_object_lookups_timed_out); ++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } + +@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object) + + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { +- fscache_stat(&fscache_n_object_lookups_negative); ++ fscache_stat_unchecked(&fscache_n_object_lookups_negative); + + /* transit here to allow write requests to begin stacking up + * and read requests to begin returning ENODATA */ +@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object) + * result, in which case there may be data available */ + spin_lock(&object->lock); + if (object->state == FSCACHE_OBJECT_LOOKING_UP) { +- fscache_stat(&fscache_n_object_lookups_positive); ++ fscache_stat_unchecked(&fscache_n_object_lookups_positive); + + clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + +@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object) + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } else { + ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); +- fscache_stat(&fscache_n_object_created); ++ fscache_stat_unchecked(&fscache_n_object_created); + + object->state = FSCACHE_OBJECT_AVAILABLE; + spin_unlock(&object->lock); +@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object) + fscache_enqueue_dependents(object); + + fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); +- fscache_stat(&fscache_n_object_avail); ++ fscache_stat_unchecked(&fscache_n_object_avail); + + _leave(""); + } +@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + enum fscache_checkaux result; + + if (!object->cookie->def->check_aux) { +- fscache_stat(&fscache_n_checkaux_none); ++ fscache_stat_unchecked(&fscache_n_checkaux_none); + return FSCACHE_CHECKAUX_OKAY; + } + +@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + switch (result) { + /* entry okay as is */ + case FSCACHE_CHECKAUX_OKAY: +- fscache_stat(&fscache_n_checkaux_okay); ++ fscache_stat_unchecked(&fscache_n_checkaux_okay); + break; + + /* entry requires update */ + case FSCACHE_CHECKAUX_NEEDS_UPDATE: +- fscache_stat(&fscache_n_checkaux_update); ++ fscache_stat_unchecked(&fscache_n_checkaux_update); + break; + + /* entry requires deletion */ + case FSCACHE_CHECKAUX_OBSOLETE: +- fscache_stat(&fscache_n_checkaux_obsolete); ++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete); + break; + + default: +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index 30afdfa..2256596 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -17,7 +17,7 @@ + #include <linux/slab.h> + #include "internal.h" + +-atomic_t fscache_op_debug_id; ++atomic_unchecked_t fscache_op_debug_id; + EXPORT_SYMBOL(fscache_op_debug_id); + + /** +@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); + ASSERTCMP(atomic_read(&op->usage), >, 0); + +- fscache_stat(&fscache_n_op_enqueue); ++ fscache_stat_unchecked(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { + case FSCACHE_OP_ASYNC: + _debug("queue async"); +@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object, + 
wake_up_bit(&op->flags, FSCACHE_OP_WAITING); + if (op->processor) + fscache_enqueue_operation(op); +- fscache_stat(&fscache_n_op_run); ++ fscache_stat_unchecked(&fscache_n_op_run); + } + + /* +@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + if (object->n_ops > 1) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_in_progress, ==, 0); +@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + object->n_exclusive++; /* reads and writes must wait */ + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else { + /* not allowed to submit ops in any other state */ +@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object, + if (object->n_exclusive > 0) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_exclusive, ==, 0); +@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object, + object->n_ops++; + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else if (object->state == FSCACHE_OBJECT_DYING || + object->state == FSCACHE_OBJECT_LC_DYING || + object->state == FSCACHE_OBJECT_WITHDRAWING) { +- fscache_stat(&fscache_n_op_rejected); ++ fscache_stat_unchecked(&fscache_n_op_rejected); + ret = -ENOBUFS; + } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { + fscache_report_unexpected_submission(object, op, ostate); +@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op) + + ret = -EBUSY; + if (!list_empty(&op->pend_link)) { +- fscache_stat(&fscache_n_op_cancelled); ++ fscache_stat_unchecked(&fscache_n_op_cancelled); + list_del_init(&op->pend_link); + object->n_ops--; + if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) +@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op) + if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) + BUG(); + +- fscache_stat(&fscache_n_op_release); ++ fscache_stat_unchecked(&fscache_n_op_release); + + if (op->release) { + op->release(op); +@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op) + * lock, and defer it otherwise */ + if (!spin_trylock(&object->lock)) { + _debug("defer put"); +- fscache_stat(&fscache_n_op_deferred_release); ++ fscache_stat_unchecked(&fscache_n_op_deferred_release); + + cache = object->cache; + spin_lock(&cache->op_gc_list_lock); +@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work) + + _debug("GC DEFERRED REL OBJ%x OP%x", + object->debug_id, op->debug_id); +- fscache_stat(&fscache_n_op_gc); ++ 
fscache_stat_unchecked(&fscache_n_op_gc); + + ASSERTCMP(atomic_read(&op->usage), ==, 0); + +diff --git a/fs/fscache/page.c b/fs/fscache/page.c +index 3f7a59b..cf196cc 100644 +--- a/fs/fscache/page.c ++++ b/fs/fscache/page.c +@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + val = radix_tree_lookup(&cookie->stores, page->index); + if (!val) { + rcu_read_unlock(); +- fscache_stat(&fscache_n_store_vmscan_not_storing); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing); + __fscache_uncache_page(cookie, page); + return true; + } +@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + spin_unlock(&cookie->stores_lock); + + if (xpage) { +- fscache_stat(&fscache_n_store_vmscan_cancelled); +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + ASSERTCMP(xpage, ==, page); + } else { +- fscache_stat(&fscache_n_store_vmscan_gone); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone); + } + + wake_up_bit(&cookie->flags, 0); +@@ -107,7 +107,7 @@ page_busy: + /* we might want to wait here, but that could deadlock the allocator as + * the work threads writing to the cache may all end up sleeping + * on memory allocation */ +- fscache_stat(&fscache_n_store_vmscan_busy); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy); + return false; + } + EXPORT_SYMBOL(__fscache_maybe_release_page); +@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object, + FSCACHE_COOKIE_STORING_TAG); + if (!radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG)) { +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + } + spin_unlock(&cookie->stores_lock); +@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) + + _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); + +- fscache_stat(&fscache_n_attr_changed_calls); ++ fscache_stat_unchecked(&fscache_n_attr_changed_calls); + + if (fscache_object_is_active(object)) { + fscache_stat(&fscache_n_cop_attr_changed); +@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + +- fscache_stat(&fscache_n_attr_changed); ++ fscache_stat_unchecked(&fscache_n_attr_changed); + + op = kzalloc(sizeof(*op), GFP_KERNEL); + if (!op) { +- fscache_stat(&fscache_n_attr_changed_nomem); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem); + _leave(" = -ENOMEM"); + return -ENOMEM; + } +@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + if (fscache_submit_exclusive_op(object, op) < 0) + goto nobufs; + spin_unlock(&cookie->lock); +- fscache_stat(&fscache_n_attr_changed_ok); ++ fscache_stat_unchecked(&fscache_n_attr_changed_ok); + fscache_put_operation(op); + _leave(" = 0"); + return 0; +@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + nobufs: + spin_unlock(&cookie->lock); + kfree(op); +- fscache_stat(&fscache_n_attr_changed_nobufs); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs); + _leave(" = %d", -ENOBUFS); + return -ENOBUFS; + } +@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( + /* allocate a retrieval operation and attempt to submit it */ + op = kzalloc(sizeof(*op), GFP_NOIO); + if (!op) { +- 
fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + return NULL; + } + +@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) + return 0; + } + +- fscache_stat(&fscache_n_retrievals_wait); ++ fscache_stat_unchecked(&fscache_n_retrievals_wait); + + jif = jiffies; + if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) != 0) { +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + _leave(" = -ERESTARTSYS"); + return -ERESTARTSYS; + } +@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) + */ + static int fscache_wait_for_retrieval_activation(struct fscache_object *object, + struct fscache_retrieval *op, +- atomic_t *stat_op_waits, +- atomic_t *stat_object_dead) ++ atomic_unchecked_t *stat_op_waits, ++ atomic_unchecked_t *stat_object_dead) + { + int ret; + +@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, + goto check_if_dead; + + _debug(">>> WT"); +- fscache_stat(stat_op_waits); ++ fscache_stat_unchecked(stat_op_waits); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { +@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, + + check_if_dead: + if (unlikely(fscache_object_is_dead(object))) { +- fscache_stat(stat_object_dead); ++ fscache_stat_unchecked(stat_object_dead); + return -ENOBUFS; + } + return 0; +@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + + _enter("%p,%p,,,", cookie, page); + +- fscache_stat(&fscache_n_retrievals); ++ fscache_stat_unchecked(&fscache_n_retrievals); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; +@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + goto nobufs_unlock; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_retrieval_ops); ++ fscache_stat_unchecked(&fscache_n_retrieval_ops); + + /* pin the netfs read context in case we need to do the actual netfs + * read because we've encountered a cache read failure */ +@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + + error: + if (ret == -ENOMEM) +- fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + else if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + else if (ret == -ENODATA) +- fscache_stat(&fscache_n_retrievals_nodata); ++ fscache_stat_unchecked(&fscache_n_retrievals_nodata); + else if (ret < 0) +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + else +- fscache_stat(&fscache_n_retrievals_ok); ++ fscache_stat_unchecked(&fscache_n_retrievals_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -429,7 +429,7 @@ nobufs_unlock: + spin_unlock(&cookie->lock); + kfree(op); + nobufs: +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + + _enter("%p,,%d,,,", cookie, *nr_pages); + +- fscache_stat(&fscache_n_retrievals); ++ fscache_stat_unchecked(&fscache_n_retrievals); + + if (hlist_empty(&cookie->backing_objects)) + goto 
nobufs; +@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + goto nobufs_unlock; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_retrieval_ops); ++ fscache_stat_unchecked(&fscache_n_retrieval_ops); + + /* pin the netfs read context in case we need to do the actual netfs + * read because we've encountered a cache read failure */ +@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + + error: + if (ret == -ENOMEM) +- fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + else if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + else if (ret == -ENODATA) +- fscache_stat(&fscache_n_retrievals_nodata); ++ fscache_stat_unchecked(&fscache_n_retrievals_nodata); + else if (ret < 0) +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + else +- fscache_stat(&fscache_n_retrievals_ok); ++ fscache_stat_unchecked(&fscache_n_retrievals_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -545,7 +545,7 @@ nobufs_unlock: + spin_unlock(&cookie->lock); + kfree(op); + nobufs: +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + + _enter("%p,%p,,,", cookie, page); + +- fscache_stat(&fscache_n_allocs); ++ fscache_stat_unchecked(&fscache_n_allocs); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; +@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + goto nobufs_unlock; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_alloc_ops); ++ fscache_stat_unchecked(&fscache_n_alloc_ops); + + ret = fscache_wait_for_retrieval_activation( + object, op, +@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + + error: + if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_allocs_intr); ++ fscache_stat_unchecked(&fscache_n_allocs_intr); + else if (ret < 0) +- fscache_stat(&fscache_n_allocs_nobufs); ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs); + else +- fscache_stat(&fscache_n_allocs_ok); ++ fscache_stat_unchecked(&fscache_n_allocs_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -625,7 +625,7 @@ nobufs_unlock: + spin_unlock(&cookie->lock); + kfree(op); + nobufs: +- fscache_stat(&fscache_n_allocs_nobufs); ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op) + + spin_lock(&cookie->stores_lock); + +- fscache_stat(&fscache_n_store_calls); ++ fscache_stat_unchecked(&fscache_n_store_calls); + + /* find a page to store */ + page = NULL; +@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op) + page = results[0]; + _debug("gang %d [%lx]", n, page->index); + if (page->index > op->store_limit) { +- fscache_stat(&fscache_n_store_pages_over_limit); ++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit); + goto superseded; + } + +@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op) + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + +- fscache_stat(&fscache_n_store_pages); ++ fscache_stat_unchecked(&fscache_n_store_pages); + fscache_stat(&fscache_n_cop_write_page); + ret = object->cache->ops->write_page(op, page); + 
fscache_stat_d(&fscache_n_cop_write_page); +@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + ASSERT(PageFsCache(page)); + +- fscache_stat(&fscache_n_stores); ++ fscache_stat_unchecked(&fscache_n_stores); + + op = kzalloc(sizeof(*op), GFP_NOIO); + if (!op) +@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + +- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + op->store_limit = object->store_limit; + + if (fscache_submit_op(object, &op->op) < 0) +@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie, + + spin_unlock(&cookie->lock); + radix_tree_preload_end(); +- fscache_stat(&fscache_n_store_ops); +- fscache_stat(&fscache_n_stores_ok); ++ fscache_stat_unchecked(&fscache_n_store_ops); ++ fscache_stat_unchecked(&fscache_n_stores_ok); + + /* the work queue now carries its own ref on the object */ + fscache_put_operation(&op->op); +@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie, + return 0; + + already_queued: +- fscache_stat(&fscache_n_stores_again); ++ fscache_stat_unchecked(&fscache_n_stores_again); + already_pending: + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + spin_unlock(&cookie->lock); + radix_tree_preload_end(); + kfree(op); +- fscache_stat(&fscache_n_stores_ok); ++ fscache_stat_unchecked(&fscache_n_stores_ok); + _leave(" = 0"); + return 0; + +@@ -851,14 +851,14 @@ nobufs: + spin_unlock(&cookie->lock); + radix_tree_preload_end(); + kfree(op); +- fscache_stat(&fscache_n_stores_nobufs); ++ fscache_stat_unchecked(&fscache_n_stores_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + + nomem_free: + kfree(op); + nomem: +- fscache_stat(&fscache_n_stores_oom); ++ fscache_stat_unchecked(&fscache_n_stores_oom); + _leave(" = -ENOMEM"); + return -ENOMEM; + } +@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + ASSERTCMP(page, !=, NULL); + +- fscache_stat(&fscache_n_uncaches); ++ fscache_stat_unchecked(&fscache_n_uncaches); + + /* cache withdrawal may beat us to it */ + if (!PageFsCache(page)) +@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op, + unsigned long loop; + + #ifdef CONFIG_FSCACHE_STATS +- atomic_add(pagevec->nr, &fscache_n_marks); ++ atomic_add_unchecked(pagevec->nr, &fscache_n_marks); + #endif + + for (loop = 0; loop < pagevec->nr; loop++) { +diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c +index 4765190..2a067f2 100644 +--- a/fs/fscache/stats.c ++++ b/fs/fscache/stats.c +@@ -18,95 +18,95 @@ + /* + * operation counters + */ +-atomic_t fscache_n_op_pend; +-atomic_t fscache_n_op_run; +-atomic_t fscache_n_op_enqueue; +-atomic_t fscache_n_op_requeue; +-atomic_t fscache_n_op_deferred_release; +-atomic_t fscache_n_op_release; +-atomic_t fscache_n_op_gc; +-atomic_t fscache_n_op_cancelled; +-atomic_t fscache_n_op_rejected; ++atomic_unchecked_t fscache_n_op_pend; ++atomic_unchecked_t fscache_n_op_run; ++atomic_unchecked_t fscache_n_op_enqueue; ++atomic_unchecked_t fscache_n_op_requeue; ++atomic_unchecked_t fscache_n_op_deferred_release; ++atomic_unchecked_t fscache_n_op_release; ++atomic_unchecked_t fscache_n_op_gc; ++atomic_unchecked_t fscache_n_op_cancelled; ++atomic_unchecked_t fscache_n_op_rejected; + 
+-atomic_t fscache_n_attr_changed; +-atomic_t fscache_n_attr_changed_ok; +-atomic_t fscache_n_attr_changed_nobufs; +-atomic_t fscache_n_attr_changed_nomem; +-atomic_t fscache_n_attr_changed_calls; ++atomic_unchecked_t fscache_n_attr_changed; ++atomic_unchecked_t fscache_n_attr_changed_ok; ++atomic_unchecked_t fscache_n_attr_changed_nobufs; ++atomic_unchecked_t fscache_n_attr_changed_nomem; ++atomic_unchecked_t fscache_n_attr_changed_calls; + +-atomic_t fscache_n_allocs; +-atomic_t fscache_n_allocs_ok; +-atomic_t fscache_n_allocs_wait; +-atomic_t fscache_n_allocs_nobufs; +-atomic_t fscache_n_allocs_intr; +-atomic_t fscache_n_allocs_object_dead; +-atomic_t fscache_n_alloc_ops; +-atomic_t fscache_n_alloc_op_waits; ++atomic_unchecked_t fscache_n_allocs; ++atomic_unchecked_t fscache_n_allocs_ok; ++atomic_unchecked_t fscache_n_allocs_wait; ++atomic_unchecked_t fscache_n_allocs_nobufs; ++atomic_unchecked_t fscache_n_allocs_intr; ++atomic_unchecked_t fscache_n_allocs_object_dead; ++atomic_unchecked_t fscache_n_alloc_ops; ++atomic_unchecked_t fscache_n_alloc_op_waits; + +-atomic_t fscache_n_retrievals; +-atomic_t fscache_n_retrievals_ok; +-atomic_t fscache_n_retrievals_wait; +-atomic_t fscache_n_retrievals_nodata; +-atomic_t fscache_n_retrievals_nobufs; +-atomic_t fscache_n_retrievals_intr; +-atomic_t fscache_n_retrievals_nomem; +-atomic_t fscache_n_retrievals_object_dead; +-atomic_t fscache_n_retrieval_ops; +-atomic_t fscache_n_retrieval_op_waits; ++atomic_unchecked_t fscache_n_retrievals; ++atomic_unchecked_t fscache_n_retrievals_ok; ++atomic_unchecked_t fscache_n_retrievals_wait; ++atomic_unchecked_t fscache_n_retrievals_nodata; ++atomic_unchecked_t fscache_n_retrievals_nobufs; ++atomic_unchecked_t fscache_n_retrievals_intr; ++atomic_unchecked_t fscache_n_retrievals_nomem; ++atomic_unchecked_t fscache_n_retrievals_object_dead; ++atomic_unchecked_t fscache_n_retrieval_ops; ++atomic_unchecked_t fscache_n_retrieval_op_waits; + +-atomic_t fscache_n_stores; +-atomic_t fscache_n_stores_ok; +-atomic_t fscache_n_stores_again; +-atomic_t fscache_n_stores_nobufs; +-atomic_t fscache_n_stores_oom; +-atomic_t fscache_n_store_ops; +-atomic_t fscache_n_store_calls; +-atomic_t fscache_n_store_pages; +-atomic_t fscache_n_store_radix_deletes; +-atomic_t fscache_n_store_pages_over_limit; ++atomic_unchecked_t fscache_n_stores; ++atomic_unchecked_t fscache_n_stores_ok; ++atomic_unchecked_t fscache_n_stores_again; ++atomic_unchecked_t fscache_n_stores_nobufs; ++atomic_unchecked_t fscache_n_stores_oom; ++atomic_unchecked_t fscache_n_store_ops; ++atomic_unchecked_t fscache_n_store_calls; ++atomic_unchecked_t fscache_n_store_pages; ++atomic_unchecked_t fscache_n_store_radix_deletes; ++atomic_unchecked_t fscache_n_store_pages_over_limit; + +-atomic_t fscache_n_store_vmscan_not_storing; +-atomic_t fscache_n_store_vmscan_gone; +-atomic_t fscache_n_store_vmscan_busy; +-atomic_t fscache_n_store_vmscan_cancelled; ++atomic_unchecked_t fscache_n_store_vmscan_not_storing; ++atomic_unchecked_t fscache_n_store_vmscan_gone; ++atomic_unchecked_t fscache_n_store_vmscan_busy; ++atomic_unchecked_t fscache_n_store_vmscan_cancelled; + +-atomic_t fscache_n_marks; +-atomic_t fscache_n_uncaches; ++atomic_unchecked_t fscache_n_marks; ++atomic_unchecked_t fscache_n_uncaches; + +-atomic_t fscache_n_acquires; +-atomic_t fscache_n_acquires_null; +-atomic_t fscache_n_acquires_no_cache; +-atomic_t fscache_n_acquires_ok; +-atomic_t fscache_n_acquires_nobufs; +-atomic_t fscache_n_acquires_oom; ++atomic_unchecked_t fscache_n_acquires; 
++atomic_unchecked_t fscache_n_acquires_null; ++atomic_unchecked_t fscache_n_acquires_no_cache; ++atomic_unchecked_t fscache_n_acquires_ok; ++atomic_unchecked_t fscache_n_acquires_nobufs; ++atomic_unchecked_t fscache_n_acquires_oom; + +-atomic_t fscache_n_updates; +-atomic_t fscache_n_updates_null; +-atomic_t fscache_n_updates_run; ++atomic_unchecked_t fscache_n_updates; ++atomic_unchecked_t fscache_n_updates_null; ++atomic_unchecked_t fscache_n_updates_run; + +-atomic_t fscache_n_relinquishes; +-atomic_t fscache_n_relinquishes_null; +-atomic_t fscache_n_relinquishes_waitcrt; +-atomic_t fscache_n_relinquishes_retire; ++atomic_unchecked_t fscache_n_relinquishes; ++atomic_unchecked_t fscache_n_relinquishes_null; ++atomic_unchecked_t fscache_n_relinquishes_waitcrt; ++atomic_unchecked_t fscache_n_relinquishes_retire; + +-atomic_t fscache_n_cookie_index; +-atomic_t fscache_n_cookie_data; +-atomic_t fscache_n_cookie_special; ++atomic_unchecked_t fscache_n_cookie_index; ++atomic_unchecked_t fscache_n_cookie_data; ++atomic_unchecked_t fscache_n_cookie_special; + +-atomic_t fscache_n_object_alloc; +-atomic_t fscache_n_object_no_alloc; +-atomic_t fscache_n_object_lookups; +-atomic_t fscache_n_object_lookups_negative; +-atomic_t fscache_n_object_lookups_positive; +-atomic_t fscache_n_object_lookups_timed_out; +-atomic_t fscache_n_object_created; +-atomic_t fscache_n_object_avail; +-atomic_t fscache_n_object_dead; ++atomic_unchecked_t fscache_n_object_alloc; ++atomic_unchecked_t fscache_n_object_no_alloc; ++atomic_unchecked_t fscache_n_object_lookups; ++atomic_unchecked_t fscache_n_object_lookups_negative; ++atomic_unchecked_t fscache_n_object_lookups_positive; ++atomic_unchecked_t fscache_n_object_lookups_timed_out; ++atomic_unchecked_t fscache_n_object_created; ++atomic_unchecked_t fscache_n_object_avail; ++atomic_unchecked_t fscache_n_object_dead; + +-atomic_t fscache_n_checkaux_none; +-atomic_t fscache_n_checkaux_okay; +-atomic_t fscache_n_checkaux_update; +-atomic_t fscache_n_checkaux_obsolete; ++atomic_unchecked_t fscache_n_checkaux_none; ++atomic_unchecked_t fscache_n_checkaux_okay; ++atomic_unchecked_t fscache_n_checkaux_update; ++atomic_unchecked_t fscache_n_checkaux_obsolete; + + atomic_t fscache_n_cop_alloc_object; + atomic_t fscache_n_cop_lookup_object; +@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v) + seq_puts(m, "FS-Cache statistics\n"); + + seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", +- atomic_read(&fscache_n_cookie_index), +- atomic_read(&fscache_n_cookie_data), +- atomic_read(&fscache_n_cookie_special)); ++ atomic_read_unchecked(&fscache_n_cookie_index), ++ atomic_read_unchecked(&fscache_n_cookie_data), ++ atomic_read_unchecked(&fscache_n_cookie_special)); + + seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", +- atomic_read(&fscache_n_object_alloc), +- atomic_read(&fscache_n_object_no_alloc), +- atomic_read(&fscache_n_object_avail), +- atomic_read(&fscache_n_object_dead)); ++ atomic_read_unchecked(&fscache_n_object_alloc), ++ atomic_read_unchecked(&fscache_n_object_no_alloc), ++ atomic_read_unchecked(&fscache_n_object_avail), ++ atomic_read_unchecked(&fscache_n_object_dead)); + seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", +- atomic_read(&fscache_n_checkaux_none), +- atomic_read(&fscache_n_checkaux_okay), +- atomic_read(&fscache_n_checkaux_update), +- atomic_read(&fscache_n_checkaux_obsolete)); ++ atomic_read_unchecked(&fscache_n_checkaux_none), ++ atomic_read_unchecked(&fscache_n_checkaux_okay), ++ 
atomic_read_unchecked(&fscache_n_checkaux_update), ++ atomic_read_unchecked(&fscache_n_checkaux_obsolete)); + + seq_printf(m, "Pages : mrk=%u unc=%u\n", +- atomic_read(&fscache_n_marks), +- atomic_read(&fscache_n_uncaches)); ++ atomic_read_unchecked(&fscache_n_marks), ++ atomic_read_unchecked(&fscache_n_uncaches)); + + seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" + " oom=%u\n", +- atomic_read(&fscache_n_acquires), +- atomic_read(&fscache_n_acquires_null), +- atomic_read(&fscache_n_acquires_no_cache), +- atomic_read(&fscache_n_acquires_ok), +- atomic_read(&fscache_n_acquires_nobufs), +- atomic_read(&fscache_n_acquires_oom)); ++ atomic_read_unchecked(&fscache_n_acquires), ++ atomic_read_unchecked(&fscache_n_acquires_null), ++ atomic_read_unchecked(&fscache_n_acquires_no_cache), ++ atomic_read_unchecked(&fscache_n_acquires_ok), ++ atomic_read_unchecked(&fscache_n_acquires_nobufs), ++ atomic_read_unchecked(&fscache_n_acquires_oom)); + + seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", +- atomic_read(&fscache_n_object_lookups), +- atomic_read(&fscache_n_object_lookups_negative), +- atomic_read(&fscache_n_object_lookups_positive), +- atomic_read(&fscache_n_object_created), +- atomic_read(&fscache_n_object_lookups_timed_out)); ++ atomic_read_unchecked(&fscache_n_object_lookups), ++ atomic_read_unchecked(&fscache_n_object_lookups_negative), ++ atomic_read_unchecked(&fscache_n_object_lookups_positive), ++ atomic_read_unchecked(&fscache_n_object_created), ++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out)); + + seq_printf(m, "Updates: n=%u nul=%u run=%u\n", +- atomic_read(&fscache_n_updates), +- atomic_read(&fscache_n_updates_null), +- atomic_read(&fscache_n_updates_run)); ++ atomic_read_unchecked(&fscache_n_updates), ++ atomic_read_unchecked(&fscache_n_updates_null), ++ atomic_read_unchecked(&fscache_n_updates_run)); + + seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", +- atomic_read(&fscache_n_relinquishes), +- atomic_read(&fscache_n_relinquishes_null), +- atomic_read(&fscache_n_relinquishes_waitcrt), +- atomic_read(&fscache_n_relinquishes_retire)); ++ atomic_read_unchecked(&fscache_n_relinquishes), ++ atomic_read_unchecked(&fscache_n_relinquishes_null), ++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt), ++ atomic_read_unchecked(&fscache_n_relinquishes_retire)); + + seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", +- atomic_read(&fscache_n_attr_changed), +- atomic_read(&fscache_n_attr_changed_ok), +- atomic_read(&fscache_n_attr_changed_nobufs), +- atomic_read(&fscache_n_attr_changed_nomem), +- atomic_read(&fscache_n_attr_changed_calls)); ++ atomic_read_unchecked(&fscache_n_attr_changed), ++ atomic_read_unchecked(&fscache_n_attr_changed_ok), ++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs), ++ atomic_read_unchecked(&fscache_n_attr_changed_nomem), ++ atomic_read_unchecked(&fscache_n_attr_changed_calls)); + + seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", +- atomic_read(&fscache_n_allocs), +- atomic_read(&fscache_n_allocs_ok), +- atomic_read(&fscache_n_allocs_wait), +- atomic_read(&fscache_n_allocs_nobufs), +- atomic_read(&fscache_n_allocs_intr)); ++ atomic_read_unchecked(&fscache_n_allocs), ++ atomic_read_unchecked(&fscache_n_allocs_ok), ++ atomic_read_unchecked(&fscache_n_allocs_wait), ++ atomic_read_unchecked(&fscache_n_allocs_nobufs), ++ atomic_read_unchecked(&fscache_n_allocs_intr)); + seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", +- atomic_read(&fscache_n_alloc_ops), +- atomic_read(&fscache_n_alloc_op_waits), +- 
atomic_read(&fscache_n_allocs_object_dead)); ++ atomic_read_unchecked(&fscache_n_alloc_ops), ++ atomic_read_unchecked(&fscache_n_alloc_op_waits), ++ atomic_read_unchecked(&fscache_n_allocs_object_dead)); + + seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" + " int=%u oom=%u\n", +- atomic_read(&fscache_n_retrievals), +- atomic_read(&fscache_n_retrievals_ok), +- atomic_read(&fscache_n_retrievals_wait), +- atomic_read(&fscache_n_retrievals_nodata), +- atomic_read(&fscache_n_retrievals_nobufs), +- atomic_read(&fscache_n_retrievals_intr), +- atomic_read(&fscache_n_retrievals_nomem)); ++ atomic_read_unchecked(&fscache_n_retrievals), ++ atomic_read_unchecked(&fscache_n_retrievals_ok), ++ atomic_read_unchecked(&fscache_n_retrievals_wait), ++ atomic_read_unchecked(&fscache_n_retrievals_nodata), ++ atomic_read_unchecked(&fscache_n_retrievals_nobufs), ++ atomic_read_unchecked(&fscache_n_retrievals_intr), ++ atomic_read_unchecked(&fscache_n_retrievals_nomem)); + seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", +- atomic_read(&fscache_n_retrieval_ops), +- atomic_read(&fscache_n_retrieval_op_waits), +- atomic_read(&fscache_n_retrievals_object_dead)); ++ atomic_read_unchecked(&fscache_n_retrieval_ops), ++ atomic_read_unchecked(&fscache_n_retrieval_op_waits), ++ atomic_read_unchecked(&fscache_n_retrievals_object_dead)); + + seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", +- atomic_read(&fscache_n_stores), +- atomic_read(&fscache_n_stores_ok), +- atomic_read(&fscache_n_stores_again), +- atomic_read(&fscache_n_stores_nobufs), +- atomic_read(&fscache_n_stores_oom)); ++ atomic_read_unchecked(&fscache_n_stores), ++ atomic_read_unchecked(&fscache_n_stores_ok), ++ atomic_read_unchecked(&fscache_n_stores_again), ++ atomic_read_unchecked(&fscache_n_stores_nobufs), ++ atomic_read_unchecked(&fscache_n_stores_oom)); + seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", +- atomic_read(&fscache_n_store_ops), +- atomic_read(&fscache_n_store_calls), +- atomic_read(&fscache_n_store_pages), +- atomic_read(&fscache_n_store_radix_deletes), +- atomic_read(&fscache_n_store_pages_over_limit)); ++ atomic_read_unchecked(&fscache_n_store_ops), ++ atomic_read_unchecked(&fscache_n_store_calls), ++ atomic_read_unchecked(&fscache_n_store_pages), ++ atomic_read_unchecked(&fscache_n_store_radix_deletes), ++ atomic_read_unchecked(&fscache_n_store_pages_over_limit)); + + seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n", +- atomic_read(&fscache_n_store_vmscan_not_storing), +- atomic_read(&fscache_n_store_vmscan_gone), +- atomic_read(&fscache_n_store_vmscan_busy), +- atomic_read(&fscache_n_store_vmscan_cancelled)); ++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing), ++ atomic_read_unchecked(&fscache_n_store_vmscan_gone), ++ atomic_read_unchecked(&fscache_n_store_vmscan_busy), ++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled)); + + seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", +- atomic_read(&fscache_n_op_pend), +- atomic_read(&fscache_n_op_run), +- atomic_read(&fscache_n_op_enqueue), +- atomic_read(&fscache_n_op_cancelled), +- atomic_read(&fscache_n_op_rejected)); ++ atomic_read_unchecked(&fscache_n_op_pend), ++ atomic_read_unchecked(&fscache_n_op_run), ++ atomic_read_unchecked(&fscache_n_op_enqueue), ++ atomic_read_unchecked(&fscache_n_op_cancelled), ++ atomic_read_unchecked(&fscache_n_op_rejected)); + seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", +- atomic_read(&fscache_n_op_deferred_release), +- atomic_read(&fscache_n_op_release), +- atomic_read(&fscache_n_op_gc)); ++ 
atomic_read_unchecked(&fscache_n_op_deferred_release), ++ atomic_read_unchecked(&fscache_n_op_release), ++ atomic_read_unchecked(&fscache_n_op_gc)); + + seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", + atomic_read(&fscache_n_cop_alloc_object), +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c +index 3426521..3b75162 100644 +--- a/fs/fuse/cuse.c ++++ b/fs/fuse/cuse.c +@@ -587,10 +587,12 @@ static int __init cuse_init(void) + INIT_LIST_HEAD(&cuse_conntbl[i]); + + /* inherit and extend fuse_dev_operations */ +- cuse_channel_fops = fuse_dev_operations; +- cuse_channel_fops.owner = THIS_MODULE; +- cuse_channel_fops.open = cuse_channel_open; +- cuse_channel_fops.release = cuse_channel_release; ++ pax_open_kernel(); ++ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations)); ++ *(void **)&cuse_channel_fops.owner = THIS_MODULE; ++ *(void **)&cuse_channel_fops.open = cuse_channel_open; ++ *(void **)&cuse_channel_fops.release = cuse_channel_release; ++ pax_close_kernel(); + + cuse_class = class_create(THIS_MODULE, "cuse"); + if (IS_ERR(cuse_class)) +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 2aaf3ea..8e50863 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, + ret = 0; + pipe_lock(pipe); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 9f63e49..d8a64c0 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry) + return link; + } + +-static void free_link(char *link) ++static void free_link(const char *link) + { + if (!IS_ERR(link)) + free_page((unsigned long) link); +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c +index cfd4959..a780959 100644 +--- a/fs/gfs2/inode.c ++++ b/fs/gfs2/inode.c +@@ -1490,7 +1490,7 @@ out: + + static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + kfree(s); + } +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 0be5a78..9cfb853 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = { + .kill_sb = kill_litter_super, + }; + +-static struct vfsmount *hugetlbfs_vfsmount; ++struct vfsmount *hugetlbfs_vfsmount; + + static int can_do_hugetlb_shm(void) + { +diff --git a/fs/inode.c b/fs/inode.c +index ee4e66b..9a39f9c 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -787,8 +787,8 @@ unsigned int get_next_ino(void) + + #ifdef CONFIG_SMP + if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { +- static atomic_t shared_last_ino; +- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); ++ static atomic_unchecked_t shared_last_ino; ++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino); + + res = next - LAST_INO_BATCH; + } +@@ -855,8 +855,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) + struct file_system_type *type = inode->i_sb->s_type; + + /* Set new key only if filesystem hasn't already changed it */ +- if (!lockdep_match_class(&inode->i_mutex, +- &type->i_mutex_key)) { ++ if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) { + /* + * ensure nobody is actually holding i_mutex + */ +@@ -883,6 +882,7 @@ void unlock_new_inode(struct inode *inode) + spin_lock(&inode->i_lock); + WARN_ON(!(inode->i_state & I_NEW)); + 
inode->i_state &= ~I_NEW; ++ smp_mb(); + wake_up_bit(&inode->i_state, __I_NEW); + spin_unlock(&inode->i_lock); + } +diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c +index e513f19..2ab1351 100644 +--- a/fs/jffs2/erase.c ++++ b/fs/jffs2/erase.c +@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb + struct jffs2_unknown_node marker = { + .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = cpu_to_je32(c->cleanmarker_size) ++ .totlen = cpu_to_je32(c->cleanmarker_size), ++ .hdr_crc = cpu_to_je32(0) + }; + + jffs2_prealloc_raw_node_refs(c, jeb, 1); +diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c +index b09e51d..e482afa 100644 +--- a/fs/jffs2/wbuf.c ++++ b/fs/jffs2/wbuf.c +@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker = + { + .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = constant_cpu_to_je32(8) ++ .totlen = constant_cpu_to_je32(8), ++ .hdr_crc = constant_cpu_to_je32(0) + }; + + /* +diff --git a/fs/jfs/super.c b/fs/jfs/super.c +index a44eff0..462e07d 100644 +--- a/fs/jfs/super.c ++++ b/fs/jfs/super.c +@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void) + + jfs_inode_cachep = + kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, +- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, ++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY, + init_once); + if (jfs_inode_cachep == NULL) + return -ENOMEM; +diff --git a/fs/libfs.c b/fs/libfs.c +index f6d411e..e82a08d 100644 +--- a/fs/libfs.c ++++ b/fs/libfs.c +@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) + + for (p=q->next; p != &dentry->d_subdirs; p=p->next) { + struct dentry *next; ++ char d_name[sizeof(next->d_iname)]; ++ const unsigned char *name; ++ + next = list_entry(p, struct dentry, d_u.d_child); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { +@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) + + spin_unlock(&next->d_lock); + spin_unlock(&dentry->d_lock); +- if (filldir(dirent, next->d_name.name, ++ name = next->d_name.name; ++ if (name == next->d_iname) { ++ memcpy(d_name, name, next->d_name.len); ++ name = d_name; ++ } ++ if (filldir(dirent, name, + next->d_name.len, filp->f_pos, + next->d_inode->i_ino, + dt_type(next->d_inode)) < 0) +diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c +index 8392cb8..80d6193 100644 +--- a/fs/lockd/clntproc.c ++++ b/fs/lockd/clntproc.c +@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops; + /* + * Cookie counter for NLM requests + */ +-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); ++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234); + + void nlmclnt_next_cookie(struct nlm_cookie *c) + { +- u32 cookie = atomic_inc_return(&nlm_cookie); ++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie); + + memcpy(c->data, &cookie, 4); + c->len=4; +diff --git a/fs/locks.c b/fs/locks.c +index 637694b..f84a121 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp) + return; + + if (filp->f_op && filp->f_op->flock) { +- struct file_lock fl = { ++ struct file_lock flock = { + .fl_pid = current->tgid, + .fl_file = filp, + .fl_flags = FL_FLOCK, + .fl_type = F_UNLCK, + .fl_end = OFFSET_MAX, + }; +- filp->f_op->flock(filp, F_SETLKW, &fl); +- if (fl.fl_ops && fl.fl_ops->fl_release_private) +- 
fl.fl_ops->fl_release_private(&fl); ++ filp->f_op->flock(filp, F_SETLKW, &flock); ++ if (flock.fl_ops && flock.fl_ops->fl_release_private) ++ flock.fl_ops->fl_release_private(&flock); + } + + lock_flocks(); +diff --git a/fs/namei.c b/fs/namei.c +index 9680cef..a19f203 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask) + if (ret != -EACCES) + return ret; + ++#ifdef CONFIG_GRKERNSEC ++ /* we'll block if we have to log due to a denied capability use */ ++ if (mask & MAY_NOT_BLOCK) ++ return -ECHILD; ++#endif ++ + if (S_ISDIR(inode->i_mode)) { + /* DACs are overridable for directories */ +- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) +- return 0; + if (!(mask & MAY_WRITE)) +- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) ++ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) || ++ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) + return 0; ++ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) ++ return 0; + return -EACCES; + } + /* ++ * Searching includes executable on directories, else just read. ++ */ ++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; ++ if (mask == MAY_READ) ++ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) || ++ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) ++ return 0; ++ ++ /* + * Read/write DACs are always overridable. + * Executable DACs are overridable when there is + * at least one exec bit set. +@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask) + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) + return 0; + +- /* +- * Searching includes executable on directories, else just read. +- */ +- mask &= MAY_READ | MAY_WRITE | MAY_EXEC; +- if (mask == MAY_READ) +- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) +- return 0; +- + return -EACCES; + } + +@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p) + return error; + } + ++ if (gr_handle_follow_link(dentry->d_parent->d_inode, ++ dentry->d_inode, dentry, nd->path.mnt)) { ++ error = -EACCES; ++ *p = ERR_PTR(error); /* no ->put_link(), please */ ++ path_put(&nd->path); ++ return error; ++ } ++ + nd->last_type = LAST_BIND; + *p = dentry->d_inode->i_op->follow_link(dentry, nd); + error = PTR_ERR(*p); + if (!IS_ERR(*p)) { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + error = 0; + if (s) + error = __vfs_follow_link(nd, s); +@@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name, + if (!err) + err = complete_walk(nd); + ++ if (!(nd->flags & LOOKUP_PARENT)) { ++#ifdef CONFIG_GRKERNSEC ++ if (flags & LOOKUP_RCU) { ++ if (!err) ++ path_put(&nd->path); ++ err = -ECHILD; ++ } else ++#endif ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ if (!err) ++ path_put(&nd->path); ++ err = -ENOENT; ++ } ++ } ++ + if (!err && nd->flags & LOOKUP_DIRECTORY) { + if (!nd->inode->i_op->lookup) { + path_put(&nd->path); +@@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name, + retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd); + + if (likely(!retval)) { ++ if (*name != '/' && nd->path.dentry && nd->inode) { ++#ifdef CONFIG_GRKERNSEC ++ if (flags & LOOKUP_RCU) ++ return -ECHILD; ++#endif ++ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) ++ return -ENOENT; ++ } ++ + if (unlikely(!audit_dummy_context())) { + if (nd->path.dentry && nd->inode) + audit_inode(name, nd->path.dentry); +@@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int 
flag) + if (flag & O_NOATIME && !inode_owner_or_capable(inode)) + return -EPERM; + ++ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) ++ return -EPERM; ++ if (gr_handle_rawio(inode)) ++ return -EPERM; ++ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) ++ return -EACCES; ++ + return 0; + } + +@@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + error = complete_walk(nd); + if (error) + return ERR_PTR(error); ++#ifdef CONFIG_GRKERNSEC ++ if (nd->flags & LOOKUP_RCU) { ++ error = -ECHILD; ++ goto exit; ++ } ++#endif ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ error = -ENOENT; ++ goto exit; ++ } + audit_inode(pathname, nd->path.dentry); + if (open_flag & O_CREAT) { + error = -EISDIR; +@@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + error = complete_walk(nd); + if (error) + return ERR_PTR(error); ++#ifdef CONFIG_GRKERNSEC ++ if (nd->flags & LOOKUP_RCU) { ++ error = -ECHILD; ++ goto exit; ++ } ++#endif ++ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) { ++ error = -ENOENT; ++ goto exit; ++ } + audit_inode(pathname, dir); + goto ok; + } +@@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + error = complete_walk(nd); + if (error) + return ERR_PTR(error); ++#ifdef CONFIG_GRKERNSEC ++ if (nd->flags & LOOKUP_RCU) { ++ error = -ECHILD; ++ goto exit; ++ } ++#endif ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ error = -ENOENT; ++ goto exit; ++ } + + error = -ENOTDIR; + if (nd->flags & LOOKUP_DIRECTORY) { +@@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + /* Negative dentry, just create the file */ + if (!dentry->d_inode) { + int mode = op->mode; ++ ++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) { ++ error = -EACCES; ++ goto exit_mutex_unlock; ++ } ++ + if (!IS_POSIXACL(dir->d_inode)) + mode &= ~current_umask(); + /* +@@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + error = vfs_create(dir->d_inode, dentry, mode, nd); + if (error) + goto exit_mutex_unlock; ++ else ++ gr_handle_create(path->dentry, path->mnt); + mutex_unlock(&dir->d_inode->i_mutex); + dput(nd->path.dentry); + nd->path.dentry = dentry; +@@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path, + /* + * It already exists. 
+ */ ++ ++ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) { ++ error = -ENOENT; ++ goto exit_mutex_unlock; ++ } ++ ++ /* only check if O_CREAT is specified, all other checks need to go ++ into may_open */ ++ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) { ++ error = -EACCES; ++ goto exit_mutex_unlock; ++ } ++ + mutex_unlock(&dir->d_inode->i_mutex); + audit_inode(pathname, path->dentry); + +@@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path + *path = nd.path; + return dentry; + eexist: ++ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) { ++ dput(dentry); ++ dentry = ERR_PTR(-ENOENT); ++ goto fail; ++ } + dput(dentry); + dentry = ERR_PTR(-EEXIST); + fail: +@@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat + } + EXPORT_SYMBOL(user_path_create); + ++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir) ++{ ++ char *tmp = getname(pathname); ++ struct dentry *res; ++ if (IS_ERR(tmp)) ++ return ERR_CAST(tmp); ++ res = kern_path_create(dfd, tmp, path, is_dir); ++ if (IS_ERR(res)) ++ putname(tmp); ++ else ++ *to = tmp; ++ return res; ++} ++ + int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) + { + int error = may_create(dir, dentry); +@@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, + error = mnt_want_write(path.mnt); + if (error) + goto out_dput; ++ ++ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) { ++ error = -EPERM; ++ goto out_drop_write; ++ } ++ ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ + error = security_path_mknod(&path, dentry, mode, dev); + if (error) + goto out_drop_write; +@@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, + } + out_drop_write: + mnt_drop_write(path.mnt); ++ ++ if (!error) ++ gr_handle_create(dentry, path.mnt); + out_dput: + dput(dentry); + mutex_unlock(&path.dentry->d_inode->i_mutex); +@@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode) + error = mnt_want_write(path.mnt); + if (error) + goto out_dput; ++ ++ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ + error = security_path_mkdir(&path, dentry, mode); + if (error) + goto out_drop_write; + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); + out_drop_write: + mnt_drop_write(path.mnt); ++ ++ if (!error) ++ gr_handle_create(dentry, path.mnt); + out_dput: + dput(dentry); + mutex_unlock(&path.dentry->d_inode->i_mutex); +@@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname) + char * name; + struct dentry *dentry; + struct nameidata nd; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + + error = user_path_parent(dfd, pathname, &nd, &name); + if (error) +@@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname) + error = -ENOENT; + goto exit3; + } ++ ++ saved_ino = dentry->d_inode->i_ino; ++ saved_dev = gr_get_dev_from_dentry(dentry); ++ ++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit3; ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit3; +@@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname) + if (error) + goto exit4; + error = 
vfs_rmdir(nd.path.dentry->d_inode, dentry); ++ if (!error && (saved_dev || saved_ino)) ++ gr_handle_delete(saved_ino, saved_dev); + exit4: + mnt_drop_write(nd.path.mnt); + exit3: +@@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname) + struct dentry *dentry; + struct nameidata nd; + struct inode *inode = NULL; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + + error = user_path_parent(dfd, pathname, &nd, &name); + if (error) +@@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname) + if (!inode) + goto slashes; + ihold(inode); ++ ++ if (inode->i_nlink <= 1) { ++ saved_ino = inode->i_ino; ++ saved_dev = gr_get_dev_from_dentry(dentry); ++ } ++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit2; ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit2; +@@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname) + if (error) + goto exit3; + error = vfs_unlink(nd.path.dentry->d_inode, dentry); ++ if (!error && (saved_ino || saved_dev)) ++ gr_handle_delete(saved_ino, saved_dev); + exit3: + mnt_drop_write(nd.path.mnt); + exit2: +@@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, + error = mnt_want_write(path.mnt); + if (error) + goto out_dput; ++ ++ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ + error = security_path_symlink(&path, dentry, from); + if (error) + goto out_drop_write; + error = vfs_symlink(path.dentry->d_inode, dentry, from); ++ if (!error) ++ gr_handle_create(dentry, path.mnt); + out_drop_write: + mnt_drop_write(path.mnt); + out_dput: +@@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, + { + struct dentry *new_dentry; + struct path old_path, new_path; ++ char *to = NULL; + int how = 0; + int error; + +@@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, + if (error) + return error; + +- new_dentry = user_path_create(newdfd, newname, &new_path, 0); ++ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) + goto out; +@@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, + error = mnt_want_write(new_path.mnt); + if (error) + goto out_dput; ++ ++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt, ++ old_path.dentry->d_inode, ++ old_path.dentry->d_inode->i_mode, to)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ ++ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt, ++ old_path.dentry, old_path.mnt, to)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ + error = security_path_link(old_path.dentry, &new_path, new_dentry); + if (error) + goto out_drop_write; + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); ++ if (!error) ++ gr_handle_create(new_dentry, new_path.mnt); + out_drop_write: + mnt_drop_write(new_path.mnt); + out_dput: ++ putname(to); + dput(new_dentry); + mutex_unlock(&new_path.dentry->d_inode->i_mutex); + path_put(&new_path); +@@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, + if (new_dentry == trap) + goto exit5; + ++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt, ++ old_dentry, old_dir->d_inode, oldnd.path.mnt, ++ to); ++ if (error) ++ goto exit5; ++ + error = mnt_want_write(oldnd.path.mnt); + if (error) + goto exit5; +@@ -3217,6 +3416,9 
@@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, + goto exit6; + error = vfs_rename(old_dir->d_inode, old_dentry, + new_dir->d_inode, new_dentry); ++ if (!error) ++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry, ++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0); + exit6: + mnt_drop_write(oldnd.path.mnt); + exit5: +@@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna + + int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) + { ++ char tmpbuf[64]; ++ const char *newlink; + int len; + + len = PTR_ERR(link); +@@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c + len = strlen(link); + if (len > (unsigned) buflen) + len = buflen; +- if (copy_to_user(buffer, link, len)) ++ ++ if (len < sizeof(tmpbuf)) { ++ memcpy(tmpbuf, link, len); ++ newlink = tmpbuf; ++ } else ++ newlink = link; ++ ++ if (copy_to_user(buffer, newlink, len)) + len = -EFAULT; + out: + return len; +diff --git a/fs/namespace.c b/fs/namespace.c +index cfc6d44..b4632a5 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags) + if (!(sb->s_flags & MS_RDONLY)) + retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); + up_write(&sb->s_umount); ++ ++ gr_log_remount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags) + br_write_unlock(vfsmount_lock); + up_write(&namespace_sem); + release_mounts(&umount_list); ++ ++ gr_log_unmount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, + MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | + MS_STRICTATIME); + ++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ ++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ + if (flags & MS_REMOUNT) + retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, + data_page); +@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, + dev_name, data_page); + dput_out: + path_put(&path); ++ ++ gr_log_mount(dev_name, dir_name, retval); ++ + return retval; + } + +@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + if (error) + goto out2; + ++ if (gr_handle_chroot_pivot()) { ++ error = -EPERM; ++ goto out2; ++ } ++ + get_fs_root(current->fs, &root); + error = lock_mount(&old); + if (error) +diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h +index 09881e6..308ff20 100644 +--- a/fs/ncpfs/ncplib_kernel.h ++++ b/fs/ncpfs/ncplib_kernel.h +@@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln + int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *, + const unsigned char *, unsigned int, int); + int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *, +- const unsigned char *, unsigned int, int); ++ const unsigned char *, unsigned int, int) __size_overflow(5); + + #define NCP_ESC ':' + #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io) +@@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *, + int ncp__io2vol(unsigned char *, unsigned int *, + const unsigned char *, unsigned int, int); + int ncp__vol2io(unsigned char *, unsigned int *, +- const unsigned char 
*, unsigned int, int); ++ const unsigned char *, unsigned int, int) __size_overflow(5); + + #define NCP_IO_TABLE(sb) NULL + #define ncp_tolower(t, c) tolower(c) +diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c +index 3db6b82..a57597e 100644 +--- a/fs/nfs/blocklayout/blocklayout.c ++++ b/fs/nfs/blocklayout/blocklayout.c +@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect) + */ + struct parallel_io { + struct kref refcnt; +- struct rpc_call_ops call_ops; ++ rpc_call_ops_no_const call_ops; + void (*pnfs_callback) (void *data); + void *data; + }; +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 50a15fa..ca113f9 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode) + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); + nfsi->attrtimeo_timestamp = jiffies; + +- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); ++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf)); + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; + else +@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt + return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); + } + +-static atomic_long_t nfs_attr_generation_counter; ++static atomic_long_unchecked_t nfs_attr_generation_counter; + + static unsigned long nfs_read_attr_generation_counter(void) + { +- return atomic_long_read(&nfs_attr_generation_counter); ++ return atomic_long_read_unchecked(&nfs_attr_generation_counter); + } + + unsigned long nfs_inc_attr_generation_counter(void) + { +- return atomic_long_inc_return(&nfs_attr_generation_counter); ++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter); + } + + void nfs_fattr_init(struct nfs_fattr *fattr) +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index 7a2e442..8e544cc 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + } else { + oldfs = get_fs(); + set_fs(KERNEL_DS); +- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset); + set_fs(oldfs); + } + +@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + + /* Write the data. 
*/ + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset); + set_fs(oldfs); + if (host_err < 0) + goto out_nfserr; +@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) + */ + + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = inode->i_op->readlink(dentry, buf, *lenp); ++ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp); + set_fs(oldfs); + + if (host_err < 0) +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c +index d327140..501b7f8 100644 +--- a/fs/nilfs2/the_nilfs.c ++++ b/fs/nilfs2/the_nilfs.c +@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, + nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); + nilfs->ns_r_segments_percentage = + le32_to_cpu(sbp->s_r_segments_percentage); ++ if (nilfs->ns_r_segments_percentage < 1 || ++ nilfs->ns_r_segments_percentage > 99) { ++ printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n"); ++ return -EINVAL; ++ } ++ + nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments)); + nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); + return 0; +@@ -515,6 +521,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, + brelse(sbh[1]); + sbh[1] = NULL; + sbp[1] = NULL; ++ valid[1] = 0; + swp = 0; + } + if (!valid[swp]) { +diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c +index 9fde1c0..14e8827 100644 +--- a/fs/notify/fanotify/fanotify_user.c ++++ b/fs/notify/fanotify/fanotify_user.c +@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, + goto out_close_fd; + + ret = -EFAULT; +- if (copy_to_user(buf, &fanotify_event_metadata, ++ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata || ++ copy_to_user(buf, &fanotify_event_metadata, + fanotify_event_metadata.event_len)) + goto out_kill_access_response; + +diff --git a/fs/notify/notification.c b/fs/notify/notification.c +index ee18815..7aa5d01 100644 +--- a/fs/notify/notification.c ++++ b/fs/notify/notification.c +@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep; + * get set to 0 so it will never get 'freed' + */ + static struct fsnotify_event *q_overflow_event; +-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); ++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0); + + /** + * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. +@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); + */ + u32 fsnotify_get_cookie(void) + { +- return atomic_inc_return(&fsnotify_sync_cookie); ++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie); + } + EXPORT_SYMBOL_GPL(fsnotify_get_cookie); + +diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c +index 99e3610..02c1068 100644 +--- a/fs/ntfs/dir.c ++++ b/fs/ntfs/dir.c +@@ -1329,7 +1329,7 @@ find_next_index_buffer: + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & + ~(s64)(ndir->itype.index.block_size - 1))); + /* Bounds checks. */ +- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { ++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { + ntfs_error(sb, "Out of bounds check failed. 
Corrupt directory " + "inode 0x%lx or driver bug.", vdir->i_ino); + goto err_out; +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c +index c587e2d..3641eaa 100644 +--- a/fs/ntfs/file.c ++++ b/fs/ntfs/file.c +@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = { + #endif /* NTFS_RW */ + }; + +-const struct file_operations ntfs_empty_file_ops = {}; ++const struct file_operations ntfs_empty_file_ops __read_only; + +-const struct inode_operations ntfs_empty_inode_ops = {}; ++const struct inode_operations ntfs_empty_inode_ops __read_only; +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c +index 210c352..a174f83 100644 +--- a/fs/ocfs2/localalloc.c ++++ b/fs/ocfs2/localalloc.c +@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, + goto bail; + } + +- atomic_inc(&osb->alloc_stats.moves); ++ atomic_inc_unchecked(&osb->alloc_stats.moves); + + bail: + if (handle) +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index d355e6e..578d905 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -235,11 +235,11 @@ enum ocfs2_vol_state + + struct ocfs2_alloc_stats + { +- atomic_t moves; +- atomic_t local_data; +- atomic_t bitmap_data; +- atomic_t bg_allocs; +- atomic_t bg_extends; ++ atomic_unchecked_t moves; ++ atomic_unchecked_t local_data; ++ atomic_unchecked_t bitmap_data; ++ atomic_unchecked_t bg_allocs; ++ atomic_unchecked_t bg_extends; + }; + + enum ocfs2_local_alloc_state +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c +index ba5d97e..c77db25 100644 +--- a/fs/ocfs2/suballoc.c ++++ b/fs/ocfs2/suballoc.c +@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_extends); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends); + + /* You should never ask for this much metadata */ + BUG_ON(bits_wanted > +@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle, + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + *suballoc_loc = res.sr_bg_blkno; + *suballoc_bit_start = res.sr_bit_offset; +@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle, + trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno, + res->sr_bits); + +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res->sr_bits != 1); + +@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle, + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res.sr_bits != 1); + +@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle, + cluster_start, + num_clusters); + if (!status) +- atomic_inc(&osb->alloc_stats.local_data); ++ atomic_inc_unchecked(&osb->alloc_stats.local_data); + } else { + if (min_clusters > (osb->bitmap_cpg - 1)) { + /* The only paths asking for contiguousness +@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle, + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, + res.sr_bg_blkno, + res.sr_bit_offset); +- atomic_inc(&osb->alloc_stats.bitmap_data); ++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); + *num_clusters = res.sr_bits; + } + } +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 4994f8b..eaab8eb 
100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) + "%10s => GlobalAllocs: %d LocalAllocs: %d " + "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", + "Stats", +- atomic_read(&osb->alloc_stats.bitmap_data), +- atomic_read(&osb->alloc_stats.local_data), +- atomic_read(&osb->alloc_stats.bg_allocs), +- atomic_read(&osb->alloc_stats.moves), +- atomic_read(&osb->alloc_stats.bg_extends)); ++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data), ++ atomic_read_unchecked(&osb->alloc_stats.local_data), ++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs), ++ atomic_read_unchecked(&osb->alloc_stats.moves), ++ atomic_read_unchecked(&osb->alloc_stats.bg_extends)); + + out += snprintf(buf + out, len - out, + "%10s => State: %u Descriptor: %llu Size: %u bits " +@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb, + spin_lock_init(&osb->osb_xattr_lock); + ocfs2_init_steal_slots(osb); + +- atomic_set(&osb->alloc_stats.moves, 0); +- atomic_set(&osb->alloc_stats.local_data, 0); +- atomic_set(&osb->alloc_stats.bitmap_data, 0); +- atomic_set(&osb->alloc_stats.bg_allocs, 0); +- atomic_set(&osb->alloc_stats.bg_extends, 0); ++ atomic_set_unchecked(&osb->alloc_stats.moves, 0); ++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); + + /* Copy the blockcheck stats from the superblock probe */ + osb->osb_ecc_stats = *stats; +diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c +index 5d22872..523db20 100644 +--- a/fs/ocfs2/symlink.c ++++ b/fs/ocfs2/symlink.c +@@ -142,7 +142,7 @@ bail: + + static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *link = nd_get_link(nd); ++ const char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); + } +diff --git a/fs/open.c b/fs/open.c +index 22c41b5..78894cf 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) + error = locks_verify_truncate(inode, NULL, length); + if (!error) + error = security_path_truncate(&path); ++ ++ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt)) ++ error = -EACCES; ++ + if (!error) + error = do_truncate(path.dentry, length, 0, NULL); + +@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) + if (__mnt_is_readonly(path.mnt)) + res = -EROFS; + ++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode)) ++ res = -EACCES; ++ + out_path_release: + path_put(&path); + out: +@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename) + if (error) + goto dput_and_out; + ++ gr_log_chdir(path.dentry, path.mnt); ++ + set_fs_pwd(current->fs, &path); + + dput_and_out: +@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd) + goto out_putf; + + error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); ++ ++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt)) ++ error = -EPERM; ++ ++ if (!error) ++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt); ++ + if (!error) + set_fs_pwd(current->fs, &file->f_path); + out_putf: +@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename) + if (error) + goto dput_and_out; + ++ if (gr_handle_chroot_chroot(path.dentry, path.mnt)) ++ goto dput_and_out; ++ + 
set_fs_root(current->fs, &path); ++ ++ gr_handle_chroot_chdir(&path); ++ + error = 0; + dput_and_out: + path_put(&path); +@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode) + if (error) + return error; + mutex_lock(&inode->i_mutex); ++ ++ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ + error = security_path_chmod(path->dentry, path->mnt, mode); + if (error) + goto out_unlock; +@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group) + int error; + struct iattr newattrs; + ++ if (!gr_acl_handle_chown(path->dentry, path->mnt)) ++ return -EACCES; ++ + newattrs.ia_valid = ATTR_CTIME; + if (user != (uid_t) -1) { + newattrs.ia_valid |= ATTR_UID; +diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c +index 6296b40..417c00f 100644 +--- a/fs/partitions/efi.c ++++ b/fs/partitions/efi.c +@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, + if (!gpt) + return NULL; + ++ if (!le32_to_cpu(gpt->num_partition_entries)) ++ return NULL; ++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL); ++ if (!pte) ++ return NULL; ++ + count = le32_to_cpu(gpt->num_partition_entries) * + le32_to_cpu(gpt->sizeof_partition_entry); +- if (!count) +- return NULL; +- pte = kzalloc(count, GFP_KERNEL); +- if (!pte) +- return NULL; +- + if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), + (u8 *) pte, + count) < count) { +diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c +index bd8ae78..539d250 100644 +--- a/fs/partitions/ldm.c ++++ b/fs/partitions/ldm.c +@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) + goto found; + } + +- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL); ++ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL); + if (!f) { + ldm_crit ("Out of memory."); + return false; +diff --git a/fs/pipe.c b/fs/pipe.c +index 4065f07..68c0706 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -420,9 +420,9 @@ redo: + } + if (bufs) /* More to do? */ + continue; +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + /* syscall merging: Usually we must not sleep + * if O_NONBLOCK is set, or if we got some data. + * But if a writer sleeps in kernel space, then +@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + goto out; +@@ -530,7 +530,7 @@ redo1: + for (;;) { + int bufs; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -616,9 +616,9 @@ redo2: + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + do_wakeup = 0; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + out: + mutex_unlock(&inode->i_mutex); +@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait) + mask = 0; + if (filp->f_mode & FMODE_READ) { + mask = (nrbufs > 0) ? 
POLLIN | POLLRDNORM : 0; +- if (!pipe->writers && filp->f_version != pipe->w_counter) ++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter) + mask |= POLLHUP; + } + +@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait) + * Most Unices do not set POLLERR for FIFOs but on Linux they + * behave exactly like pipes for poll(). + */ +- if (!pipe->readers) ++ if (!atomic_read(&pipe->readers)) + mask |= POLLERR; + } + +@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw) + + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; +- pipe->readers -= decr; +- pipe->writers -= decw; ++ atomic_sub(decr, &pipe->readers); ++ atomic_sub(decw, &pipe->writers); + +- if (!pipe->readers && !pipe->writers) { ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) { + free_pipe_info(inode); + } else { + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); +@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp) + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + } + + mutex_unlock(&inode->i_mutex); +@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp) + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) + if (inode->i_pipe) { + ret = 0; + if (filp->f_mode & FMODE_READ) +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + if (filp->f_mode & FMODE_WRITE) +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode) + inode->i_pipe = NULL; + } + +-static struct vfsmount *pipe_mnt __read_mostly; ++struct vfsmount *pipe_mnt __read_mostly; + + /* + * pipefs_dname() is called from d_path(). +@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void) + goto fail_iput; + inode->i_pipe = pipe; + +- pipe->readers = pipe->writers = 1; ++ atomic_set(&pipe->readers, 1); ++ atomic_set(&pipe->writers, 1); + inode->i_fop = &rdwr_pipefifo_fops; + + /* +diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig +index 15af622..0e9f4467 100644 +--- a/fs/proc/Kconfig ++++ b/fs/proc/Kconfig +@@ -30,12 +30,12 @@ config PROC_FS + + config PROC_KCORE + bool "/proc/kcore support" if !ARM +- depends on PROC_FS && MMU ++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD + + config PROC_VMCORE + bool "/proc/vmcore support" +- depends on PROC_FS && CRASH_DUMP +- default y ++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC ++ default n + help + Exports the dump image of crashed kernel in ELF format. + +@@ -59,8 +59,8 @@ config PROC_SYSCTL + limited in memory. 
+ + config PROC_PAGE_MONITOR +- default y +- depends on PROC_FS && MMU ++ default n ++ depends on PROC_FS && MMU && !GRKERNSEC + bool "Enable /proc page monitoring" if EXPERT + help + Various /proc files exist to monitor process memory utilization: +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 3a1dafd..bf1bd84 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -60,6 +60,7 @@ + #include <linux/tty.h> + #include <linux/string.h> + #include <linux/mman.h> ++#include <linux/grsecurity.h> + #include <linux/proc_fs.h> + #include <linux/ioport.h> + #include <linux/uaccess.h> +@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) + seq_putc(m, '\n'); + } + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline void task_pax(struct seq_file *m, struct task_struct *p) ++{ ++ if (p->mm) ++ seq_printf(m, "PaX:\t%c%c%c%c%c\n", ++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', ++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', ++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', ++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', ++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's'); ++ else ++ seq_printf(m, "PaX:\t-----\n"); ++} ++#endif ++ + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { +@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + task_cpus_allowed(m, task); + cpuset_task_status_allowed(m, task); + task_context_switch_counts(m, task); ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ task_pax(m, task); ++#endif ++ ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ task_grsec_rbac(m, task); ++#endif ++ + return 0; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task, int whole) + { +@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + char tcomm[sizeof(task->comm)]; + unsigned long flags; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("stat"); ++ return 0; ++ } ++#endif ++ + state = *get_task_state(task); + vsize = eip = esp = 0; + permitted = ptrace_may_access(task, PTRACE_MODE_READ); +@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + gtime = task->gtime; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (PAX_RAND_FLAGS(mm)) { ++ eip = 0; ++ esp = 0; ++ wchan = 0; ++ } ++#endif ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ wchan = 0; ++ eip =0; ++ esp =0; ++#endif ++ + /* scale priority and nice values from timeslices to -20..20 */ + /* to make it look like a "normal" Unix priority/nice value */ + priority = task_prio(task); +@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + vsize, + mm ? get_mm_rss(mm) : 0, + rsslim, ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0), ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0), ++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0), ++#else + mm ? (permitted ? mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, + (permitted && mm) ? 
mm->start_stack : 0, ++#endif + esp, + eip, + /* The signal information here is obsolete. +@@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { + unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0; +- struct mm_struct *mm = get_task_mm(task); ++ struct mm_struct *mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("statm"); ++ return 0; ++ } ++#endif ++ mm = get_task_mm(task); + if (mm) { + size = task_statm(mm, &shared, &text, &data, &resident); + mmput(mm); +@@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + + return 0; + } ++ ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++int proc_pid_ipaddr(struct task_struct *task, char *buffer) ++{ ++ u32 curr_ip = 0; ++ unsigned long flags; ++ ++ if (lock_task_sighand(task, &flags)) { ++ curr_ip = task->signal->curr_ip; ++ unlock_task_sighand(task, &flags); ++ } ++ ++ return sprintf(buffer, "%pI4\n", &curr_ip); ++} ++#endif +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 1ace83d..f5e575d 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -107,6 +107,22 @@ struct pid_entry { + union proc_op op; + }; + ++struct getdents_callback { ++ struct linux_dirent __user * current_dir; ++ struct linux_dirent __user * previous; ++ struct file * file; ++ int count; ++ int error; ++}; ++ ++static int gr_fake_filldir(void * __buf, const char *name, int namlen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct getdents_callback * buf = (struct getdents_callback *) __buf; ++ buf->error = -EINVAL; ++ return 0; ++} ++ + #define NOD(NAME, MODE, IOP, FOP, OP) { \ + .name = (NAME), \ + .len = sizeof(NAME) - 1, \ +@@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path) + return result; + } + +-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) +-{ +- struct mm_struct *mm; +- int err; +- +- err = mutex_lock_killable(&task->signal->cred_guard_mutex); +- if (err) +- return ERR_PTR(err); +- +- mm = get_task_mm(task); +- if (mm && mm != current->mm && +- !ptrace_may_access(task, mode)) { +- mmput(mm); +- mm = ERR_PTR(-EACCES); +- } +- mutex_unlock(&task->signal->cred_guard_mutex); +- +- return mm; +-} +- + struct mm_struct *mm_for_maps(struct task_struct *task) + { + return mm_access(task, PTRACE_MODE_READ); +@@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer) + if (!mm->arg_end) + goto out_mm; /* Shh! 
No looking before we're done */ + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out_mm; ++ + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) +@@ -256,12 +255,28 @@ out: + return res; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int proc_pid_auxv(struct task_struct *task, char *buffer) + { + struct mm_struct *mm = mm_for_maps(task); + int res = PTR_ERR(mm); + if (mm && !IS_ERR(mm)) { + unsigned int nwords = 0; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* allow if we're currently ptracing this task */ ++ if (PAX_RAND_FLAGS(mm) && ++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) { ++ mmput(mm); ++ return 0; ++ } ++#endif ++ + do { + nwords += 2; + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ +@@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer) + } + + +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* + * Provides a wchan file via kallsyms in a proper one-value-per-file format. + * Returns the resolved symbol. If that fails, simply return the address. +@@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task) + mutex_unlock(&task->signal->cred_guard_mutex); + } + +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + + #define MAX_STACK_TRACE_DEPTH 64 + +@@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer) + return count; + } + +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + static int proc_pid_syscall(struct task_struct *task, char *buffer) + { + long nr; +@@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer) + /************************************************************************/ + + /* permission checks */ +-static int proc_fd_access_allowed(struct inode *inode) ++static int proc_fd_access_allowed(struct inode *inode, unsigned int log) + { + struct task_struct *task; + int allowed = 0; +@@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode) + */ + task = get_proc_task(inode); + if (task) { +- allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ if (log) ++ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ); ++ else ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ); + put_task_struct(task); + } + return allowed; +@@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file) + file->f_mode |= FMODE_UNSIGNED_OFFSET; + file->private_data = mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ file->f_version = current->exec_id; ++#endif ++ + return 0; + } + +@@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf, + ssize_t copied; + char *page; + ++#ifdef CONFIG_GRKERNSEC ++ if (write) ++ return -EPERM; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (file->f_version != current->exec_id) { ++ gr_log_badprocpid("mem"); ++ return 0; ++ } ++#endif ++ + if (!mm) + return 0; + +@@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf, + if (!task) + goto out_no_task; + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out; ++ + ret = -ENOMEM; + page = (char *)__get_free_page(GFP_TEMPORARY); + if (!page) +@@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) + 
path_put(&nd->path); + + /* Are we allowed to snoop on the tasks file descriptors? */ +- if (!proc_fd_access_allowed(inode)) ++ if (!proc_fd_access_allowed(inode,0)) + goto out; + + error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); +@@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b + struct path path; + + /* Are we allowed to snoop on the tasks file descriptors? */ +- if (!proc_fd_access_allowed(inode)) +- goto out; ++ /* logging this is needed for learning on chromium to work properly, ++ but we don't want to flood the logs from 'ps' which does a readlink ++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn ++ CAP_SYS_PTRACE as it's not necessary for its basic functionality ++ */ ++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') { ++ if (!proc_fd_access_allowed(inode,0)) ++ goto out; ++ } else { ++ if (!proc_fd_access_allowed(inode,1)) ++ goto out; ++ } + + error = PROC_I(inode)->op.proc_get_link(inode, &path); + if (error) +@@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } + security_task_to_inode(task, inode); +@@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) + struct inode *inode = dentry->d_inode; + struct task_struct *task; + const struct cred *cred; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++#endif + + generic_fillattr(inode, stat); + +@@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) + stat->uid = 0; + stat->gid = 0; + task = pid_task(proc_pid(inode), PIDTYPE_PID); ++ ++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ + if (task) { ++ cred = __task_cred(task); ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!tmpcred->uid || (tmpcred->uid == cred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) { ++#endif + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { +- cred = __task_cred(task); + stat->uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ stat->gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + stat->gid = cred->egid; ++#endif + } ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ } else { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++#endif + } + rcu_read_unlock(); + return 0; +@@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd) + + if (task) { + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef 
CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } else { + inode->i_uid = 0; +@@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info) + int fd = proc_fd(inode); + + if (task) { +- files = get_files_struct(task); ++ if (!gr_acl_handle_procpidmem(task)) ++ files = get_files_struct(task); + put_task_struct(task); + } + if (files) { +@@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = { + */ + static int proc_fd_permission(struct inode *inode, int mask) + { ++ struct task_struct *task; + int rv = generic_permission(inode, mask); +- if (rv == 0) +- return 0; ++ + if (task_pid(current) == proc_pid(inode)) + rv = 0; ++ ++ task = get_proc_task(inode); ++ if (task == NULL) ++ return rv; ++ ++ if (gr_acl_handle_procpidmem(task)) ++ rv = -EACCES; ++ ++ put_task_struct(task); ++ + return rv; + } + +@@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir, + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + /* + * Yes, it does not scale. And it should not. Don't add + * new entries into /proc/<tgid>/ without very good reasons. +@@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp, + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + ret = 0; + i = filp->f_pos; + switch (i) { +@@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +@@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = { + REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +@@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_HARDWALL + INF("hardwall", S_IRUGO, proc_pid_hardwall), + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr), ++#endif + }; + + static int proc_tgid_base_readdir(struct file * filp, +@@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir, + if (!inode) + goto out; + ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; ++#else + inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; ++#endif + inode->i_op = &proc_tgid_base_inode_operations; + 
inode->i_fop = &proc_tgid_base_operations; + inode->i_flags|=S_IMMUTABLE; +@@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct + if (!task) + goto out; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out_put_task; ++ + result = proc_pid_instantiate(dir, dentry, task, NULL); ++out_put_task: + put_task_struct(task); + out: + return result; +@@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) + { + unsigned int nr; + struct task_struct *reaper; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++ const struct cred *itercred; ++#endif ++ filldir_t __filldir = filldir; + struct tgid_iter iter; + struct pid_namespace *ns; + +@@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) + for (iter = next_tgid(ns, iter); + iter.task; + iter.tgid += 1, iter = next_tgid(ns, iter)) { ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_lock(); ++ itercred = __task_cred(iter.task); ++#endif ++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ || (tmpcred->uid && (itercred->uid != tmpcred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) ++#endif ++ ) ++ __filldir = &gr_fake_filldir; ++ else ++ __filldir = filldir; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_unlock(); ++#endif + filp->f_pos = iter.tgid + TGID_OFFSET; +- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) { ++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { + put_task_struct(iter.task); + goto out; + } +@@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = { + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c +index 82676e3..5f8518a 100644 +--- a/fs/proc/cmdline.c ++++ b/fs/proc/cmdline.c +@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = { + + static int __init proc_cmdline_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops); ++#else + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); ++#endif + return 0; + } + module_init(proc_cmdline_init); +diff --git a/fs/proc/devices.c b/fs/proc/devices.c +index b143471..bb105e5 100644 +--- a/fs/proc/devices.c ++++ b/fs/proc/devices.c +@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = { + + 
static int __init proc_devices_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations); ++#else + proc_create("devices", 0, NULL, &proc_devinfo_operations); ++#endif + return 0; + } + module_init(proc_devices_init); +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 7737c54..7172574 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -18,12 +18,18 @@ + #include <linux/module.h> + #include <linux/sysctl.h> + #include <linux/slab.h> ++#include <linux/grsecurity.h> + + #include <asm/system.h> + #include <asm/uaccess.h> + + #include "internal.h" + ++#ifdef CONFIG_PROC_SYSCTL ++extern const struct inode_operations proc_sys_inode_operations; ++extern const struct inode_operations proc_sys_dir_operations; ++#endif ++ + static void proc_evict_inode(struct inode *inode) + { + struct proc_dir_entry *de; +@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode) + ns_ops = PROC_I(inode)->ns_ops; + if (ns_ops && ns_ops->put) + ns_ops->put(PROC_I(inode)->ns); ++ ++#ifdef CONFIG_PROC_SYSCTL ++ if (inode->i_op == &proc_sys_inode_operations || ++ inode->i_op == &proc_sys_dir_operations) ++ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev); ++#endif ++ + } + + static struct kmem_cache * proc_inode_cachep; +@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + if (de->mode) { + inode->i_mode = de->mode; + inode->i_uid = de->uid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = de->gid; ++#endif + } + if (de->size) + inode->i_size = de->size; +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index 7838e5c..ff92cbc 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); + extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer); ++#endif + extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); + + extern const struct file_operations proc_maps_operations; +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c +index d245cb2..f4e8498 100644 +--- a/fs/proc/kcore.c ++++ b/fs/proc/kcore.c +@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + * the addresses in the elf_phdr on our list. + */ + start = kc_offset_to_vaddr(*fpos - elf_buflen); +- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) ++ tsz = PAGE_SIZE - (start & ~PAGE_MASK); ++ if (tsz > buflen) + tsz = buflen; +- ++ + while (buflen) { + struct kcore_list *m; + +@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + kfree(elf_buf); + } else { + if (kern_addr_valid(start)) { +- unsigned long n; ++ char *elf_buf; ++ mm_segment_t oldfs; + +- n = copy_to_user(buffer, (char *)start, tsz); +- /* +- * We cannot distingush between fault on source +- * and fault on destination. When this happens +- * we clear too and hope it will trigger the +- * EFAULT again. 
+- */ +- if (n) { +- if (clear_user(buffer + tsz - n, +- n)) ++ elf_buf = kmalloc(tsz, GFP_KERNEL); ++ if (!elf_buf) ++ return -ENOMEM; ++ oldfs = get_fs(); ++ set_fs(KERNEL_DS); ++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) { ++ set_fs(oldfs); ++ if (copy_to_user(buffer, elf_buf, tsz)) { ++ kfree(elf_buf); + return -EFAULT; ++ } + } ++ set_fs(oldfs); ++ kfree(elf_buf); + } else { + if (clear_user(buffer, tsz)) + return -EFAULT; +@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + + static int open_kcore(struct inode *inode, struct file *filp) + { ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ return -EPERM; ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (kcore_need_update) +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index 80e4645..53e5fcf 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + vmi.used >> 10, + vmi.largest_chunk >> 10 + #ifdef CONFIG_MEMORY_FAILURE +- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) ++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10) + #endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * +diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c +index b1822dd..df622cb 100644 +--- a/fs/proc/nommu.c ++++ b/fs/proc/nommu.c +@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) + if (len < 1) + len = 1; + seq_printf(m, "%*c", len, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\"); + } + + seq_putc(m, '\n'); +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c +index f738024..876984a 100644 +--- a/fs/proc/proc_net.c ++++ b/fs/proc/proc_net.c +@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir) + struct task_struct *task; + struct nsproxy *ns; + struct net *net = NULL; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred = current_cred(); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->fsuid) ++ return net; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)) ++ return net; ++#endif + + rcu_read_lock(); + task = pid_task(proc_pid(dir), PIDTYPE_PID); +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index a6b6217..1e0579d 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -9,11 +9,13 @@ + #include <linux/namei.h> + #include "internal.h" + ++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op); ++ + static const struct dentry_operations proc_sys_dentry_operations; + static const struct file_operations proc_sys_file_operations; +-static const struct inode_operations proc_sys_inode_operations; ++const struct inode_operations proc_sys_inode_operations; + static const struct file_operations proc_sys_dir_file_operations; +-static const struct inode_operations proc_sys_dir_operations; ++const struct inode_operations proc_sys_dir_operations; + + void proc_sys_poll_notify(struct ctl_table_poll *poll) + { +@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, + + err = NULL; + d_set_d_op(dentry, &proc_sys_dentry_operations); ++ ++ gr_handle_proc_create(dentry, inode); ++ + d_add(dentry, inode); + ++ if (gr_handle_sysctl(p, MAY_EXEC)) ++ err = ERR_PTR(-ENOENT); ++ + out: + 
sysctl_head_finish(head); + return err; +@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, + if (!table->proc_handler) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ error = -EPERM; ++ if (write && !capable(CAP_SYS_ADMIN)) ++ goto out; ++#endif ++ + /* careful: calling conventions are nasty here */ + res = count; + error = table->proc_handler(table, write, buf, &res, ppos); +@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent, + return -ENOMEM; + } else { + d_set_d_op(child, &proc_sys_dentry_operations); ++ ++ gr_handle_proc_create(child, inode); ++ + d_add(child, inode); + } + } else { +@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table, + if (*pos < file->f_pos) + continue; + ++ if (gr_handle_sysctl(table, 0)) ++ continue; ++ + res = proc_sys_fill_cache(file, dirent, filldir, head, table); + if (res) + return res; +@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct + if (IS_ERR(head)) + return PTR_ERR(head); + ++ if (table && gr_handle_sysctl(table, MAY_EXEC)) ++ return -ENOENT; ++ + generic_fillattr(inode, stat); + if (table) + stat->mode = (stat->mode & S_IFMT) | table->mode; +@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = { + .llseek = generic_file_llseek, + }; + +-static const struct inode_operations proc_sys_inode_operations = { ++const struct inode_operations proc_sys_inode_operations = { + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, + .getattr = proc_sys_getattr, + }; + +-static const struct inode_operations proc_sys_dir_operations = { ++const struct inode_operations proc_sys_dir_operations = { + .lookup = proc_sys_lookup, + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, +diff --git a/fs/proc/root.c b/fs/proc/root.c +index 03102d9..4ae347e 100644 +--- a/fs/proc/root.c ++++ b/fs/proc/root.c +@@ -121,7 +121,15 @@ void __init proc_root_init(void) + #ifdef CONFIG_PROC_DEVICETREE + proc_device_tree_init(); + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_mkdir("bus", NULL); ++#endif + proc_sys_init(); + } + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 7dcd2a2..b2f410e 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -11,6 +11,7 @@ + #include <linux/rmap.h> + #include <linux/swap.h> + #include <linux/swapops.h> ++#include <linux/grsecurity.h> + + #include <asm/elf.h> + #include <asm/uaccess.h> +@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + "VmExe:\t%8lu kB\n" + "VmLib:\t%8lu kB\n" + "VmPTE:\t%8lu kB\n" +- "VmSwap:\t%8lu kB\n", +- hiwater_vm << (PAGE_SHIFT-10), ++ "VmSwap:\t%8lu kB\n" ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ "CsBase:\t%8lx\nCsLim:\t%8lx\n" ++#endif ++ ++ ,hiwater_vm << (PAGE_SHIFT-10), + (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), + mm->locked_vm << (PAGE_SHIFT-10), + mm->pinned_vm << (PAGE_SHIFT-10), +@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + data << (PAGE_SHIFT-10), + mm->stack_vm << (PAGE_SHIFT-10), text, lib, + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10, +- swap << (PAGE_SHIFT-10)); ++ swap << (PAGE_SHIFT-10) ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ , mm->context.user_cs_base, mm->context.user_cs_limit 
++#endif ++ ++ ); + } + + unsigned long task_vsize(struct mm_struct *mm) +@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + { + struct mm_struct *mm = vma->vm_mm; +@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + +- /* We don't show the stack guard page in /proc/maps */ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start; ++ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end; ++#else + start = vma->vm_start; +- if (stack_guard_page_start(vma, start)) +- start += PAGE_SIZE; + end = vma->vm_end; +- if (stack_guard_page_end(vma, end)) +- end -= PAGE_SIZE; ++#endif + + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", + start, +@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + flags & VM_WRITE ? 'w' : '-', + flags & VM_EXEC ? 'x' : '-', + flags & VM_MAYSHARE ? 's' : 'p', ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff, ++#else + pgoff, ++#endif + MAJOR(dev), MINOR(dev), ino, &len); + + /* +@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + */ + if (file) { + pad_len_spaces(m, len); +- seq_path(m, &file->f_path, "\n"); ++ seq_path(m, &file->f_path, "\n\"); + } else { + const char *name = arch_vma_name(vma); + if (!name) { +@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { + name = "[heap]"; +- } else if (vma->vm_start <= mm->start_stack && +- vma->vm_end >= mm->start_stack) { ++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || ++ (vma->vm_start <= mm->start_stack && ++ vma->vm_end >= mm->start_stack)) { + name = "[stack]"; + } + } else { +@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v) + struct proc_maps_private *priv = m->private; + struct task_struct *task = priv->task; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("maps"); ++ return 0; ++ } ++#endif ++ + show_map_vma(m, vma); + + if (m->count < m->size) /* vma is copied successfully */ +@@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v) + .private = &mss, + }; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("smaps"); ++ return 0; ++ } ++#endif + memset(&mss, 0, sizeof mss); +- mss.vma = vma; +- /* mmap_sem is held in m_start */ +- if (vma->vm_mm && !is_vm_hugetlb_page(vma)) +- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); +- ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!PAX_RAND_FLAGS(vma->vm_mm)) { ++#endif ++ mss.vma = vma; ++ /* mmap_sem is held in m_start */ ++ if (vma->vm_mm && !is_vm_hugetlb_page(vma)) ++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ } ++#endif + show_map_vma(m, vma); + + seq_printf(m, +@@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v) + "KernelPageSize: %8lu kB\n" + "MMUPageSize: %8lu kB\n" + "Locked: %8lu kB\n", ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(vma->vm_mm) ? 
0UL : (vma->vm_end - vma->vm_start) >> 10, ++#else + (vma->vm_end - vma->vm_start) >> 10, ++#endif + mss.resident >> 10, + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), + mss.shared_clean >> 10, +@@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v) + int n; + char buffer[50]; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("numa_maps"); ++ return 0; ++ } ++#endif ++ + if (!mm) + return 0; + +@@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v) + mpol_to_str(buffer, sizeof(buffer), pol, 0); + mpol_cond_put(pol); + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer); ++#else + seq_printf(m, "%08lx %s", vma->vm_start, buffer); ++#endif + + if (file) { + seq_printf(m, " file="); +- seq_path(m, &file->f_path, "\n\t= "); ++ seq_path(m, &file->f_path, "\n\t\= "); + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + seq_printf(m, " heap"); + } else if (vma->vm_start <= mm->start_stack && +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 980de54..2a4db5f 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + else + bytes += kobjsize(mm); + +- if (current->fs && current->fs->users > 1) ++ if (current->fs && atomic_read(¤t->fs->users) > 1) + sbytes += kobjsize(current->fs); + else + bytes += kobjsize(current->fs); +@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) + + if (file) { + pad_len_spaces(m, len); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\"); + } else if (mm) { + if (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack) { +diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c +index d67908b..d13f6a6 100644 +--- a/fs/quota/netlink.c ++++ b/fs/quota/netlink.c +@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = { + void quota_send_warning(short type, unsigned int id, dev_t dev, + const char warntype) + { +- static atomic_t seq; ++ static atomic_unchecked_t seq; + struct sk_buff *skb; + void *msg_head; + int ret; +@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev, + "VFS: Not enough memory to send quota warning.\n"); + return; + } +- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), ++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq), + "a_genl_family, 0, QUOTA_NL_C_WARNING); + if (!msg_head) { + printk(KERN_ERR +diff --git a/fs/readdir.c b/fs/readdir.c +index 356f715..c918d38 100644 +--- a/fs/readdir.c ++++ b/fs/readdir.c +@@ -17,6 +17,7 @@ + #include <linux/security.h> + #include <linux/syscalls.h> + #include <linux/unistd.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -67,6 +68,7 @@ struct old_linux_dirent { + + struct readdir_callback { + struct old_linux_dirent __user * dirent; ++ struct file * file; + int result; + }; + +@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, + + buf.result = 0; + buf.dirent = dirent; ++ buf.file = file; + + error = vfs_readdir(file, fillonedir, &buf); + if (buf.result) +@@ 
-142,6 +149,7 @@ struct linux_dirent { + struct getdents_callback { + struct linux_dirent __user * current_dir; + struct linux_dirent __user * previous; ++ struct file * file; + int count; + int error; + }; +@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, filldir, &buf); + if (error >= 0) +@@ -229,6 +242,7 @@ out: + struct getdents_callback64 { + struct linux_dirent64 __user * current_dir; + struct linux_dirent64 __user * previous; ++ struct file *file; + int count; + int error; + }; +@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EINVAL; /* only used if we fail.. */ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, + + buf.current_dir = dirent; + buf.previous = NULL; ++ buf.file = file; + buf.count = count; + buf.error = 0; + +@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, + error = buf.error; + lastdirent = buf.previous; + if (lastdirent) { +- typeof(lastdirent->d_off) d_off = file->f_pos; ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos; + if (__put_user(d_off, &lastdirent->d_off)) + error = -EFAULT; + else +diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c +index 60c0804..d814f98 100644 +--- a/fs/reiserfs/do_balan.c ++++ b/fs/reiserfs/do_balan.c +@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */ + return; + } + +- atomic_inc(&(fs_generation(tb->tb_sb))); ++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); + do_balance_starts(tb); + + /* balance leaf returns 0 except if combining L R and S into +diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c +index 7a99811..a7c96c4 100644 +--- a/fs/reiserfs/procfs.c ++++ b/fs/reiserfs/procfs.c +@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb) + "SMALL_TAILS " : "NO_TAILS ", + replay_only(sb) ? "REPLAY_ONLY " : "", + convert_reiserfs(sb) ? 
"CONV " : "", +- atomic_read(&r->s_generation_counter), ++ atomic_read_unchecked(&r->s_generation_counter), + SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), + SF(s_do_balance), SF(s_unneeded_left_neighbor), + SF(s_good_search_by_key_reada), SF(s_bmaps), +diff --git a/fs/select.c b/fs/select.c +index d33418f..2a5345e 100644 +--- a/fs/select.c ++++ b/fs/select.c +@@ -20,6 +20,7 @@ + #include <linux/module.h> + #include <linux/slab.h> + #include <linux/poll.h> ++#include <linux/security.h> + #include <linux/personality.h> /* for STICKY_TIMEOUTS */ + #include <linux/file.h> + #include <linux/fdtable.h> +@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, + struct poll_list *walk = head; + unsigned long todo = nfds; + ++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1); + if (nfds > rlimit(RLIMIT_NOFILE)) + return -EINVAL; + +diff --git a/fs/seq_file.c b/fs/seq_file.c +index dba43c3..4b3f701 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -9,6 +9,7 @@ + #include <linux/module.h> + #include <linux/seq_file.h> + #include <linux/slab.h> ++#include <linux/sched.h> + + #include <asm/uaccess.h> + #include <asm/page.h> +@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op) + memset(p, 0, sizeof(*p)); + mutex_init(&p->lock); + p->op = op; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ p->exec_id = current->exec_id; ++#endif + + /* + * Wrappers around seq_open(e.g. swaps_open) need to be +@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v) + int single_open(struct file *file, int (*show)(struct seq_file *, void *), + void *data) + { +- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); ++ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL); + int res = -ENOMEM; + + if (op) { +diff --git a/fs/splice.c b/fs/splice.c +index fa2defa..8601650 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + pipe_lock(pipe); + + for (;;) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + do_wakeup = 0; + } + +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); ++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos); + set_fs(old_fs); + + return res; +@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_write(file, (const char __user *)buf, count, &pos); ++ res = vfs_write(file, (const char __force_user *)buf, count, &pos); + set_fs(old_fs); + + return res; +@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, + goto err; + + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); +- vec[i].iov_base = (void __user *) page_address(page); ++ vec[i].iov_base = (void __force_user *) page_address(page); + vec[i].iov_len = this_len; + spd.pages[i] = page; + spd.nr_pages++; +@@ -846,10 +846,10 @@ 
EXPORT_SYMBOL(splice_from_pipe_feed); + int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) + { + while (!pipe->nrbufs) { +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + return 0; + +- if (!pipe->waiting_writers && sd->num_spliced) ++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced) + return 0; + + if (sd->flags & SPLICE_F_NONBLOCK) +@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * out of the pipe right after the splice_to_pipe(). So set + * PIPE_READERS appropriately. + */ +- pipe->readers = 1; ++ atomic_set(&pipe->readers, 1); + + current->splice_pipe = pipe; + } +@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + if (flags & SPLICE_F_NONBLOCK) { + ret = -EAGAIN; + break; +@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + pipe_lock(pipe); + + while (pipe->nrbufs >= pipe->buffers) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + break; +@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -1819,14 +1819,14 @@ retry: + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; + break; + } + +- if (!ipipe->nrbufs && !ipipe->writers) ++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers)) + break; + + /* +@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + * return EAGAIN if we have the potential of some data in the + * future, otherwise just return 0 + */ +- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK)) + ret = -EAGAIN; + + pipe_unlock(ipipe); +diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c +index a475983..9c6a1f0 100644 +--- a/fs/sysfs/bin.c ++++ b/fs/sysfs/bin.c +@@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count) + } + + static ssize_t ++read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3); ++static ssize_t + read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) + { + struct bin_buffer *bb = file->private_data; +diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c +index 7fdf6a7..e6cd8ad 100644 +--- a/fs/sysfs/dir.c ++++ b/fs/sysfs/dir.c +@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd, + struct sysfs_dirent *sd; + int rc; + ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ const char *parent_name = parent_sd->s_name; ++ ++ mode = S_IFDIR | S_IRWXU; ++ ++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) || ++ (!strcmp(parent_name, "devices") && !strcmp(name, 
"system")) || ++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) || ++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu"))) ++ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; ++#endif ++ + /* allocate */ + sd = sysfs_new_dirent(name, mode, SYSFS_DIR); + if (!sd) +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index 779789a..f58193c 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock); + + struct sysfs_open_dirent { + atomic_t refcnt; +- atomic_t event; ++ atomic_unchecked_t event; + wait_queue_head_t poll; + struct list_head buffers; /* goes through sysfs_buffer.list */ + }; +@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer + if (!sysfs_get_active(attr_sd)) + return -ENODEV; + +- buffer->event = atomic_read(&attr_sd->s_attr.open->event); ++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event); + count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page); + + sysfs_put_active(attr_sd); +@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd, + return -ENOMEM; + + atomic_set(&new_od->refcnt, 0); +- atomic_set(&new_od->event, 1); ++ atomic_set_unchecked(&new_od->event, 1); + init_waitqueue_head(&new_od->poll); + INIT_LIST_HEAD(&new_od->buffers); + goto retry; +@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait) + + sysfs_put_active(attr_sd); + +- if (buffer->event != atomic_read(&od->event)) ++ if (buffer->event != atomic_read_unchecked(&od->event)) + goto trigger; + + return DEFAULT_POLLMASK; +@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd) + + od = sd->s_attr.open; + if (od) { +- atomic_inc(&od->event); ++ atomic_inc_unchecked(&od->event); + wake_up_interruptible(&od->poll); + } + +diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c +index a7ac78f..02158e1 100644 +--- a/fs/sysfs/symlink.c ++++ b/fs/sysfs/symlink.c +@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd) + + static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *page = nd_get_link(nd); ++ const char *page = nd_get_link(nd); + if (!IS_ERR(page)) + free_page((unsigned long)page); + } +diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c +index b09ba2d..1cad1a8 100644 +--- a/fs/ubifs/debug.c ++++ b/fs/ubifs/debug.c +@@ -2817,6 +2817,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count, + * debugfs file. Returns %0 or %1 in case of success and a negative error code + * in case of failure. 
+ */ ++static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2); + static int interpret_user_input(const char __user *u, size_t count) + { + size_t buf_size; +@@ -2835,6 +2836,8 @@ static int interpret_user_input(const char __user *u, size_t count) + } + + static ssize_t dfs_file_write(struct file *file, const char __user *u, ++ size_t count, loff_t *ppos) __size_overflow(3); ++static ssize_t dfs_file_write(struct file *file, const char __user *u, + size_t count, loff_t *ppos) + { + struct ubifs_info *c = file->private_data; +diff --git a/fs/udf/file.c b/fs/udf/file.c +index dca0c38..d567b84 100644 +--- a/fs/udf/file.c ++++ b/fs/udf/file.c +@@ -201,12 +201,10 @@ out: + static int udf_release_file(struct inode *inode, struct file *filp) + { + if (filp->f_mode & FMODE_WRITE) { +- mutex_lock(&inode->i_mutex); + down_write(&UDF_I(inode)->i_data_sem); + udf_discard_prealloc(inode); + udf_truncate_tail_extent(inode); + up_write(&UDF_I(inode)->i_data_sem); +- mutex_unlock(&inode->i_mutex); + } + return 0; + } +diff --git a/fs/udf/misc.c b/fs/udf/misc.c +index c175b4d..8f36a16 100644 +--- a/fs/udf/misc.c ++++ b/fs/udf/misc.c +@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, + + u8 udf_tag_checksum(const struct tag *t) + { +- u8 *data = (u8 *)t; ++ const u8 *data = (const u8 *)t; + u8 checksum = 0; + int i; + for (i = 0; i < sizeof(struct tag); ++i) +diff --git a/fs/utimes.c b/fs/utimes.c +index ba653f3..06ea4b1 100644 +--- a/fs/utimes.c ++++ b/fs/utimes.c +@@ -1,6 +1,7 @@ + #include <linux/compiler.h> + #include <linux/file.h> + #include <linux/fs.h> ++#include <linux/security.h> + #include <linux/linkage.h> + #include <linux/mount.h> + #include <linux/namei.h> +@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times) + goto mnt_drop_write_and_out; + } + } ++ ++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto mnt_drop_write_and_out; ++ } ++ + mutex_lock(&inode->i_mutex); + error = notify_change(path->dentry, &newattrs); + mutex_unlock(&inode->i_mutex); +diff --git a/fs/xattr.c b/fs/xattr.c +index 67583de..c5aad14 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr); + * Extended attribute SET operations + */ + static long +-setxattr(struct dentry *d, const char __user *name, const void __user *value, ++setxattr(struct path *path, const char __user *name, const void __user *value, + size_t size, int flags) + { + int error; +@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value, + return PTR_ERR(kvalue); + } + +- error = vfs_setxattr(d, kname, kvalue, size, flags); ++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto out; ++ } ++ ++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags); ++out: + kfree(kvalue); + return error; + } +@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname, + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(path.dentry, name, value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(path.dentry, name, value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + 
path_put(&path); +@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, + const void __user *,value, size_t, size, int, flags) + { + struct file *f; +- struct dentry *dentry; + int error = -EBADF; + + f = fget(fd); + if (!f) + return error; +- dentry = f->f_path.dentry; +- audit_inode(NULL, dentry); ++ audit_inode(NULL, f->f_path.dentry); + error = mnt_want_write_file(f); + if (!error) { +- error = setxattr(dentry, name, value, size, flags); ++ error = setxattr(&f->f_path, name, value, size, flags); + mnt_drop_write(f->f_path.mnt); + } + fput(f); +diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c +index 8d5a506..7f62712 100644 +--- a/fs/xattr_acl.c ++++ b/fs/xattr_acl.c +@@ -17,8 +17,8 @@ + struct posix_acl * + posix_acl_from_xattr(const void *value, size_t size) + { +- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; +- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; ++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value; ++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end; + int count; + struct posix_acl *acl; + struct posix_acl_entry *acl_e; +diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c +index d0ab788..827999b 100644 +--- a/fs/xfs/xfs_bmap.c ++++ b/fs/xfs/xfs_bmap.c +@@ -190,7 +190,7 @@ xfs_bmap_validate_ret( + int nmap, + int ret_nmap); + #else +-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) ++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0) + #endif /* DEBUG */ + + STATIC int +diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c +index 79d05e8..e3e5861 100644 +--- a/fs/xfs/xfs_dir2_sf.c ++++ b/fs/xfs/xfs_dir2_sf.c +@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents( + } + + ino = xfs_dir2_sfe_get_ino(sfp, sfep); +- if (filldir(dirent, (char *)sfep->name, sfep->namelen, ++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) { ++ char name[sfep->namelen]; ++ memcpy(name, sfep->name, sfep->namelen); ++ if (filldir(dirent, name, sfep->namelen, ++ off & 0x7fffffff, ino, DT_UNKNOWN)) { ++ *offset = off & 0x7fffffff; ++ return 0; ++ } ++ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen, + off & 0x7fffffff, ino, DT_UNKNOWN)) { + *offset = off & 0x7fffffff; + return 0; +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index d99a905..9f88202 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -128,7 +128,7 @@ xfs_find_handle( + } + + error = -EFAULT; +- if (copy_to_user(hreq->ohandle, &handle, hsize) || ++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) + goto out_put; + +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c +index 23ce927..e274cc1 100644 +--- a/fs/xfs/xfs_iops.c ++++ b/fs/xfs/xfs_iops.c +@@ -447,7 +447,7 @@ xfs_vn_put_link( + struct nameidata *nd, + void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + if (!IS_ERR(s)) + kfree(s); +diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig +new file mode 100644 +index 0000000..4089e05 +--- /dev/null ++++ b/grsecurity/Kconfig +@@ -0,0 +1,1078 @@ ++# ++# grecurity configuration ++# ++ ++menu "Grsecurity" ++ ++config GRKERNSEC ++ bool "Grsecurity" ++ select CRYPTO ++ select CRYPTO_SHA256 ++ help ++ If you say Y here, you will be able to configure many features ++ that will enhance the security of your system. 
It is highly ++ recommended that you say Y here and read through the help ++ for each option so that you fully understand the features and ++ can evaluate their usefulness for your machine. ++ ++choice ++ prompt "Security Level" ++ depends on GRKERNSEC ++ default GRKERNSEC_CUSTOM ++ ++config GRKERNSEC_LOW ++ bool "Low" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_CHDIR ++ ++ help ++ If you choose this option, several of the grsecurity options will ++ be enabled that will give you greater protection against a number ++ of attacks, while assuring that none of your software will have any ++ conflicts with the additional security measures. If you run a lot ++ of unusual software, or you are having problems with the higher ++ security levels, you should say Y here. With this option, the ++ following features are enabled: ++ ++ - Linking restrictions ++ - FIFO restrictions ++ - Restricted dmesg ++ - Enforced chdir("/") on chroot ++ - Runtime module disabling ++ ++config GRKERNSEC_MEDIUM ++ bool "Medium" ++ select PAX ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_USERGROUP ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB)) ++ ++ help ++ If you say Y here, several features in addition to those included ++ in the low additional security level will be enabled. These ++ features provide even more security to your system, though in rare ++ cases they may be incompatible with very old or poorly written ++ software. If you enable this option, make sure that your auth ++ service (identd) is running as gid 1001. 
With this option, ++ the following features (in addition to those provided in the ++ low additional security level) will be enabled: ++ ++ - Failed fork logging ++ - Time change logging ++ - Signal logging ++ - Deny mounts in chroot ++ - Deny double chrooting ++ - Deny sysctl writes in chroot ++ - Deny mknod in chroot ++ - Deny access to abstract AF_UNIX sockets out of chroot ++ - Deny pivot_root in chroot ++ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port ++ - /proc restrictions with special GID set to 10 (usually wheel) ++ - Address Space Layout Randomization (ASLR) ++ - Prevent exploitation of most refcount overflows ++ - Bounds checking of copying between the kernel and userland ++ ++config GRKERNSEC_HIGH ++ bool "High" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_SYSFS_RESTRICT ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_PROC_ADD ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_SETXID ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_HARDEN_PTRACE ++ select GRKERNSEC_PTRACE_READEXEC ++ select GRKERNSEC_VM86 if (X86_32) ++ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC) ++ select PAX ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) ++ select PAX_MEMORY_UDEREF if (X86 && !XEN) ++ select PAX_RANDKSTACK if (X86_TSC && X86) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86)) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB)) ++ help ++ If you say Y here, many of the features of grsecurity will be ++ enabled, which will protect you against many kinds of attacks ++ against your system. The heightened security comes at a cost ++ of an increased chance of incompatibilities with rare software ++ on your machine. Since this security level enables PaX, you should ++ view http://pax.grsecurity.net and read about the PaX ++ project. While you are there, download chpax and run it on ++ binaries that cause problems with PaX. Also remember that ++ since the /proc restrictions are enabled, you must run your ++ identd as gid 1001. 
This security level enables the following ++ features in addition to those listed in the low and medium ++ security levels: ++ ++ - Additional /proc restrictions ++ - Chmod restrictions in chroot ++ - No signals, ptrace, or viewing of processes outside of chroot ++ - Capability restrictions in chroot ++ - Deny fchdir out of chroot ++ - Priority restrictions in chroot ++ - Segmentation-based implementation of PaX ++ - Mprotect restrictions ++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat] ++ - Kernel stack randomization ++ - Mount/unmount/remount logging ++ - Kernel symbol hiding ++ - Hardening of module auto-loading ++ - Ptrace restrictions ++ - Restricted vm86 mode ++ - Restricted sysfs/debugfs ++ - Active kernel exploit response ++ ++config GRKERNSEC_CUSTOM ++ bool "Custom" ++ help ++ If you say Y here, you will be able to configure every grsecurity ++ option, which allows you to enable many more features that aren't ++ covered in the basic security levels. These additional features ++ include TPE, socket restrictions, and the sysctl system for ++ grsecurity. It is advised that you read through the help for ++ each option to determine its usefulness in your situation. ++ ++endchoice ++ ++menu "Memory Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_KMEM ++ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port" ++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390) ++ help ++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to ++ be written to or read from to modify or leak the contents of the running ++ kernel. /dev/port will also not be allowed to be opened. If you have module ++ support disabled, enabling this will close up four ways that are ++ currently used to insert malicious code into the running kernel. ++ Even with all these features enabled, we still highly recommend that ++ you use the RBAC system, as it is still possible for an attacker to ++ modify the running kernel through privileged I/O granted by ioperm/iopl. ++ If you are not using XFree86, you may be able to stop this additional ++ case by enabling the 'Disable privileged I/O' option. Though nothing ++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem, ++ but only to video memory, which is the only writing we allow in this ++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will ++ not be allowed to mprotect it with PROT_WRITE later. ++ It is highly recommended that you say Y here if you meet all the ++ conditions above. ++ ++config GRKERNSEC_VM86 ++ bool "Restrict VM86 mode" ++ depends on X86_32 ++ ++ help ++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to ++ make use of a special execution mode on 32bit x86 processors called ++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain ++ video cards and will still work with this option enabled. The purpose ++ of the option is to prevent exploitation of emulation errors in ++ virtualization of vm86 mode like the one discovered in VMWare in 2009. ++ Nearly all users should be able to enable this option. ++ ++config GRKERNSEC_IO ++ bool "Disable privileged I/O" ++ depends on X86 ++ select RTC_CLASS ++ select RTC_INTF_DEV ++ select RTC_DRV_CMOS ++ ++ help ++ If you say Y here, all ioperm and iopl calls will return an error. ++ Ioperm and iopl can be used to modify the running kernel. ++ Unfortunately, some programs need this access to operate properly, ++ the most notable of which are XFree86 and hwclock. 
hwclock can be ++ remedied by having RTC support in the kernel, so real-time ++ clock support is enabled if this option is enabled, to ensure ++ that hwclock operates correctly. XFree86 still will not ++ operate correctly with this option enabled, so DO NOT CHOOSE Y ++ IF YOU USE XFree86. If you use XFree86 and you still want to ++ protect your kernel against modification, use the RBAC system. ++ ++config GRKERNSEC_PROC_MEMMAP ++ bool "Harden ASLR against information leaks and entropy reduction" ++ default y if (PAX_NOEXEC || PAX_ASLR) ++ depends on PAX_NOEXEC || PAX_ASLR ++ help ++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will ++ give no information about the addresses of its mappings if ++ PaX features that rely on random addresses are enabled on the task. ++ In addition to sanitizing this information and disabling other ++ dangerous sources of information, this option causes reads of sensitive ++ /proc/<pid> entries where the file descriptor was opened in a different ++ task than the one performing the read. Such attempts are logged. ++ This option also limits argv/env strings for suid/sgid binaries ++ to 512KB to prevent a complete exhaustion of the stack entropy provided ++ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid ++ binaries to prevent alternative mmap layouts from being abused. ++ ++ If you use PaX it is essential that you say Y here as it closes up ++ several holes that make full ASLR useless locally. ++ ++config GRKERNSEC_BRUTE ++ bool "Deter exploit bruteforcing" ++ help ++ If you say Y here, attempts to bruteforce exploits against forking ++ daemons such as apache or sshd, as well as against suid/sgid binaries ++ will be deterred. When a child of a forking daemon is killed by PaX ++ or crashes due to an illegal instruction or other suspicious signal, ++ the parent process will be delayed 30 seconds upon every subsequent ++ fork until the administrator is able to assess the situation and ++ restart the daemon. ++ In the suid/sgid case, the attempt is logged, the user has all their ++ processes terminated, and they are prevented from executing any further ++ processes for 15 minutes. ++ It is recommended that you also enable signal logging in the auditing ++ section so that logs are generated when a process triggers a suspicious ++ signal. ++ If the sysctl option is enabled, a sysctl option with name ++ "deter_bruteforce" is created. ++ ++ ++config GRKERNSEC_MODHARDEN ++ bool "Harden module auto-loading" ++ depends on MODULES ++ help ++ If you say Y here, module auto-loading in response to use of some ++ feature implemented by an unloaded module will be restricted to ++ root users. Enabling this option helps defend against attacks ++ by unprivileged users who abuse the auto-loading behavior to ++ cause a vulnerable module to load that is then exploited. ++ ++ If this option prevents a legitimate use of auto-loading for a ++ non-root user, the administrator can execute modprobe manually ++ with the exact name of the module mentioned in the alert log. ++ Alternatively, the administrator can add the module to the list ++ of modules loaded at boot by modifying init scripts. ++ ++ Modification of init scripts will most likely be needed on ++ Ubuntu servers with encrypted home directory support enabled, ++ as the first non-root user logging in will cause the ecb(aes), ++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. 
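In practice the module-hardening option described above comes down to one extra check in the kernel's module auto-load path. The following is a minimal sketch of that idea only, not code taken from this patch: the toggle name modharden_enabled and the helper example_request_module_gate() are hypothetical stand-ins for whatever the patch actually wires into request_module().

#include <linux/capability.h>
#include <linux/errno.h>

static int modharden_enabled = 1;   /* in the patch this would be a Kconfig/sysctl toggle */

/* Called before an implicit request_module(); explicit loading by a
 * privileged administrator is unaffected. */
static int example_request_module_gate(void)
{
        if (!modharden_enabled || capable(CAP_SYS_MODULE))
                return 0;

        /* Unprivileged auto-load request: refuse it. The administrator can
         * still run modprobe by hand with the module name from the alert log. */
        return -EPERM;
}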
++ ++config GRKERNSEC_HIDESYM ++ bool "Hide kernel symbols" ++ help ++ If you say Y here, getting information on loaded modules, and ++ displaying all kernel symbols through a syscall will be restricted ++ to users with CAP_SYS_MODULE. For software compatibility reasons, ++ /proc/kallsyms will be restricted to the root user. The RBAC ++ system can hide that entry even from root. ++ ++ This option also prevents leaking of kernel addresses through ++ several /proc entries. ++ ++ Note that this option is only effective provided the following ++ conditions are met: ++ 1) The kernel using grsecurity is not precompiled by some distribution ++ 2) You have also enabled GRKERNSEC_DMESG ++ 3) You are using the RBAC system and hiding other files such as your ++ kernel image and System.map. Alternatively, enabling this option ++ causes the permissions on /boot, /lib/modules, and the kernel ++ source directory to change at compile time to prevent ++ reading by non-root users. ++ If the above conditions are met, this option will aid in providing a ++ useful protection against local kernel exploitation of overflows ++ and arbitrary read/write vulnerabilities. ++ ++config GRKERNSEC_KERN_LOCKOUT ++ bool "Active kernel exploit response" ++ depends on X86 || ARM || PPC || SPARC ++ help ++ If you say Y here, when a PaX alert is triggered due to suspicious ++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY) ++ or an OOPs occurs due to bad memory accesses, instead of just ++ terminating the offending process (and potentially allowing ++ a subsequent exploit from the same user), we will take one of two ++ actions: ++ If the user was root, we will panic the system ++ If the user was non-root, we will log the attempt, terminate ++ all processes owned by the user, then prevent them from creating ++ any new processes until the system is restarted ++ This deters repeated kernel exploitation/bruteforcing attempts ++ and is useful for later forensics. ++ ++endmenu ++menu "Role Based Access Control Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_RBAC_DEBUG ++ bool ++ ++config GRKERNSEC_NO_RBAC ++ bool "Disable RBAC system" ++ help ++ If you say Y here, the /dev/grsec device will be removed from the kernel, ++ preventing the RBAC system from being enabled. You should only say Y ++ here if you have no intention of using the RBAC system, so as to prevent ++ an attacker with root access from misusing the RBAC system to hide files ++ and processes when loadable module support and /dev/[k]mem have been ++ locked down. ++ ++config GRKERNSEC_ACL_HIDEKERN ++ bool "Hide kernel processes" ++ help ++ If you say Y here, all kernel threads will be hidden to all ++ processes but those whose subject has the "view hidden processes" ++ flag. ++ ++config GRKERNSEC_ACL_MAXTRIES ++ int "Maximum tries before password lockout" ++ default 3 ++ help ++ This option enforces the maximum number of times a user can attempt ++ to authorize themselves with the grsecurity RBAC system before being ++ denied the ability to attempt authorization again for a specified time. ++ The lower the number, the harder it will be to brute-force a password. ++ ++config GRKERNSEC_ACL_TIMEOUT ++ int "Time to wait after max password tries, in seconds" ++ default 30 ++ help ++ This option specifies the time the user must wait after attempting to ++ authorize to the RBAC system with the maximum number of invalid ++ passwords. The higher the number, the harder it will be to brute-force ++ a password. 
++ ++endmenu ++menu "Filesystem Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_PROC ++ bool "Proc restrictions" ++ help ++ If you say Y here, the permissions of the /proc filesystem ++ will be altered to enhance system security and privacy. You MUST ++ choose either a user only restriction or a user and group restriction. ++ Depending upon the option you choose, you can either restrict users to ++ see only the processes they themselves run, or choose a group that can ++ view all processes and files normally restricted to root if you choose ++ the "restrict to user only" option. NOTE: If you're running identd or ++ ntpd as a non-root user, you will have to run it as the group you ++ specify here. ++ ++config GRKERNSEC_PROC_USER ++ bool "Restrict /proc to user only" ++ depends on GRKERNSEC_PROC ++ help ++ If you say Y here, non-root users will only be able to view their own ++ processes, and restricts them from viewing network-related information, ++ and viewing kernel symbol and module information. ++ ++config GRKERNSEC_PROC_USERGROUP ++ bool "Allow special group" ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER ++ help ++ If you say Y here, you will be able to select a group that will be ++ able to view all processes and network-related information. If you've ++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still ++ remain hidden. This option is useful if you want to run identd as ++ a non-root user. ++ ++config GRKERNSEC_PROC_GID ++ int "GID for special group" ++ depends on GRKERNSEC_PROC_USERGROUP ++ default 1001 ++ ++config GRKERNSEC_PROC_ADD ++ bool "Additional restrictions" ++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP ++ help ++ If you say Y here, additional restrictions will be placed on ++ /proc that keep normal users from viewing device information and ++ slabinfo information that could be useful for exploits. ++ ++config GRKERNSEC_LINK ++ bool "Linking restrictions" ++ help ++ If you say Y here, /tmp race exploits will be prevented, since users ++ will no longer be able to follow symlinks owned by other users in ++ world-writable +t directories (e.g. /tmp), unless the owner of the ++ symlink is the owner of the directory. users will also not be ++ able to hardlink to files they do not own. If the sysctl option is ++ enabled, a sysctl option with name "linking_restrictions" is created. ++ ++config GRKERNSEC_FIFO ++ bool "FIFO restrictions" ++ help ++ If you say Y here, users will not be able to write to FIFOs they don't ++ own in world-writable +t directories (e.g. /tmp), unless the owner of ++ the FIFO is the same owner of the directory it's held in. If the sysctl ++ option is enabled, a sysctl option with name "fifo_restrictions" is ++ created. ++ ++config GRKERNSEC_SYSFS_RESTRICT ++ bool "Sysfs/debugfs restriction" ++ depends on SYSFS ++ help ++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and ++ any filesystem normally mounted under it (e.g. debugfs) will be ++ mostly accessible only by root. These filesystems generally provide access ++ to hardware and debug information that isn't appropriate for unprivileged ++ users of the system. Sysfs and debugfs have also become a large source ++ of new vulnerabilities, ranging from infoleaks to local compromise. ++ There has been very little oversight with an eye toward security involved ++ in adding new exporters of information to these filesystems, so their ++ use is discouraged. 
++ For reasons of compatibility, a few directories have been whitelisted ++ for access by non-root users: ++ /sys/fs/selinux ++ /sys/fs/fuse ++ /sys/devices/system/cpu ++ ++config GRKERNSEC_ROFS ++ bool "Runtime read-only mount protection" ++ help ++ If you say Y here, a sysctl option with name "romount_protect" will ++ be created. By setting this option to 1 at runtime, filesystems ++ will be protected in the following ways: ++ * No new writable mounts will be allowed ++ * Existing read-only mounts won't be able to be remounted read/write ++ * Write operations will be denied on all block devices ++ This option acts independently of grsec_lock: once it is set to 1, ++ it cannot be turned off. Therefore, please be mindful of the resulting ++ behavior if this option is enabled in an init script on a read-only ++ filesystem. This feature is mainly intended for secure embedded systems. ++ ++config GRKERNSEC_CHROOT ++ bool "Chroot jail restrictions" ++ help ++ If you say Y here, you will be able to choose several options that will ++ make breaking out of a chrooted jail much more difficult. If you ++ encounter no software incompatibilities with the following options, it ++ is recommended that you enable each one. ++ ++config GRKERNSEC_CHROOT_MOUNT ++ bool "Deny mounts" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ mount or remount filesystems. If the sysctl option is enabled, a ++ sysctl option with name "chroot_deny_mount" is created. ++ ++config GRKERNSEC_CHROOT_DOUBLE ++ bool "Deny double-chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chroot ++ again outside the chroot. This is a widely used method of breaking ++ out of a chroot jail and should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name ++ "chroot_deny_chroot" is created. ++ ++config GRKERNSEC_CHROOT_PIVOT ++ bool "Deny pivot_root in chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to use ++ a function called pivot_root() that was introduced in Linux 2.3.41. It ++ works similar to chroot in that it changes the root filesystem. This ++ function could be misused in a chrooted process to attempt to break out ++ of the chroot, and therefore should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_pivot" is ++ created. ++ ++config GRKERNSEC_CHROOT_CHDIR ++ bool "Enforce chdir("/") on all chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the current working directory of all newly-chrooted ++ applications will be set to the the root directory of the chroot. ++ The man page on chroot(2) states: ++ Note that this call does not change the current working ++ directory, so that `.' can be outside the tree rooted at ++ `/'. In particular, the super-user can escape from a ++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. ++ ++ It is recommended that you say Y here, since it's not known to break ++ any software. If the sysctl option is enabled, a sysctl option with ++ name "chroot_enforce_chdir" is created. ++ ++config GRKERNSEC_CHROOT_CHMOD ++ bool "Deny (f)chmod +s" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chmod ++ or fchmod files to make them have suid or sgid bits. This protects ++ against another published method of breaking a chroot. 
If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_chmod" is ++ created. ++ ++config GRKERNSEC_CHROOT_FCHDIR ++ bool "Deny fchdir out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, a well-known method of breaking chroots by fchdir'ing ++ to a file descriptor of the chrooting process that points to a directory ++ outside the filesystem will be stopped. If the sysctl option ++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created. ++ ++config GRKERNSEC_CHROOT_MKNOD ++ bool "Deny mknod" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be allowed to ++ mknod. The problem with using mknod inside a chroot is that it ++ would allow an attacker to create a device entry that is the same ++ as one on the physical root of your system, which could range from ++ anything from the console device to a device for your harddrive (which ++ they could then use to wipe the drive or steal data). It is recommended ++ that you say Y here, unless you run into software incompatibilities. ++ If the sysctl option is enabled, a sysctl option with name ++ "chroot_deny_mknod" is created. ++ ++config GRKERNSEC_CHROOT_SHMAT ++ bool "Deny shmat() out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to attach ++ to shared memory segments that were created outside of the chroot jail. ++ It is recommended that you say Y here. If the sysctl option is enabled, ++ a sysctl option with name "chroot_deny_shmat" is created. ++ ++config GRKERNSEC_CHROOT_UNIX ++ bool "Deny access to abstract AF_UNIX sockets out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ connect to abstract (meaning not belonging to a filesystem) Unix ++ domain sockets that were bound outside of a chroot. It is recommended ++ that you say Y here. If the sysctl option is enabled, a sysctl option ++ with name "chroot_deny_unix" is created. ++ ++config GRKERNSEC_CHROOT_FINDTASK ++ bool "Protect outside processes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, ++ getsid, or view any process outside of the chroot. If the sysctl ++ option is enabled, a sysctl option with name "chroot_findtask" is ++ created. ++ ++config GRKERNSEC_CHROOT_NICE ++ bool "Restrict priority changes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to raise ++ the priority of processes in the chroot, or alter the priority of ++ processes outside the chroot. This provides more security than simply ++ removing CAP_SYS_NICE from the process' capability set. If the ++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" ++ is created. ++ ++config GRKERNSEC_CHROOT_SYSCTL ++ bool "Deny sysctl writes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, an attacker in a chroot will not be able to ++ write to sysctl entries, either by sysctl(2) or through a /proc ++ interface. It is strongly recommended that you say Y here. If the ++ sysctl option is enabled, a sysctl option with name ++ "chroot_deny_sysctl" is created. 
++ ++config GRKERNSEC_CHROOT_CAPS ++ bool "Capability restrictions" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the capabilities on all processes within a ++ chroot jail will be lowered to stop module insertion, raw i/o, ++ system and net admin tasks, rebooting the system, modifying immutable ++ files, modifying IPC owned by another, and changing the system time. ++ This is left an option because it can break some apps. Disable this ++ if your chrooted apps are having problems performing those kinds of ++ tasks. If the sysctl option is enabled, a sysctl option with ++ name "chroot_caps" is created. ++ ++endmenu ++menu "Kernel Auditing" ++depends on GRKERNSEC ++ ++config GRKERNSEC_AUDIT_GROUP ++ bool "Single group for auditing" ++ help ++ If you say Y here, the exec, chdir, and (un)mount logging features ++ will only operate on a group you specify. This option is recommended ++ if you only want to watch certain users instead of having a large ++ amount of logs from the entire system. If the sysctl option is enabled, ++ a sysctl option with name "audit_group" is created. ++ ++config GRKERNSEC_AUDIT_GID ++ int "GID for auditing" ++ depends on GRKERNSEC_AUDIT_GROUP ++ default 1007 ++ ++config GRKERNSEC_EXECLOG ++ bool "Exec logging" ++ help ++ If you say Y here, all execve() calls will be logged (since the ++ other exec*() calls are frontends to execve(), all execution ++ will be logged). Useful for shell-servers that like to keep track ++ of their users. If the sysctl option is enabled, a sysctl option with ++ name "exec_logging" is created. ++ WARNING: This option when enabled will produce a LOT of logs, especially ++ on an active system. ++ ++config GRKERNSEC_RESLOG ++ bool "Resource logging" ++ help ++ If you say Y here, all attempts to overstep resource limits will ++ be logged with the resource name, the requested size, and the current ++ limit. It is highly recommended that you say Y here. If the sysctl ++ option is enabled, a sysctl option with name "resource_logging" is ++ created. If the RBAC system is enabled, the sysctl value is ignored. ++ ++config GRKERNSEC_CHROOT_EXECLOG ++ bool "Log execs within chroot" ++ help ++ If you say Y here, all executions inside a chroot jail will be logged ++ to syslog. This can cause a large amount of logs if certain ++ applications (eg. djb's daemontools) are installed on the system, and ++ is therefore left as an option. If the sysctl option is enabled, a ++ sysctl option with name "chroot_execlog" is created. ++ ++config GRKERNSEC_AUDIT_PTRACE ++ bool "Ptrace logging" ++ help ++ If you say Y here, all attempts to attach to a process via ptrace ++ will be logged. If the sysctl option is enabled, a sysctl option ++ with name "audit_ptrace" is created. ++ ++config GRKERNSEC_AUDIT_CHDIR ++ bool "Chdir logging" ++ help ++ If you say Y here, all chdir() calls will be logged. If the sysctl ++ option is enabled, a sysctl option with name "audit_chdir" is created. ++ ++config GRKERNSEC_AUDIT_MOUNT ++ bool "(Un)Mount logging" ++ help ++ If you say Y here, all mounts and unmounts will be logged. If the ++ sysctl option is enabled, a sysctl option with name "audit_mount" is ++ created. ++ ++config GRKERNSEC_SIGNAL ++ bool "Signal logging" ++ help ++ If you say Y here, certain important signals will be logged, such as ++ SIGSEGV, which will as a result inform you of when a error in a program ++ occurred, which in some cases could mean a possible exploit attempt. 
++ If the sysctl option is enabled, a sysctl option with name ++ "signal_logging" is created. ++ ++config GRKERNSEC_FORKFAIL ++ bool "Fork failure logging" ++ help ++ If you say Y here, all failed fork() attempts will be logged. ++ This could suggest a fork bomb, or someone attempting to overstep ++ their process limit. If the sysctl option is enabled, a sysctl option ++ with name "forkfail_logging" is created. ++ ++config GRKERNSEC_TIME ++ bool "Time change logging" ++ help ++ If you say Y here, any changes of the system clock will be logged. ++ If the sysctl option is enabled, a sysctl option with name ++ "timechange_logging" is created. ++ ++config GRKERNSEC_PROC_IPADDR ++ bool "/proc/<pid>/ipaddr support" ++ help ++ If you say Y here, a new entry will be added to each /proc/<pid> ++ directory that contains the IP address of the person using the task. ++ The IP is carried across local TCP and AF_UNIX stream sockets. ++ This information can be useful for IDS/IPSes to perform remote response ++ to a local attack. The entry is readable by only the owner of the ++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via ++ the RBAC system), and thus does not create privacy concerns. ++ ++config GRKERNSEC_RWXMAP_LOG ++ bool 'Denied RWX mmap/mprotect logging' ++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT ++ help ++ If you say Y here, calls to mmap() and mprotect() with explicit ++ usage of PROT_WRITE and PROT_EXEC together will be logged when ++ denied by the PAX_MPROTECT feature. If the sysctl option is ++ enabled, a sysctl option with name "rwxmap_logging" is created. ++ ++config GRKERNSEC_AUDIT_TEXTREL ++ bool 'ELF text relocations logging (READ HELP)' ++ depends on PAX_MPROTECT ++ help ++ If you say Y here, text relocations will be logged with the filename ++ of the offending library or binary. The purpose of the feature is ++ to help Linux distribution developers get rid of libraries and ++ binaries that need text relocations which hinder the future progress ++ of PaX. Only Linux distribution developers should say Y here, and ++ never on a production machine, as this option creates an information ++ leak that could aid an attacker in defeating the randomization of ++ a single memory region. If the sysctl option is enabled, a sysctl ++ option with name "audit_textrel" is created. ++ ++endmenu ++ ++menu "Executable Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_DMESG ++ bool "Dmesg(8) restriction" ++ help ++ If you say Y here, non-root users will not be able to use dmesg(8) ++ to view up to the last 4kb of messages in the kernel's log buffer. ++ The kernel's log buffer often contains kernel addresses and other ++ identifying information useful to an attacker in fingerprinting a ++ system for a targeted exploit. ++ If the sysctl option is enabled, a sysctl option with name "dmesg" is ++ created. ++ ++config GRKERNSEC_HARDEN_PTRACE ++ bool "Deter ptrace-based process snooping" ++ help ++ If you say Y here, TTY sniffers and other malicious monitoring ++ programs implemented through ptrace will be defeated. If you ++ have been using the RBAC system, this option has already been ++ enabled for several years for all users, with the ability to make ++ fine-grained exceptions. ++ ++ This option only affects the ability of non-root users to ptrace ++ processes that are not a descendent of the ptracing process. ++ This means that strace ./binary and gdb ./binary will still work, ++ but attaching to arbitrary processes will not. 
If the sysctl ++ option is enabled, a sysctl option with name "harden_ptrace" is ++ created. ++ ++config GRKERNSEC_PTRACE_READEXEC ++ bool "Require read access to ptrace sensitive binaries" ++ help ++ If you say Y here, unprivileged users will not be able to ptrace unreadable ++ binaries. This option is useful in environments that ++ remove the read bits (e.g. file mode 4711) from suid binaries to ++ prevent infoleaking of their contents. This option adds ++ consistency to the use of that file mode, as the binary could normally ++ be read out when run without privileges while ptracing. ++ ++ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec" ++ is created. ++ ++config GRKERNSEC_SETXID ++ bool "Enforce consistent multithreaded privileges" ++ help ++ If you say Y here, a change from a root uid to a non-root uid ++ in a multithreaded application will cause the resulting uids, ++ gids, supplementary groups, and capabilities in that thread ++ to be propagated to the other threads of the process. In most ++ cases this is unnecessary, as glibc will emulate this behavior ++ on behalf of the application. Other libcs do not act in the ++ same way, allowing the other threads of the process to continue ++ running with root privileges. If the sysctl option is enabled, ++ a sysctl option with name "consistent_setxid" is created. ++ ++config GRKERNSEC_TPE ++ bool "Trusted Path Execution (TPE)" ++ help ++ If you say Y here, you will be able to choose a gid to add to the ++ supplementary groups of users you want to mark as "untrusted." ++ These users will not be able to execute any files that are not in ++ root-owned directories writable only by root. If the sysctl option ++ is enabled, a sysctl option with name "tpe" is created. ++ ++config GRKERNSEC_TPE_ALL ++ bool "Partially restrict all non-root users" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, all non-root users will be covered under ++ a weaker TPE restriction. This is separate from, and in addition to, ++ the main TPE options that you have selected elsewhere. Thus, if a ++ "trusted" GID is chosen, this restriction applies to even that GID. ++ Under this restriction, all non-root users will only be allowed to ++ execute files in directories they own that are not group or ++ world-writable, or in directories owned by root and writable only by ++ root. If the sysctl option is enabled, a sysctl option with name ++ "tpe_restrict_all" is created. ++ ++config GRKERNSEC_TPE_INVERT ++ bool "Invert GID option" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, the group you specify in the TPE configuration will ++ decide what group TPE restrictions will be *disabled* for. This ++ option is useful if you want TPE restrictions to be applied to most ++ users on the system. If the sysctl option is enabled, a sysctl option ++ with name "tpe_invert" is created. Unlike other sysctl options, this ++ entry will default to on for backward-compatibility. ++ ++config GRKERNSEC_TPE_GID ++ int "GID for untrusted users" ++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *enabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++ ++config GRKERNSEC_TPE_GID ++ int "GID for trusted users" ++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. 
If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++ ++endmenu ++menu "Network Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_RANDNET ++ bool "Larger entropy pools" ++ help ++ If you say Y here, the entropy pools used for many features of Linux ++ and grsecurity will be doubled in size. Since several grsecurity ++ features use additional randomness, it is recommended that you say Y ++ here. Saying Y here has a similar effect as modifying ++ /proc/sys/kernel/random/poolsize. ++ ++config GRKERNSEC_BLACKHOLE ++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention" ++ depends on NET ++ help ++ If you say Y here, neither TCP resets nor ICMP ++ destination-unreachable packets will be sent in response to packets ++ sent to ports for which no associated listening process exists. ++ This feature supports both IPV4 and IPV6 and exempts the ++ loopback interface from blackholing. Enabling this feature ++ makes a host more resilient to DoS attacks and reduces network ++ visibility against scanners. ++ ++ The blackhole feature as-implemented is equivalent to the FreeBSD ++ blackhole feature, as it prevents RST responses to all packets, not ++ just SYNs. Under most application behavior this causes no ++ problems, but applications (like haproxy) may not close certain ++ connections in a way that cleanly terminates them on the remote ++ end, leaving the remote host in LAST_ACK state. Because of this ++ side-effect and to prevent intentional LAST_ACK DoSes, this ++ feature also adds automatic mitigation against such attacks. ++ The mitigation drastically reduces the amount of time a socket ++ can spend in LAST_ACK state. If you're using haproxy and not ++ all servers it connects to have this option enabled, consider ++ disabling this feature on the haproxy host. ++ ++ If the sysctl option is enabled, two sysctl options with names ++ "ip_blackhole" and "lastack_retries" will be created. ++ While "ip_blackhole" takes the standard zero/non-zero on/off ++ toggle, "lastack_retries" uses the same kinds of values as ++ "tcp_retries1" and "tcp_retries2". The default value of 4 ++ prevents a socket from lasting more than 45 seconds in LAST_ACK ++ state. ++ ++config GRKERNSEC_SOCKET ++ bool "Socket restrictions" ++ depends on NET ++ help ++ If you say Y here, you will be able to choose from several options. ++ If you assign a GID on your system and add it to the supplementary ++ groups of users you want to restrict socket access to, this patch ++ will perform up to three things, based on the option(s) you choose. ++ ++config GRKERNSEC_SOCKET_ALL ++ bool "Deny any sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID of whose users will ++ be unable to connect to other hosts from your machine or run server ++ applications from your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_all" is created. ++ ++config GRKERNSEC_SOCKET_ALL_GID ++ int "GID to deny all sockets for" ++ depends on GRKERNSEC_SOCKET_ALL ++ default 1004 ++ help ++ Here you can choose the GID to disable socket access for. Remember to ++ add the users you want socket access disabled for to the GID ++ specified here. If the sysctl option is enabled, a sysctl option ++ with name "socket_all_gid" is created. 
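The socket options work like the other GID-based restrictions: the socket(2) path checks whether the caller belongs to the configured group before allowing the call. A rough sketch under that assumption follows; the enable flag grsec_enable_socket_all and the helper name are illustrative, CONFIG_GRKERNSEC_SOCKET_ALL_GID is the value chosen above, and with sysctl support the GID would instead be read from a runtime variable.

static int example_socket_gate(void)
{
#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
        if (grsec_enable_socket_all &&
            in_group_p(CONFIG_GRKERNSEC_SOCKET_ALL_GID))
                return -EACCES;         /* member of the restricted GID */
#endif
        return 0;
}

The client and server variants described next differ only in which operations they gate (outbound connections versus running servers), not in this group check.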
++ ++config GRKERNSEC_SOCKET_CLIENT ++ bool "Deny client sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID of whose users will ++ be unable to connect to other hosts from your machine, but will be ++ able to run servers. If this option is enabled, all users in the group ++ you specify will have to use passive mode when initiating ftp transfers ++ from the shell on your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_client" is created. ++ ++config GRKERNSEC_SOCKET_CLIENT_GID ++ int "GID to deny client sockets for" ++ depends on GRKERNSEC_SOCKET_CLIENT ++ default 1003 ++ help ++ Here you can choose the GID to disable client socket access for. ++ Remember to add the users you want client socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_client_gid" is created. ++ ++config GRKERNSEC_SOCKET_SERVER ++ bool "Deny server sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID of whose users will ++ be unable to run server applications from your machine. If the sysctl ++ option is enabled, a sysctl option with name "socket_server" is created. ++ ++config GRKERNSEC_SOCKET_SERVER_GID ++ int "GID to deny server sockets for" ++ depends on GRKERNSEC_SOCKET_SERVER ++ default 1002 ++ help ++ Here you can choose the GID to disable server socket access for. ++ Remember to add the users you want server socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_server_gid" is created. ++ ++endmenu ++menu "Sysctl support" ++depends on GRKERNSEC && SYSCTL ++ ++config GRKERNSEC_SYSCTL ++ bool "Sysctl support" ++ help ++ If you say Y here, you will be able to change the options that ++ grsecurity runs with at bootup, without having to recompile your ++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity ++ to enable (1) or disable (0) various features. All the sysctl entries ++ are mutable until the "grsec_lock" entry is set to a non-zero value. ++ All features enabled in the kernel configuration are disabled at boot ++ if you do not say Y to the "Turn on features by default" option. ++ All options should be set at startup, and the grsec_lock entry should ++ be set to a non-zero value after all the options are set. ++ *THIS IS EXTREMELY IMPORTANT* ++ ++config GRKERNSEC_SYSCTL_DISTRO ++ bool "Extra sysctl support for distro makers (READ HELP)" ++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO ++ help ++ If you say Y here, additional sysctl options will be created ++ for features that affect processes running as root. Therefore, ++ it is critical when using this option that the grsec_lock entry be ++ enabled after boot. Only distros with prebuilt kernel packages ++ with this option enabled that can ensure grsec_lock is enabled ++ after boot should use this option. ++ *Failure to set grsec_lock after boot makes all grsec features ++ this option covers useless* ++ ++ Currently this option creates the following sysctl entries: ++ "Disable Privileged I/O": "disable_priv_io" ++ ++config GRKERNSEC_SYSCTL_ON ++ bool "Turn on features by default" ++ depends on GRKERNSEC_SYSCTL ++ help ++ If you say Y here, instead of having all features enabled in the ++ kernel configuration disabled at boot time, the features will be ++ enabled at boot time. 
It is recommended you say Y here unless ++ there is some reason you would want all sysctl-tunable features to ++ be disabled by default. As mentioned elsewhere, it is important ++ to enable the grsec_lock entry once you have finished modifying ++ the sysctl entries. ++ ++endmenu ++menu "Logging Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_FLOODTIME ++ int "Seconds in between log messages (minimum)" ++ default 10 ++ help ++ This option allows you to enforce the number of seconds between ++ grsecurity log messages. The default should be suitable for most ++ people, however, if you choose to change it, choose a value small enough ++ to allow informative logs to be produced, but large enough to ++ prevent flooding. ++ ++config GRKERNSEC_FLOODBURST ++ int "Number of messages in a burst (maximum)" ++ default 6 ++ help ++ This option allows you to choose the maximum number of messages allowed ++ within the flood time interval you chose in a separate option. The ++ default should be suitable for most people, however if you find that ++ many of your logs are being interpreted as flooding, you may want to ++ raise this value. ++ ++endmenu ++ ++endmenu +diff --git a/grsecurity/Makefile b/grsecurity/Makefile +new file mode 100644 +index 0000000..1b9afa9 +--- /dev/null ++++ b/grsecurity/Makefile +@@ -0,0 +1,38 @@ ++# grsecurity's ACL system was originally written in 2001 by Michael Dalton ++# during 2001-2009 it has been completely redesigned by Brad Spengler ++# into an RBAC system ++# ++# All code in this directory and various hooks inserted throughout the kernel ++# are copyright Brad Spengler - Open Source Security, Inc., and released ++# under the GPL v2 or higher ++ ++KBUILD_CFLAGS += -Werror ++ ++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ ++ grsec_mount.o grsec_sig.o grsec_sysctl.o \ ++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o ++ ++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \ ++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ ++ gracl_learn.o grsec_log.o ++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o ++ ++ifdef CONFIG_NET ++obj-y += grsec_sock.o ++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o ++endif ++ ++ifndef CONFIG_GRKERNSEC ++obj-y += grsec_disabled.o ++endif ++ ++ifdef CONFIG_GRKERNSEC_HIDESYM ++extra-y := grsec_hidesym.o ++$(obj)/grsec_hidesym.o: ++ @-chmod -f 500 /boot ++ @-chmod -f 500 /lib/modules ++ @-chmod -f 500 /lib64/modules ++ @-chmod -f 500 /lib32/modules ++ @-chmod -f 700 . 
++ @echo ' grsec: protected kernel image paths' ++endif +diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c +new file mode 100644 +index 0000000..2d9c682 +--- /dev/null ++++ b/grsecurity/gracl.c +@@ -0,0 +1,4172 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/mount.h> ++#include <linux/tty.h> ++#include <linux/proc_fs.h> ++#include <linux/lglock.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/types.h> ++#include <linux/sysctl.h> ++#include <linux/netdevice.h> ++#include <linux/ptrace.h> ++#include <linux/gracl.h> ++#include <linux/gralloc.h> ++#include <linux/security.h> ++#include <linux/grinternal.h> ++#include <linux/pid_namespace.h> ++#include <linux/fdtable.h> ++#include <linux/percpu.h> ++ ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++ ++static struct acl_role_db acl_role_set; ++static struct name_db name_set; ++static struct inodev_db inodev_set; ++ ++/* for keeping track of userspace pointers used for subjects, so we ++ can share references in the kernel as well ++*/ ++ ++static struct path real_root; ++ ++static struct acl_subj_map_db subj_map_set; ++ ++static struct acl_role_label *default_role; ++ ++static struct acl_role_label *role_list; ++ ++static u16 acl_sp_role_value; ++ ++extern char *gr_shared_page[4]; ++static DEFINE_MUTEX(gr_dev_mutex); ++DEFINE_RWLOCK(gr_inode_lock); ++ ++struct gr_arg *gr_usermode; ++ ++static unsigned int gr_status __read_only = GR_STATUS_INIT; ++ ++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); ++extern void gr_clear_learn_entries(void); ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++extern void gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt); ++#endif ++ ++unsigned char *gr_system_salt; ++unsigned char *gr_system_sum; ++ ++static struct sprole_pw **acl_special_roles = NULL; ++static __u16 num_sprole_pws = 0; ++ ++static struct acl_role_label *kernel_role = NULL; ++ ++static unsigned int gr_auth_attempts = 0; ++static unsigned long gr_auth_expires = 0UL; ++ ++#ifdef CONFIG_NET ++extern struct vfsmount *sock_mnt; ++#endif ++ ++extern struct vfsmount *pipe_mnt; ++extern struct vfsmount *shm_mnt; ++#ifdef CONFIG_HUGETLBFS ++extern struct vfsmount *hugetlbfs_vfsmount; ++#endif ++ ++static struct acl_object_label *fakefs_obj_rw; ++static struct acl_object_label *fakefs_obj_rwx; ++ ++extern int gr_init_uidset(void); ++extern void gr_free_uidset(void); ++extern void gr_remove_uid(uid_t uid); ++extern int gr_find_uid(uid_t uid); ++ ++DECLARE_BRLOCK(vfsmount_lock); ++ ++__inline__ int ++gr_acl_is_enabled(void) ++{ ++ return (gr_status & GR_READY); ++} ++ ++#ifdef CONFIG_BTRFS_FS ++extern dev_t get_btrfs_dev_from_inode(struct inode *inode); ++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); ++#endif ++ ++static inline dev_t __get_dev(const struct dentry *dentry) ++{ ++#ifdef CONFIG_BTRFS_FS ++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr) ++ return get_btrfs_dev_from_inode(dentry->d_inode); ++ else ++#endif ++ return dentry->d_inode->i_sb->s_dev; ++} ++ ++dev_t gr_get_dev_from_dentry(struct dentry *dentry) ++{ ++ return __get_dev(dentry); ++} ++ ++static char gr_task_roletype_to_char(struct task_struct *task) ++{ ++ switch (task->role->roletype & ++ (GR_ROLE_DEFAULT | GR_ROLE_USER | 
GR_ROLE_GROUP | ++ GR_ROLE_SPECIAL)) { ++ case GR_ROLE_DEFAULT: ++ return 'D'; ++ case GR_ROLE_USER: ++ return 'U'; ++ case GR_ROLE_GROUP: ++ return 'G'; ++ case GR_ROLE_SPECIAL: ++ return 'S'; ++ } ++ ++ return 'X'; ++} ++ ++char gr_roletype_to_char(void) ++{ ++ return gr_task_roletype_to_char(current); ++} ++ ++__inline__ int ++gr_acl_tpe_check(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ if (current->role->roletype & GR_ROLE_TPE) ++ return 1; ++ else ++ return 0; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (inode && S_ISBLK(inode->i_mode) && ++ grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++static int ++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) ++{ ++ if (likely(lena != lenb)) ++ return 0; ++ ++ return !memcmp(a, b, lena); ++} ++ ++static int prepend(char **buffer, int *buflen, const char *str, int namelen) ++{ ++ *buflen -= namelen; ++ if (*buflen < 0) ++ return -ENAMETOOLONG; ++ *buffer -= namelen; ++ memcpy(*buffer, str, namelen); ++ return 0; ++} ++ ++static int prepend_name(char **buffer, int *buflen, struct qstr *name) ++{ ++ return prepend(buffer, buflen, name->name, name->len); ++} ++ ++static int prepend_path(const struct path *path, struct path *root, ++ char **buffer, int *buflen) ++{ ++ struct dentry *dentry = path->dentry; ++ struct vfsmount *vfsmnt = path->mnt; ++ bool slash = false; ++ int error = 0; ++ ++ while (dentry != root->dentry || vfsmnt != root->mnt) { ++ struct dentry * parent; ++ ++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { ++ /* Global root? */ ++ if (vfsmnt->mnt_parent == vfsmnt) { ++ goto out; ++ } ++ dentry = vfsmnt->mnt_mountpoint; ++ vfsmnt = vfsmnt->mnt_parent; ++ continue; ++ } ++ parent = dentry->d_parent; ++ prefetch(parent); ++ spin_lock(&dentry->d_lock); ++ error = prepend_name(buffer, buflen, &dentry->d_name); ++ spin_unlock(&dentry->d_lock); ++ if (!error) ++ error = prepend(buffer, buflen, "/", 1); ++ if (error) ++ break; ++ ++ slash = true; ++ dentry = parent; ++ } ++ ++out: ++ if (!error && !slash) ++ error = prepend(buffer, buflen, "/", 1); ++ ++ return error; ++} ++ ++/* this must be called with vfsmount_lock and rename_lock held */ ++ ++static char *__our_d_path(const struct path *path, struct path *root, ++ char *buf, int buflen) ++{ ++ char *res = buf + buflen; ++ int error; ++ ++ prepend(&res, &buflen, "\0", 1); ++ error = prepend_path(path, root, &res, &buflen); ++ if (error) ++ return ERR_PTR(error); ++ ++ return res; ++} ++ ++static char * ++gen_full_path(struct path *path, struct path *root, char *buf, int buflen) ++{ ++ char *retval; ++ ++ retval = __our_d_path(path, root, buf, buflen); ++ if (unlikely(IS_ERR(retval))) ++ retval = strcpy(buf, "<path too long>"); ++ else if (unlikely(retval[1] == '/' && retval[2] == '\0')) ++ retval[1] = '\0'; ++ ++ return retval; ++} ++ ++static char * ++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ struct path path; ++ char *res; ++ ++ path.dentry = (struct dentry *)dentry; ++ path.mnt = (struct vfsmount *)vfsmnt; ++ ++ /* we can use real_root.dentry, real_root.mnt, because this is only called ++ by the RBAC system */ ++ res = gen_full_path(&path, &real_root, buf, buflen); ++ ++ return res; ++} ++ ++static char * ++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ 
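prepend() and prepend_path() above assemble a path from the leaf towards the root: each component is copied in front of what has already been written at the end of a fixed buffer, and the walk fails with -ENAMETOOLONG once the buffer is exhausted. A small userspace sketch of the same buffer discipline, with a hard-coded component list standing in for the dentry walk (everything below is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the prepend() helper above: write 'str' immediately in front
 * of the data already placed at the end of the buffer. */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
    *buflen -= namelen;
    if (*buflen < 0)
        return -ENAMETOOLONG;
    *buffer -= namelen;
    memcpy(*buffer, str, namelen);
    return 0;
}

int main(void)
{
    /* components discovered leaf-to-root, as a dentry walk would see them */
    const char *components[] = { "bin", "local", "usr" };
    char buf[64];
    char *res = buf + sizeof(buf);
    int buflen = sizeof(buf);

    /* buffer is comfortably large for the demo, so returns are not checked */
    prepend(&res, &buflen, "\0", 1);             /* terminating NUL first */
    for (int i = 0; i < 3; i++) {
        prepend(&res, &buflen, components[i], (int)strlen(components[i]));
        prepend(&res, &buflen, "/", 1);
    }
    printf("%s\n", res);                         /* prints /usr/local/bin */
    return 0;
}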
char *res; ++ struct path path; ++ struct path root; ++ struct task_struct *reaper = &init_task; ++ ++ path.dentry = (struct dentry *)dentry; ++ path.mnt = (struct vfsmount *)vfsmnt; ++ ++ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */ ++ get_fs_root(reaper->fs, &root); ++ ++ write_seqlock(&rename_lock); ++ br_read_lock(vfsmount_lock); ++ res = gen_full_path(&path, &root, buf, buflen); ++ br_read_unlock(vfsmount_lock); ++ write_sequnlock(&rename_lock); ++ ++ path_put(&root); ++ return res; ++} ++ ++static char * ++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ write_seqlock(&rename_lock); ++ br_read_lock(vfsmount_lock); ++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++ br_read_unlock(vfsmount_lock); ++ write_sequnlock(&rename_lock); ++ return ret; ++} ++ ++static char * ++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ char *buf; ++ int buflen; ++ ++ write_seqlock(&rename_lock); ++ br_read_lock(vfsmount_lock); ++ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6); ++ buflen = (int)(ret - buf); ++ if (buflen >= 5) ++ prepend(&ret, &buflen, "/proc", 5); ++ else ++ ret = strcpy(buf, "<path too long>"); ++ br_read_unlock(vfsmount_lock); ++ write_sequnlock(&rename_lock); ++ return ret; ++} ++ ++char * ++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++__inline__ __u32 ++to_gr_audit(const __u32 reqmode) ++{ ++ /* masks off auditable permission flags, then shifts them to create ++ auditing flags, and adds the special case of append auditing if ++ we're requesting write */ ++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? 
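The to_gr_audit() comment above describes a pure bit trick: mask the auditable permission bits out of the request, shift them into the audit-flag positions, and additionally turn on append auditing when write was requested. A sketch with made-up bit positions (the real GR_* values are not reproduced here):

#include <stdio.h>

/* Editorial sketch of the to_gr_audit() idea above: requested permission
 * bits are shifted into audit-flag positions, and a write request also
 * sets an append-audit flag. Bit layout below is invented. */
#define REQ_READ     0x1u
#define REQ_WRITE    0x2u
#define AUDIT_SHIFT  10
#define AUDIT_APPEND (1u << 20)

static unsigned int to_audit(unsigned int reqmode)
{
    return (reqmode << AUDIT_SHIFT) |
           ((reqmode & REQ_WRITE) ? AUDIT_APPEND : 0);
}

int main(void)
{
    printf("0x%x\n", to_audit(REQ_READ));             /* 0x400    */
    printf("0x%x\n", to_audit(REQ_READ | REQ_WRITE)); /* 0x100c00 */
    return 0;
}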
GR_AUDIT_APPEND : 0)); ++} ++ ++struct acl_subject_label * ++lookup_subject_map(const struct acl_subject_label *userp) ++{ ++ unsigned int index = shash(userp, subj_map_set.s_size); ++ struct subject_map *match; ++ ++ match = subj_map_set.s_hash[index]; ++ ++ while (match && match->user != userp) ++ match = match->next; ++ ++ if (match != NULL) ++ return match->kernel; ++ else ++ return NULL; ++} ++ ++static void ++insert_subj_map_entry(struct subject_map *subjmap) ++{ ++ unsigned int index = shash(subjmap->user, subj_map_set.s_size); ++ struct subject_map **curr; ++ ++ subjmap->prev = NULL; ++ ++ curr = &subj_map_set.s_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = subjmap; ++ ++ subjmap->next = *curr; ++ *curr = subjmap; ++ ++ return; ++} ++ ++static struct acl_role_label * ++lookup_acl_role_label(const struct task_struct *task, const uid_t uid, ++ const gid_t gid) ++{ ++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); ++ struct acl_role_label *match; ++ struct role_allowed_ip *ipp; ++ unsigned int x; ++ u32 curr_ip = task->signal->curr_ip; ++ ++ task->signal->saved_ip = curr_ip; ++ ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == uid) ++ goto found; ++ } ++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) ++ break; ++ match = match->next; ++ } ++found: ++ if (match == NULL) { ++ try_group: ++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == gid) ++ goto found2; ++ } ++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) ++ break; ++ match = match->next; ++ } ++found2: ++ if (match == NULL) ++ match = default_role; ++ if (match->allowed_ips == NULL) ++ return match; ++ else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ match = default_role; ++ } ++ } else if (match->allowed_ips == NULL) { ++ return match; ++ } else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ goto try_group; ++ } ++ ++ return match; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & 
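The subject and object tables above are chained hash tables keyed on (inode, device), and entries marked deleted remain in their chains so that the paired lookup variants can either skip them or specifically select them. A minimal sketch of that lookup shape, with illustrative types and a toy hash:

#include <stdbool.h>
#include <stdio.h>

/* Editorial sketch of the (inode, device)-keyed chained hash lookups
 * above: buckets hold singly linked chains, and "deleted" entries stay
 * in the chain but are skipped by the normal lookup. Illustrative only. */
struct label {
    unsigned long inode;
    unsigned int  device;
    bool          deleted;
    struct label *next;
};

#define NBUCKETS 8u

static unsigned int fhash(unsigned long ino, unsigned int dev)
{
    return (unsigned int)((ino + dev) % NBUCKETS);
}

static struct label *lookup(struct label *table[], unsigned long ino,
                            unsigned int dev)
{
    struct label *m = table[fhash(ino, dev)];

    while (m && (m->inode != ino || m->device != dev || m->deleted))
        m = m->next;
    return m;               /* NULL when only a deleted entry exists */
}

int main(void)
{
    static struct label a = { .inode = 42, .device = 1, .deleted = true  };
    static struct label b = { .inode = 42, .device = 1, .deleted = false };
    struct label *table[NBUCKETS] = { 0 };

    a.next = &b;                       /* deleted entry sits first in chain */
    table[fhash(42, 1)] = &a;

    printf("%s\n", lookup(table, 42, 1) == &b ? "found live entry" : "not found");
    return 0;
}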
GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct name_entry * ++lookup_name_entry(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) ++ match = match->next; ++ ++ return match; ++} ++ ++static struct name_entry * ++lookup_name_entry_create(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ !match->deleted)) ++ match = match->next; ++ ++ if (match && match->deleted) ++ return match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ match->deleted)) ++ match = match->next; ++ ++ if (match && !match->deleted) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct inodev_entry * ++lookup_inodev_entry(const ino_t ino, const dev_t dev) ++{ ++ unsigned int index = fhash(ino, dev, inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != ino || match->nentry->device != dev)) ++ match = match->next; ++ ++ return match; ++} ++ ++static void ++insert_inodev_entry(struct inodev_entry *entry) ++{ ++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device, ++ inodev_set.i_size); ++ struct inodev_entry **curr; ++ ++ entry->prev = NULL; ++ ++ curr = &inodev_set.i_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = entry; ++ ++ entry->next = *curr; ++ *curr = entry; ++ ++ return; ++} ++ ++static void ++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) ++{ ++ unsigned int index = ++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); ++ struct acl_role_label **curr; ++ struct acl_role_label *tmp, *tmp2; ++ ++ curr = &acl_role_set.r_hash[index]; ++ ++ /* simple case, slot is empty, just set it to our role */ 
++ if (*curr == NULL) { ++ *curr = role; ++ } else { ++ /* example: ++ 1 -> 2 -> 3 (adding 2 -> 3 to here) ++ 2 -> 3 ++ */ ++ /* first check to see if we can already be reached via this slot */ ++ tmp = *curr; ++ while (tmp && tmp != role) ++ tmp = tmp->next; ++ if (tmp == role) { ++ /* we don't need to add ourselves to this slot's chain */ ++ return; ++ } ++ /* we need to add ourselves to this chain, two cases */ ++ if (role->next == NULL) { ++ /* simple case, append the current chain to our role */ ++ role->next = *curr; ++ *curr = role; ++ } else { ++ /* 1 -> 2 -> 3 -> 4 ++ 2 -> 3 -> 4 ++ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here) ++ */ ++ /* trickier case: walk our role's chain until we find ++ the role for the start of the current slot's chain */ ++ tmp = role; ++ tmp2 = *curr; ++ while (tmp->next && tmp->next != tmp2) ++ tmp = tmp->next; ++ if (tmp->next == tmp2) { ++ /* from example above, we found 3, so just ++ replace this slot's chain with ours */ ++ *curr = role; ++ } else { ++ /* we didn't find a subset of our role's chain ++ in the current slot's chain, so append their ++ chain to ours, and set us as the first role in ++ the slot's chain ++ ++ we could fold this case with the case above, ++ but making it explicit for clarity ++ */ ++ tmp->next = tmp2; ++ *curr = role; ++ } ++ } ++ } ++ ++ return; ++} ++ ++static void ++insert_acl_role_label(struct acl_role_label *role) ++{ ++ int i; ++ ++ if (role_list == NULL) { ++ role_list = role; ++ role->prev = NULL; ++ } else { ++ role->prev = role_list; ++ role_list = role; ++ } ++ ++ /* used for hash chains */ ++ role->next = NULL; ++ ++ if (role->roletype & GR_ROLE_DOMAIN) { ++ for (i = 0; i < role->domain_child_num; i++) ++ __insert_acl_role_label(role, role->domain_children[i]); ++ } else ++ __insert_acl_role_label(role, role->uidgid); ++} ++ ++static int ++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted) ++{ ++ struct name_entry **curr, *nentry; ++ struct inodev_entry *ientry; ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ ++ curr = &name_set.n_hash[index]; ++ ++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) ++ curr = &((*curr)->next); ++ ++ if (*curr != NULL) ++ return 1; ++ ++ nentry = acl_alloc(sizeof (struct name_entry)); ++ if (nentry == NULL) ++ return 0; ++ ientry = acl_alloc(sizeof (struct inodev_entry)); ++ if (ientry == NULL) ++ return 0; ++ ientry->nentry = nentry; ++ ++ nentry->key = key; ++ nentry->name = name; ++ nentry->inode = inode; ++ nentry->device = device; ++ nentry->len = len; ++ nentry->deleted = deleted; ++ ++ nentry->prev = NULL; ++ curr = &name_set.n_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = nentry; ++ nentry->next = *curr; ++ *curr = nentry; ++ ++ /* insert us into the table searchable by inode/dev */ ++ insert_inodev_entry(ientry); ++ ++ return 1; ++} ++ ++static void ++insert_acl_obj_label(struct acl_object_label *obj, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = ++ fhash(obj->inode, obj->device, subj->obj_hash_size); ++ struct acl_object_label **curr; ++ ++ ++ obj->prev = NULL; ++ ++ curr = &subj->obj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++static void ++insert_acl_subj_label(struct acl_subject_label *obj, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size); ++ struct 
acl_subject_label **curr; ++ ++ obj->prev = NULL; ++ ++ curr = &role->subj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ ++ ++static void * ++create_table(__u32 * len, int elementsize) ++{ ++ unsigned int table_sizes[] = { ++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, ++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, ++ 4194301, 8388593, 16777213, 33554393, 67108859 ++ }; ++ void *newtable = NULL; ++ unsigned int pwr = 0; ++ ++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && ++ table_sizes[pwr] <= *len) ++ pwr++; ++ ++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) ++ return newtable; ++ ++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) ++ newtable = ++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); ++ else ++ newtable = vmalloc(table_sizes[pwr] * elementsize); ++ ++ *len = table_sizes[pwr]; ++ ++ return newtable; ++} ++ ++static int ++init_variables(const struct gr_arg *arg) ++{ ++ struct task_struct *reaper = &init_task; ++ unsigned int stacksize; ++ ++ subj_map_set.s_size = arg->role_db.num_subjects; ++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; ++ name_set.n_size = arg->role_db.num_objects; ++ inodev_set.i_size = arg->role_db.num_objects; ++ ++ if (!subj_map_set.s_size || !acl_role_set.r_size || ++ !name_set.n_size || !inodev_set.i_size) ++ return 1; ++ ++ if (!gr_init_uidset()) ++ return 1; ++ ++ /* set up the stack that holds allocation info */ ++ ++ stacksize = arg->role_db.num_pointers + 5; ++ ++ if (!acl_alloc_stack_init(stacksize)) ++ return 1; ++ ++ /* grab reference for the real root dentry and vfsmount */ ++ get_fs_root(reaper->fs, &real_root); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino); ++#endif ++ ++ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label)); ++ if (fakefs_obj_rw == NULL) ++ return 1; ++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE; ++ ++ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label)); ++ if (fakefs_obj_rwx == NULL) ++ return 1; ++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; ++ ++ subj_map_set.s_hash = ++ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *)); ++ acl_role_set.r_hash = ++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); ++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); ++ inodev_set.i_hash = ++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *)); ++ ++ if (!subj_map_set.s_hash || !acl_role_set.r_hash || ++ !name_set.n_hash || !inodev_set.i_hash) ++ return 1; ++ ++ memset(subj_map_set.s_hash, 0, ++ sizeof(struct subject_map *) * subj_map_set.s_size); ++ memset(acl_role_set.r_hash, 0, ++ sizeof (struct acl_role_label *) * acl_role_set.r_size); ++ memset(name_set.n_hash, 0, ++ sizeof (struct name_entry *) * name_set.n_size); ++ memset(inodev_set.i_hash, 0, ++ sizeof (struct inodev_entry *) * inodev_set.i_size); ++ ++ return 0; ++} ++ ++/* free information not needed after startup ++ currently contains user->kernel pointer mappings for subjects ++*/ ++ ++static void ++free_init_variables(void) ++{ ++ __u32 i; ++ ++ if (subj_map_set.s_hash) { ++ for (i = 0; i < subj_map_set.s_size; i++) { ++ if 
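The create_table() comment above states the sizing rule: with chaining, the load factor lambda (entries per bucket) should stay around 1, so the bucket count is taken from a fixed list of primes, picking the first one strictly larger than the requested element count. A sketch of that selection with a shortened prime list (the harness and names are illustrative):

#include <stdio.h>

/* Editorial sketch of the table-size selection above: choose the first
 * listed prime strictly larger than the expected number of entries, so
 * the chained table runs at a load factor near 1. Illustrative only. */
static const unsigned int table_sizes[] = {
    7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

static unsigned int pick_table_size(unsigned int wanted)
{
    unsigned int i = 0;
    unsigned int n = sizeof(table_sizes) / sizeof(table_sizes[0]);

    while (i < n - 1 && table_sizes[i] <= wanted)
        i++;
    return table_sizes[i];   /* may be too small if the list is exhausted */
}

int main(void)
{
    printf("%u entries -> %u buckets\n", 100u,  pick_table_size(100));  /* 127  */
    printf("%u entries -> %u buckets\n", 1021u, pick_table_size(1021)); /* 2039 */
    return 0;
}

Prime bucket counts keep the modulo hash reasonably well distributed, and the fixed list bounds how large a single allocation can get, which matches the kmalloc-or-vmalloc split in the patch.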
(subj_map_set.s_hash[i]) { ++ kfree(subj_map_set.s_hash[i]); ++ subj_map_set.s_hash[i] = NULL; ++ } ++ } ++ ++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= ++ PAGE_SIZE) ++ kfree(subj_map_set.s_hash); ++ else ++ vfree(subj_map_set.s_hash); ++ } ++ ++ return; ++} ++ ++static void ++free_variables(void) ++{ ++ struct acl_subject_label *s; ++ struct acl_role_label *r; ++ struct task_struct *task, *task2; ++ unsigned int x; ++ ++ gr_clear_learn_entries(); ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(task2, task) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = 0; ++ task->acl = NULL; ++ task->role = NULL; ++ } while_each_thread(task2, task); ++ read_unlock(&tasklist_lock); ++ ++ /* release the reference to the real root dentry and vfsmount */ ++ path_put(&real_root); ++ memset(&real_root, 0, sizeof(real_root)); ++ ++ /* free all object hash tables */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (r->subj_hash == NULL) ++ goto next_role; ++ FOR_EACH_SUBJECT_START(r, s, x) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_SUBJECT_END(s, x) ++ FOR_EACH_NESTED_SUBJECT_START(r, s) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_NESTED_SUBJECT_END(s) ++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) ++ kfree(r->subj_hash); ++ else ++ vfree(r->subj_hash); ++ r->subj_hash = NULL; ++next_role: ++ FOR_EACH_ROLE_END(r) ++ ++ acl_free_all(); ++ ++ if (acl_role_set.r_hash) { ++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= ++ PAGE_SIZE) ++ kfree(acl_role_set.r_hash); ++ else ++ vfree(acl_role_set.r_hash); ++ } ++ if (name_set.n_hash) { ++ if ((name_set.n_size * sizeof (struct name_entry *)) <= ++ PAGE_SIZE) ++ kfree(name_set.n_hash); ++ else ++ vfree(name_set.n_hash); ++ } ++ ++ if (inodev_set.i_hash) { ++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <= ++ PAGE_SIZE) ++ kfree(inodev_set.i_hash); ++ else ++ vfree(inodev_set.i_hash); ++ } ++ ++ gr_free_uidset(); ++ ++ memset(&name_set, 0, sizeof (struct name_db)); ++ memset(&inodev_set, 0, sizeof (struct inodev_db)); ++ memset(&acl_role_set, 0, sizeof (struct acl_role_db)); ++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db)); ++ ++ default_role = NULL; ++ kernel_role = NULL; ++ role_list = NULL; ++ ++ return; ++} ++ ++static __u32 ++count_user_objs(struct acl_object_label *userp) ++{ ++ struct acl_object_label o_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ break; ++ ++ userp = o_tmp.prev; ++ num++; ++ } ++ ++ return num; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); ++ ++static int ++copy_user_glob(struct acl_object_label *obj) ++{ ++ struct acl_object_label *g_tmp, **guser; ++ unsigned int len; ++ char *tmp; ++ ++ if (obj->globbed == NULL) ++ return 0; ++ ++ guser = &obj->globbed; ++ while (*guser) { ++ g_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label)); ++ if (g_tmp == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(g_tmp, *guser, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ len = strnlen_user(g_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) 
acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, g_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ g_tmp->filename = tmp; ++ ++ *guser = g_tmp; ++ guser = &(g_tmp->next); ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, ++ struct acl_role_label *role) ++{ ++ struct acl_object_label *o_tmp; ++ unsigned int len; ++ int ret; ++ char *tmp; ++ ++ while (userp) { ++ if ((o_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ userp = o_tmp->prev; ++ ++ len = strnlen_user(o_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, o_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ o_tmp->filename = tmp; ++ ++ insert_acl_obj_label(o_tmp, subj); ++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode, ++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0)) ++ return -ENOMEM; ++ ++ ret = copy_user_glob(o_tmp); ++ if (ret) ++ return ret; ++ ++ if (o_tmp->nested) { ++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); ++ if (IS_ERR(o_tmp->nested)) ++ return PTR_ERR(o_tmp->nested); ++ ++ /* insert into nested subject list */ ++ o_tmp->nested->next = role->hash->first; ++ role->hash->first = o_tmp->nested; ++ } ++ } ++ ++ return 0; ++} ++ ++static __u32 ++count_user_subjs(struct acl_subject_label *userp) ++{ ++ struct acl_subject_label s_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ break; ++ ++ userp = s_tmp.prev; ++ /* do not count nested subjects against this count, since ++ they are not included in the hash table, but are ++ attached to objects. 
We have already counted ++ the subjects in userspace for the allocation ++ stack ++ */ ++ if (!(s_tmp.mode & GR_NESTED)) ++ num++; ++ } ++ ++ return num; ++} ++ ++static int ++copy_user_allowedips(struct acl_role_label *rolep) ++{ ++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; ++ ++ ruserip = rolep->allowed_ips; ++ ++ while (ruserip) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_allowed_ip *) ++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, ruserip, ++ sizeof (struct role_allowed_ip))) ++ return -EFAULT; ++ ++ ruserip = rtmp->prev; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->allowed_ips = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!ruserip) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_transitions(struct acl_role_label *rolep) ++{ ++ struct role_transition *rusertp, *rtmp = NULL, *rlast; ++ ++ unsigned int len; ++ char *tmp; ++ ++ rusertp = rolep->transitions; ++ ++ while (rusertp) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_transition *) ++ acl_alloc(sizeof (struct role_transition))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, rusertp, ++ sizeof (struct role_transition))) ++ return -EFAULT; ++ ++ rusertp = rtmp->prev; ++ ++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, rtmp->rolename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ rtmp->rolename = tmp; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->transitions = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!rusertp) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label *s_tmp = NULL, *s_tmp2; ++ unsigned int len; ++ char *tmp; ++ __u32 num_objs; ++ struct acl_ip_label **i_tmp, *i_utmp2; ++ struct gr_hash_struct ghash; ++ struct subject_map *subjmap; ++ unsigned int i_num; ++ int err; ++ ++ s_tmp = lookup_subject_map(userp); ++ ++ /* we've already copied this subject into the kernel, just return ++ the reference to it, and don't copy it over again ++ */ ++ if (s_tmp) ++ return(s_tmp); ++ ++ if ((s_tmp = (struct acl_subject_label *) ++ acl_alloc(sizeof (struct acl_subject_label))) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); ++ if (subjmap == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap->user = userp; ++ subjmap->kernel = s_tmp; ++ insert_subj_map_entry(subjmap); ++ ++ if (copy_from_user(s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ return ERR_PTR(-EFAULT); ++ ++ len = strnlen_user(s_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user(tmp, s_tmp->filename, len)) ++ return ERR_PTR(-EFAULT); ++ tmp[len-1] = '\0'; ++ s_tmp->filename = tmp; ++ ++ if (!strcmp(s_tmp->filename, "/")) ++ role->root_label = s_tmp; ++ ++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) ++ return ERR_PTR(-EFAULT); ++ ++ /* copy user and group transition tables */ ++ ++ if (s_tmp->user_trans_num) { ++ uid_t *uidlist; ++ ++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); ++ if (uidlist 
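count_user_objs() and count_user_subjs() above walk the user-supplied linked structures once, via copy_from_user(), purely to learn how many elements there are; the kernel-side tables are sized from those counts and the data is copied in a second pass. A userspace sketch of that count-then-copy pattern, with memcpy() standing in for copy_from_user() (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Editorial sketch: the source list is reachable only through untrusted
 * pointers, so it is walked once to count entries, storage is sized from
 * the count, and the entries are copied in a second pass. */
struct node {
    int          value;
    struct node *prev;          /* next element to visit, as in the patch */
};

static int copy_in(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);      /* stand-in for copy_from_user() */
    return 0;
}

static unsigned int count_nodes(const struct node *userp)
{
    struct node tmp;
    unsigned int num = 0;

    while (userp) {
        if (copy_in(&tmp, userp, sizeof(tmp)))
            break;
        userp = tmp.prev;
        num++;
    }
    return num;
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    unsigned int n = count_nodes(&a);
    int *table = calloc(n, sizeof(*table));    /* sized from the count */
    struct node tmp;
    const struct node *p = &a;

    if (!table)
        return 1;
    for (unsigned int i = 0; i < n && p; i++) { /* second pass: copy */
        copy_in(&tmp, p, sizeof(tmp));
        table[i] = tmp.value;
        p = tmp.prev;
    }
    printf("copied %u entries: %d %d %d\n", n, table[0], table[1], table[2]);
    free(table);
    return 0;
}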
== NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->user_transitions = uidlist; ++ } ++ ++ if (s_tmp->group_trans_num) { ++ gid_t *gidlist; ++ ++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); ++ if (gidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->group_transitions = gidlist; ++ } ++ ++ /* set up object hash table */ ++ num_objs = count_user_objs(ghash.first); ++ ++ s_tmp->obj_hash_size = num_objs; ++ s_tmp->obj_hash = ++ (struct acl_object_label **) ++ create_table(&(s_tmp->obj_hash_size), sizeof(void *)); ++ ++ if (!s_tmp->obj_hash) ++ return ERR_PTR(-ENOMEM); ++ ++ memset(s_tmp->obj_hash, 0, ++ s_tmp->obj_hash_size * ++ sizeof (struct acl_object_label *)); ++ ++ /* add in objects */ ++ err = copy_user_objs(ghash.first, s_tmp, role); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ /* set pointer for parent subject */ ++ if (s_tmp->parent_subject) { ++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); ++ ++ if (IS_ERR(s_tmp2)) ++ return s_tmp2; ++ ++ s_tmp->parent_subject = s_tmp2; ++ } ++ ++ /* add in ip acls */ ++ ++ if (!s_tmp->ip_num) { ++ s_tmp->ips = NULL; ++ goto insert; ++ } ++ ++ i_tmp = ++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, ++ sizeof (struct acl_ip_label *)); ++ ++ if (!i_tmp) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { ++ *(i_tmp + i_num) = ++ (struct acl_ip_label *) ++ acl_alloc(sizeof (struct acl_ip_label)); ++ if (!*(i_tmp + i_num)) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user ++ (&i_utmp2, s_tmp->ips + i_num, ++ sizeof (struct acl_ip_label *))) ++ return ERR_PTR(-EFAULT); ++ ++ if (copy_from_user ++ (*(i_tmp + i_num), i_utmp2, ++ sizeof (struct acl_ip_label))) ++ return ERR_PTR(-EFAULT); ++ ++ if ((*(i_tmp + i_num))->iface == NULL) ++ continue; ++ ++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ); ++ if (!len || len >= IFNAMSIZ) ++ return ERR_PTR(-EINVAL); ++ tmp = acl_alloc(len); ++ if (tmp == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len)) ++ return ERR_PTR(-EFAULT); ++ (*(i_tmp + i_num))->iface = tmp; ++ } ++ ++ s_tmp->ips = i_tmp; ++ ++insert: ++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode, ++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
1 : 0)) ++ return ERR_PTR(-ENOMEM); ++ ++ return s_tmp; ++} ++ ++static int ++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label s_pre; ++ struct acl_subject_label * ret; ++ int err; ++ ++ while (userp) { ++ if (copy_from_user(&s_pre, userp, ++ sizeof (struct acl_subject_label))) ++ return -EFAULT; ++ ++ /* do not add nested subjects here, add ++ while parsing objects ++ */ ++ ++ if (s_pre.mode & GR_NESTED) { ++ userp = s_pre.prev; ++ continue; ++ } ++ ++ ret = do_copy_user_subj(userp, role); ++ ++ err = PTR_ERR(ret); ++ if (IS_ERR(ret)) ++ return err; ++ ++ insert_acl_subj_label(ret, role); ++ ++ userp = s_pre.prev; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_acl(struct gr_arg *arg) ++{ ++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; ++ struct sprole_pw *sptmp; ++ struct gr_hash_struct *ghash; ++ uid_t *domainlist; ++ unsigned int r_num; ++ unsigned int len; ++ char *tmp; ++ int err = 0; ++ __u16 i; ++ __u32 num_subjs; ++ ++ /* we need a default and kernel role */ ++ if (arg->role_db.num_roles < 2) ++ return -EINVAL; ++ ++ /* copy special role authentication info from userspace */ ++ ++ num_sprole_pws = arg->num_sprole_pws; ++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *)); ++ ++ if (!acl_special_roles && num_sprole_pws) ++ return -ENOMEM; ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); ++ if (!sptmp) ++ return -ENOMEM; ++ if (copy_from_user(sptmp, arg->sprole_pws + i, ++ sizeof (struct sprole_pw))) ++ return -EFAULT; ++ ++ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, sptmp->rolename, len)) ++ return -EFAULT; ++ ++ tmp[len-1] = '\0'; ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Copying special role %s\n", tmp); ++#endif ++ sptmp->rolename = tmp; ++ acl_special_roles[i] = sptmp; ++ } ++ ++ r_utmp = (struct acl_role_label **) arg->role_db.r_table; ++ ++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { ++ r_tmp = acl_alloc(sizeof (struct acl_role_label)); ++ ++ if (!r_tmp) ++ return -ENOMEM; ++ ++ if (copy_from_user(&r_utmp2, r_utmp + r_num, ++ sizeof (struct acl_role_label *))) ++ return -EFAULT; ++ ++ if (copy_from_user(r_tmp, r_utmp2, ++ sizeof (struct acl_role_label))) ++ return -EFAULT; ++ ++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, r_tmp->rolename, len)) ++ return -EFAULT; ++ ++ tmp[len-1] = '\0'; ++ r_tmp->rolename = tmp; ++ ++ if (!strcmp(r_tmp->rolename, "default") ++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) { ++ default_role = r_tmp; ++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { ++ kernel_role = r_tmp; ++ } ++ ++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) ++ return -EFAULT; ++ ++ r_tmp->hash = ghash; ++ ++ num_subjs = count_user_subjs(r_tmp->hash->first); ++ ++ r_tmp->subj_hash_size = num_subjs; ++ r_tmp->subj_hash = ++ (struct acl_subject_label **) ++ create_table(&(r_tmp->subj_hash_size), sizeof(void *)); ++ ++ if (!r_tmp->subj_hash) ++ return -ENOMEM; ++ ++ err = 
copy_user_allowedips(r_tmp); ++ if (err) ++ return err; ++ ++ /* copy domain info */ ++ if (r_tmp->domain_children != NULL) { ++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t)); ++ if (domainlist == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) ++ return -EFAULT; ++ ++ r_tmp->domain_children = domainlist; ++ } ++ ++ err = copy_user_transitions(r_tmp); ++ if (err) ++ return err; ++ ++ memset(r_tmp->subj_hash, 0, ++ r_tmp->subj_hash_size * ++ sizeof (struct acl_subject_label *)); ++ ++ err = copy_user_subjs(r_tmp->hash->first, r_tmp); ++ ++ if (err) ++ return err; ++ ++ /* set nested subject list to null */ ++ r_tmp->hash->first = NULL; ++ ++ insert_acl_role_label(r_tmp); ++ } ++ ++ if (default_role == NULL || kernel_role == NULL) ++ return -EINVAL; ++ ++ return err; ++} ++ ++static int ++gracl_init(struct gr_arg *args) ++{ ++ int error = 0; ++ ++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN); ++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN); ++ ++ if (init_variables(args)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); ++ error = -ENOMEM; ++ free_variables(); ++ goto out; ++ } ++ ++ error = copy_user_acl(args); ++ free_init_variables(); ++ if (error) { ++ free_variables(); ++ goto out; ++ } ++ ++ if ((error = gr_set_acls(0))) { ++ free_variables(); ++ goto out; ++ } ++ ++ pax_open_kernel(); ++ gr_status |= GR_READY; ++ pax_close_kernel(); ++ ++ out: ++ return error; ++} ++ ++/* derived from glibc fnmatch() 0: match, 1: no match*/ ++ ++static int ++glob_match(const char *p, const char *n) ++{ ++ char c; ++ ++ while ((c = *p++) != '\0') { ++ switch (c) { ++ case '?': ++ if (*n == '\0') ++ return 1; ++ else if (*n == '/') ++ return 1; ++ break; ++ case '\': ++ if (*n != c) ++ return 1; ++ break; ++ case '*': ++ for (c = *p++; c == '?' || c == '*'; c = *p++) { ++ if (*n == '/') ++ return 1; ++ else if (c == '?') { ++ if (*n == '\0') ++ return 1; ++ else ++ ++n; ++ } ++ } ++ if (c == '\0') { ++ return 0; ++ } else { ++ const char *endp; ++ ++ if ((endp = strchr(n, '/')) == NULL) ++ endp = n + strlen(n); ++ ++ if (c == '[') { ++ for (--p; n < endp; ++n) ++ if (!glob_match(p, n)) ++ return 0; ++ } else if (c == '/') { ++ while (*n != '\0' && *n != '/') ++ ++n; ++ if (*n == '/' && !glob_match(p, n + 1)) ++ return 0; ++ } else { ++ for (--p; n < endp; ++n) ++ if (*n == c && !glob_match(p, n)) ++ return 0; ++ } ++ ++ return 1; ++ } ++ case '[': ++ { ++ int not; ++ char cold; ++ ++ if (*n == '\0' || *n == '/') ++ return 1; ++ ++ not = (*p == '!' 
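glob_match() above follows the glibc fnmatch() convention, returning 0 on a match and 1 otherwise, and keeps '*' and '?' from matching '/' so patterns apply one path component at a time. A much-reduced sketch with the same convention, handling only '*' and '?' and requiring the whole name to match (the patch's matcher additionally accepts a match that ends at a component boundary):

#include <stdio.h>

/* Simplified wildcard matcher: 0 = match, 1 = no match, and neither
 * '*' nor '?' may match a '/'. Illustrative only. */
static int match(const char *p, const char *n)
{
    char c;

    while ((c = *p++) != '\0') {
        if (c == '?') {
            if (*n == '\0' || *n == '/')
                return 1;
            n++;
        } else if (c == '*') {
            /* try every split point inside the current component */
            for (;;) {
                if (!match(p, n))
                    return 0;
                if (*n == '\0' || *n == '/')
                    return 1;
                n++;
            }
        } else {
            if (*n != c)
                return 1;
            n++;
        }
    }
    return (*n == '\0') ? 0 : 1;
}

int main(void)
{
    printf("%d\n", match("/usr/*/bash", "/usr/bin/bash")); /* 0: matches          */
    printf("%d\n", match("/usr/*",      "/usr/bin/bash")); /* 1: '*' stops at '/' */
    return 0;
}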
|| *p == '^'); ++ if (not) ++ ++p; ++ ++ c = *p++; ++ for (;;) { ++ unsigned char fn = (unsigned char)*n; ++ ++ if (c == '\0') ++ return 1; ++ else { ++ if (c == fn) ++ goto matched; ++ cold = c; ++ c = *p++; ++ ++ if (c == '-' && *p != ']') { ++ unsigned char cend = *p++; ++ ++ if (cend == '\0') ++ return 1; ++ ++ if (cold <= fn && fn <= cend) ++ goto matched; ++ ++ c = *p++; ++ } ++ } ++ ++ if (c == ']') ++ break; ++ } ++ if (!not) ++ return 1; ++ break; ++ matched: ++ while (c != ']') { ++ if (c == '\0') ++ return 1; ++ ++ c = *p++; ++ } ++ if (not) ++ return 1; ++ } ++ break; ++ default: ++ if (c != *n) ++ return 1; ++ } ++ ++ ++n; ++ } ++ ++ if (*n == '\0') ++ return 0; ++ ++ if (*n == '/') ++ return 0; ++ ++ return 1; ++} ++ ++static struct acl_object_label * ++chk_glob_label(struct acl_object_label *globbed, ++ const struct dentry *dentry, const struct vfsmount *mnt, char **path) ++{ ++ struct acl_object_label *tmp; ++ ++ if (*path == NULL) ++ *path = gr_to_filename_nolock(dentry, mnt); ++ ++ tmp = globbed; ++ ++ while (tmp) { ++ if (!glob_match(tmp->filename, *path)) ++ return tmp; ++ tmp = tmp->next; ++ } ++ ++ return NULL; ++} ++ ++static struct acl_object_label * ++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const ino_t curr_ino, const dev_t curr_dev, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ struct acl_subject_label *tmpsubj; ++ struct acl_object_label *retval; ++ struct acl_object_label *retval2; ++ ++ tmpsubj = (struct acl_subject_label *) subj; ++ read_lock(&gr_inode_lock); ++ do { ++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); ++ if (retval) { ++ if (checkglob && retval->globbed) { ++ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path); ++ if (retval2) ++ retval = retval2; ++ } ++ break; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++ read_unlock(&gr_inode_lock); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ struct dentry *curr_dentry, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ int newglob = checkglob; ++ ino_t inode; ++ dev_t device; ++ ++ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking ++ as we don't want a / * rule to match instead of the / object ++ don't do this for create lookups that call this function though, since they're looking up ++ on the parent and thus need globbing checks on all paths ++ */ ++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB) ++ newglob = GR_NO_GLOB; ++ ++ spin_lock(&curr_dentry->d_lock); ++ inode = curr_dentry->d_inode->i_ino; ++ device = __get_dev(curr_dentry); ++ spin_unlock(&curr_dentry->d_lock); ++ ++ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob); ++} ++ ++static struct acl_object_label * ++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path, const int checkglob) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_object_label *retval; ++ struct dentry *parent; ++ ++ write_seqlock(&rename_lock); ++ br_read_lock(vfsmount_lock); ++ ++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt || ++#ifdef CONFIG_NET ++ mnt == sock_mnt || ++#endif ++#ifdef CONFIG_HUGETLBFS ++ (mnt == hugetlbfs_vfsmount && 
dentry->d_inode->i_nlink == 0) || ++#endif ++ /* ignore Eric Biederman */ ++ IS_PRIVATE(l_dentry->d_inode))) { ++ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw; ++ goto out; ++ } ++ ++ for (;;) { ++ if (dentry == real_root.dentry && mnt == real_root.mnt) ++ break; ++ ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ parent = dentry->d_parent; ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = parent; ++ } ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ ++ /* real_root is pinned so we don't have to hold a reference */ ++ if (retval == NULL) ++ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob); ++out: ++ br_read_unlock(vfsmount_lock); ++ write_sequnlock(&rename_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path) ++{ ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB); ++} ++ ++static struct acl_subject_label * ++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_role_label *role) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_subject_label *retval; ++ struct dentry *parent; ++ ++ write_seqlock(&rename_lock); ++ br_read_lock(vfsmount_lock); ++ ++ for (;;) { ++ if (dentry == real_root.dentry && mnt == real_root.mnt) ++ break; ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = ++ lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ spin_unlock(&dentry->d_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ parent = dentry->d_parent; ++ spin_unlock(&dentry->d_lock); ++ ++ if (retval != NULL) ++ goto out; ++ ++ dentry = parent; ++ } ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ spin_unlock(&dentry->d_lock); ++ ++ if (unlikely(retval == NULL)) { ++ /* real_root is pinned, we don't need to hold a reference */ ++ read_lock(&gr_inode_lock); ++ retval = 
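chk_subj_label() and __chk_obj_label() above start at the requested dentry and walk parent by parent, crossing mount points, until a label is found, so the most specific labelled ancestor wins and the root object is the final fallback. A flat-string sketch of the same longest-ancestor idea; the rule table, labels and paths are invented for the example:

#include <stdio.h>
#include <string.h>

/* Editorial sketch of "walk toward the root until a label matches":
 * the closest labelled ancestor of a path wins, with "/" as fallback. */
struct rule { const char *path; const char *label; };

static const struct rule rules[] = {
    { "/",        "default"   },
    { "/home",    "user-data" },
    { "/usr/bin", "exec-only" },
};

static const char *lookup_label(const char *path)
{
    char buf[256];
    size_t i, n = sizeof(rules) / sizeof(rules[0]);

    strncpy(buf, path, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    for (;;) {
        for (i = 0; i < n; i++)
            if (strcmp(rules[i].path, buf) == 0)
                return rules[i].label;

        char *slash = strrchr(buf, '/');
        if (slash == NULL || slash == buf) {
            if (strcmp(buf, "/") == 0)
                return "default";        /* nothing matched at all */
            strcpy(buf, "/");            /* final step: fall back to root */
        } else {
            *slash = '\0';               /* strip the last component */
        }
    }
}

int main(void)
{
    printf("%s\n", lookup_label("/usr/bin/less"));      /* exec-only */
    printf("%s\n", lookup_label("/home/alice/notes"));  /* user-data */
    printf("%s\n", lookup_label("/etc/passwd"));        /* default   */
    return 0;
}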
lookup_acl_subj_label(real_root.dentry->d_inode->i_ino, ++ __get_dev(real_root.dentry), role); ++ read_unlock(&gr_inode_lock); ++ } ++out: ++ br_read_unlock(vfsmount_lock); ++ write_sequnlock(&rename_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++static void ++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_sysctl(const char *path, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_id_change(const char type, const unsigned int real, ++ const unsigned int effective, const unsigned int fs) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ type, real, effective, fs, &task->signal->saved_ip); ++ ++ return; ++} ++ ++__u32 ++gr_search_file(const struct dentry * dentry, const __u32 mode, ++ const struct vfsmount * mnt) ++{ ++ __u32 retval = mode; ++ struct acl_subject_label *curracl; ++ struct acl_object_label *currobj; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ curracl = current->acl; ++ ++ currobj = chk_obj_label(dentry, mnt, curracl); ++ retval = currobj->mode & mode; ++ ++ /* if we're opening a specified transfer file for writing ++ (e.g. 
/dev/initctl), then transfer our role to init ++ */ ++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE && ++ current->role->roletype & GR_ROLE_PERSIST)) { ++ struct task_struct *task = init_pid_ns.child_reaper; ++ ++ if (task->role != current->role) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = current->acl_role_id; ++ task->role = current->role; ++ rcu_read_lock(); ++ read_lock(&grsec_exec_file_lock); ++ gr_apply_subject_to_task(task); ++ read_unlock(&grsec_exec_file_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG); ++ } ++ } ++ ++ if (unlikely ++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) ++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ retval = new_mode; ++ ++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) ++ new_mode |= GR_INHERIT; ++ ++ if (!(mode & GR_NOLEARN)) ++ gr_log_learn(dentry, mnt, new_mode); ++ } ++ ++ return retval; ++} ++ ++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt) ++{ ++ struct name_entry *match; ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *curracl; ++ char *path; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return NULL; ++ ++ preempt_disable(); ++ path = gr_to_filename_rbac(new_dentry, mnt); ++ match = lookup_name_entry_create(path); ++ ++ curracl = current->acl; ++ ++ if (match) { ++ read_lock(&gr_inode_lock); ++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); ++ read_unlock(&gr_inode_lock); ++ ++ if (matchpo) { ++ preempt_enable(); ++ return matchpo; ++ } ++ } ++ ++ // lookup parent ++ ++ matchpo = chk_obj_create_label(parent, mnt, curracl, path); ++ ++ preempt_enable(); ++ return matchpo; ++} ++ ++__u32 ++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, ++ const struct vfsmount * mnt, const __u32 mode) ++{ ++ struct acl_object_label *matchpo; ++ __u32 retval; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ matchpo = gr_get_create_object(new_dentry, parent, mnt); ++ ++ retval = matchpo->mode & mode; ++ ++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) ++ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ gr_log_learn(new_dentry, mnt, new_mode); ++ return new_mode; ++ } ++ ++ return retval; ++} ++ ++__u32 ++gr_check_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, const struct vfsmount * old_mnt) ++{ ++ struct acl_object_label *obj; ++ __u32 oldmode, newmode; ++ __u32 needmode; ++ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ | ++ GR_DELETE | GR_INHERIT; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (GR_CREATE | GR_LINK); ++ ++ obj = chk_obj_label(old_dentry, old_mnt, current->acl); ++ oldmode = obj->mode; ++ ++ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt); ++ newmode = obj->mode; ++ ++ needmode = newmode & checkmodes; ++ ++ // old name for hardlink must have at least the permissions of the new name ++ if ((oldmode & needmode) != needmode) ++ goto bad; ++ ++ // if old name had restrictions/auditing, make sure the new name does as well ++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); 
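gr_check_link() above reasons in subset terms: the existing name must already grant every permission the new hardlink would grant, which for bitmask modes is the test (oldmode & needmode) == needmode. A tiny sketch of that check with invented flag values:

#include <stdbool.h>
#include <stdio.h>

/* Editorial sketch of the bitmask reasoning in the hardlink check above:
 * a "needed" set is a subset of a "granted" set exactly when
 * (granted & needed) == needed. Flag values are made up. */
#define P_READ  0x1u
#define P_WRITE 0x2u
#define P_EXEC  0x4u

static bool grants(unsigned int granted, unsigned int needed)
{
    return (granted & needed) == needed;
}

int main(void)
{
    unsigned int oldmode = P_READ | P_WRITE;            /* existing name */
    unsigned int newmode = P_READ | P_WRITE | P_EXEC;   /* proposed link */

    /* the new name must not grant anything the old name does not */
    printf("link allowed: %s\n", grants(oldmode, newmode) ? "yes" : "no"); /* no  */

    newmode = P_READ;
    printf("link allowed: %s\n", grants(oldmode, newmode) ? "yes" : "no"); /* yes */
    return 0;
}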
++ ++ // don't allow hardlinking of suid/sgid files without permission ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID; ++ ++ if ((newmode & needmode) != needmode) ++ goto bad; ++ ++ // enforce minimum permissions ++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) ++ return newmode; ++bad: ++ needmode = oldmode; ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK); ++ return (GR_CREATE | GR_LINK); ++ } else if (newmode & GR_SUPPRESS) ++ return GR_SUPPRESS; ++ else ++ return 0; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY) || !task)) ++ return 0; ++ ++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ task->acl != current->acl) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ struct task_struct *p; ++ int ret = 0; ++ ++ if (unlikely(!(gr_status & GR_READY) || !pid)) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ p->acl != current->acl) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ /* plain copying of fields is already done by dup_task_struct */ ++ tsk->signal->used_accept = 0; ++ tsk->acl_sp_role = 0; ++ //tsk->acl_role_id = current->acl_role_id; ++ //tsk->acl = current->acl; ++ //tsk->role = current->role; ++ tsk->signal->curr_ip = current->signal->curr_ip; ++ tsk->signal->saved_ip = current->signal->saved_ip; ++ if (current->exec_file) ++ get_file(current->exec_file); ++ //tsk->exec_file = current->exec_file; ++ //tsk->is_writable = current->is_writable; ++ if (unlikely(current->signal->used_accept)) { ++ current->signal->curr_ip = 0; ++ current->signal->saved_ip = 0; ++ } ++ ++ return; ++} ++ ++static void ++gr_set_proc_res(struct task_struct *task) ++{ ++ struct acl_subject_label *proc; ++ unsigned short i; ++ ++ proc = task->acl; ++ ++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return; ++ ++ for (i = 0; i < RLIM_NLIMITS; i++) { ++ if (!(proc->resmask & (1 << i))) ++ continue; ++ ++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; ++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; ++ } ++ ++ return; ++} ++ ++extern int __gr_process_user_ban(struct user_struct *user); ++ ++int ++gr_check_user_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ uid_t *uidlist; ++ int curuid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ struct user_struct *user; ++ ++ if (real == -1) ++ goto skipit; ++ ++ user = find_user(real); ++ if (user == NULL) ++ goto skipit; ++ ++ if (__gr_process_user_ban(user)) { ++ /* for find_user */ ++ free_uid(user); ++ return 1; ++ } ++ ++ /* for find_user */ ++ free_uid(user); ++ ++skipit: ++#endif ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if 
(current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_id_change('u', real, effective, fs); ++ ++ num = current->acl->user_trans_num; ++ uidlist = current->acl->user_transitions; ++ ++ if (uidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->user_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ realok = 1; ++ if (effective == curuid) ++ effectiveok = 1; ++ if (fs == curuid) ++ fsok = 1; ++ } ++ } else if (current->acl->user_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ break; ++ if (effective == curuid) ++ break; ++ if (fs == curuid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++int ++gr_check_group_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ gid_t *gidlist; ++ int curgid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_id_change('g', real, effective, fs); ++ ++ num = current->acl->group_trans_num; ++ gidlist = current->acl->group_transitions; ++ ++ if (gidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->group_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ realok = 1; ++ if (effective == curgid) ++ effectiveok = 1; ++ if (fs == curgid) ++ fsok = 1; ++ } ++ } else if (current->acl->group_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ break; ++ if (effective == curgid) ++ break; ++ if (fs == curgid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
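gr_check_user_change() above enforces per-subject id transition lists: with an allow list every id being changed must appear in it, with a deny list none may, and an id of -1 means that id is not being changed. A compact sketch of that membership rule (types, names and the harness are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Editorial sketch of the allow/deny id-transition check above. */
enum list_type { ID_ALLOW, ID_DENY };

static bool id_ok(int id, const int *list, unsigned int n, enum list_type t)
{
    unsigned int i;

    if (id == -1)                       /* this id is not being changed */
        return true;
    for (i = 0; i < n; i++)
        if (list[i] == id)
            return t == ID_ALLOW;       /* listed: ok only on allow list */
    return t == ID_DENY;                /* unlisted: ok only on deny list */
}

static bool change_ok(int real, int effective, int fs,
                      const int *list, unsigned int n, enum list_type t)
{
    return id_ok(real, list, n, t) &&
           id_ok(effective, list, n, t) &&
           id_ok(fs, list, n, t);
}

int main(void)
{
    const int allow[] = { 0, 1000 };

    printf("%d\n", change_ok(1000, 1000, 1000, allow, 2, ID_ALLOW)); /* 1: all listed   */
    printf("%d\n", change_ok(1000, 33,   -1,   allow, 2, ID_ALLOW)); /* 0: 33 not listed */
    return 0;
}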
0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++extern int gr_acl_is_capable(const int cap); ++ ++void ++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) ++{ ++ struct acl_role_label *role = task->role; ++ struct acl_subject_label *subj = NULL; ++ struct acl_object_label *obj; ++ struct file *filp; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ filp = task->exec_file; ++ ++ /* kernel process, we'll give them the kernel role */ ++ if (unlikely(!filp)) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ return; ++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) ++ role = lookup_acl_role_label(task, uid, gid); ++ ++ /* don't change the role if we're not a privileged process */ ++ if (role && task->role != role && ++ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) || ++ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID)))) ++ return; ++ ++ /* perform subject lookup in possibly new role ++ we can use this result below in the case where role == task->role ++ */ ++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); ++ ++ /* if we changed uid/gid, but result in the same role ++ and are using inheritance, don't lose the inherited subject ++ if current subject is other than what normal lookup ++ would result in, we arrived via inheritance, don't ++ lose subject ++ */ ++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && ++ (subj == task->acl))) ++ task->acl = subj; ++ ++ task->role = role; ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ ++ gr_set_proc_res(task); ++ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_flags) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *newacl; ++ struct acl_object_label *obj; ++ __u32 retmode; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ newacl = chk_subj_label(dentry, mnt, task->role); ++ ++ task_lock(task); ++ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && ++ !(task->role->roletype & GR_ROLE_GOD) && ++ !gr_search_file(dentry, GR_PTRACERD, mnt) && ++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ task_unlock(task); ++ if (unsafe_flags & LSM_UNSAFE_SHARE) ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); ++ else ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); ++ return -EACCES; ++ } ++ task_unlock(task); ++ ++ obj = chk_obj_label(dentry, mnt, task->acl); ++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); ++ ++ if (!(task->acl->mode & GR_INHERITLEARN) && ++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { ++ if (obj->nested) ++ task->acl = obj->nested; ++ else ++ task->acl = newacl; ++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) ++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, 
task->acl->filename, dentry, mnt); ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(dentry, mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(dentry, mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ return 0; ++} ++ ++/* always called with valid inodev ptr */ ++static void ++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev) ++{ ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *matchps; ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) ++ matchpo->mode |= GR_DELETED; ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if (subj->inode == ino && subj->device == dev) ++ subj->mode |= GR_DELETED; ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) ++ matchps->mode |= GR_DELETED; ++ FOR_EACH_ROLE_END(role) ++ ++ inodev->nentry->deleted = 1; ++ ++ return; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ struct inodev_entry *inodev; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ write_lock(&gr_inode_lock); ++ inodev = lookup_inodev_entry(ino, dev); ++ if (inodev != NULL) ++ do_handle_delete(inodev, ino, dev); ++ write_unlock(&gr_inode_lock); ++ ++ return; ++} ++ ++static void ++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ subj->obj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_obj_label(match, subj); ++ } ++ ++ return; ++} ++ ++static void ++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ role->subj_hash[index] = match->next; ++ if (match->next != NULL) ++ 
match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_subj_label(match, role); ++ } ++ ++ return; ++} ++ ++static void ++update_inodev_entry(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice) ++{ ++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != oldinode || ++ match->nentry->device != olddevice || !match->nentry->deleted)) ++ match = match->next; ++ ++ if (match && (match->nentry->inode == oldinode) ++ && (match->nentry->device == olddevice) && ++ match->nentry->deleted) { ++ if (match->prev == NULL) { ++ inodev_set.i_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->nentry->inode = newinode; ++ match->nentry->device = newdevice; ++ match->nentry->deleted = 0; ++ ++ insert_inodev_entry(match); ++ } ++ ++ return; ++} ++ ++static void ++__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev) ++{ ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role); ++ ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if ((subj->inode == ino) && (subj->device == dev)) { ++ subj->inode = ino; ++ subj->device = dev; ++ } ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ update_acl_obj_label(matchn->inode, matchn->device, ++ ino, dev, subj); ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_ROLE_END(role) ++ ++ update_inodev_entry(matchn->inode, matchn->device, ino, dev); ++ ++ return; ++} ++ ++static void ++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, ++ const struct vfsmount *mnt) ++{ ++ ino_t ino = dentry->d_inode->i_ino; ++ dev_t dev = __get_dev(dentry); ++ ++ __do_handle_create(matchn, ino, dev); ++ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ do_handle_create(matchn, dentry, mnt); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ struct name_entry *matchn; ++ 
struct inodev_entry *inodev; ++ struct inode *inode = new_dentry->d_inode; ++ ino_t old_ino = old_dentry->d_inode->i_ino; ++ dev_t old_dev = __get_dev(old_dentry); ++ ++ /* vfs_rename swaps the name and parent link for old_dentry and ++ new_dentry ++ at this point, old_dentry has the new name, parent link, and inode ++ for the renamed file ++ if a file is being replaced by a rename, new_dentry has the inode ++ and name for the replaced file ++ */ ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); ++ ++ /* we wouldn't have to check d_inode if it weren't for ++ NFS silly-renaming ++ */ ++ ++ write_lock(&gr_inode_lock); ++ if (unlikely(replace && inode)) { ++ ino_t new_ino = inode->i_ino; ++ dev_t new_dev = __get_dev(new_dentry); ++ ++ inodev = lookup_inodev_entry(new_ino, new_dev); ++ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode))) ++ do_handle_delete(inodev, new_ino, new_dev); ++ } ++ ++ inodev = lookup_inodev_entry(old_ino, old_dev); ++ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode))) ++ do_handle_delete(inodev, old_ino, old_dev); ++ ++ if (unlikely((unsigned long)matchn)) ++ do_handle_create(matchn, old_dentry, mnt); ++ ++ write_unlock(&gr_inode_lock); ++ preempt_enable(); ++ ++ return; ++} ++ ++static int ++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, ++ unsigned char **sum) ++{ ++ struct acl_role_label *r; ++ struct role_allowed_ip *ipp; ++ struct role_transition *trans; ++ unsigned int i; ++ int found = 0; ++ u32 curr_ip = current->signal->curr_ip; ++ ++ current->signal->saved_ip = curr_ip; ++ ++ /* check transition table */ ++ ++ for (trans = current->role->transitions; trans; trans = trans->next) { ++ if (!strcmp(rolename, trans->rolename)) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return 0; ++ ++ /* handle special roles that do not require authentication ++ and check ip */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ found = 0; ++ if (r->allowed_ips != NULL) { ++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { ++ if ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask)) ++ found = 1; ++ } ++ } else ++ found = 2; ++ if (!found) ++ return 0; ++ ++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) || ++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { ++ *salt = NULL; ++ *sum = NULL; ++ return 1; ++ } ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) { ++ *salt = acl_special_roles[i]->salt; ++ *sum = acl_special_roles[i]->sum; ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++assign_special_role(char *rolename) ++{ ++ struct acl_object_label *obj; ++ struct acl_role_label *r; ++ struct acl_role_label *assigned = NULL; ++ struct task_struct *tsk; ++ struct file *filp; ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ assigned = r; ++ break; ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ if (!assigned) ++ return; ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ ++ tsk = current->real_parent; ++ if (tsk == NULL) ++ goto out_unlock; ++ ++ filp = tsk->exec_file; ++ if (filp == NULL) ++ goto out_unlock; ++ ++ tsk->is_writable = 0; ++ ++ tsk->acl_sp_role = 1; ++ tsk->acl_role_id = ++acl_sp_role_value; 
++ tsk->role = assigned; ++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); ++#endif ++ ++out_unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return; ++} ++ ++int gr_check_secure_terminal(struct task_struct *task) ++{ ++ struct task_struct *p, *p2, *p3; ++ struct files_struct *files; ++ struct fdtable *fdt; ++ struct file *our_file = NULL, *file; ++ int i; ++ ++ if (task->signal->tty == NULL) ++ return 1; ++ ++ files = get_files_struct(task); ++ if (files != NULL) { ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { ++ get_file(file); ++ our_file = file; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } ++ ++ if (our_file == NULL) ++ return 1; ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ files = get_files_struct(p); ++ if (files == NULL || ++ (p->signal && p->signal->tty == task->signal->tty)) { ++ if (files != NULL) ++ put_files_struct(files); ++ continue; ++ } ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) && ++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) { ++ p3 = task; ++ while (p3->pid > 0) { ++ if (p3 == p) ++ break; ++ p3 = p3->real_parent; ++ } ++ if (p3 == p) ++ break; ++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p); ++ gr_handle_alertkill(p); ++ rcu_read_unlock(); ++ put_files_struct(files); ++ read_unlock(&tasklist_lock); ++ fput(our_file); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ ++ fput(our_file); ++ return 1; ++} ++ ++ssize_t ++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) ++{ ++ struct gr_arg_wrapper uwrap; ++ unsigned char *sprole_salt = NULL; ++ unsigned char *sprole_sum = NULL; ++ int error = sizeof (struct gr_arg_wrapper); ++ int error2 = 0; ++ ++ mutex_lock(&gr_dev_mutex); ++ ++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ if (count != sizeof (struct gr_arg_wrapper)) { ++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); ++ error = -EINVAL; ++ goto out; ++ } ++ ++ ++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { ++ gr_auth_expires = 0; ++ gr_auth_attempts = 0; ++ } ++ ++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { ++ error = -EFAULT; ++ goto out; ++ } ++ ++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { ++ error = -EINVAL; ++ goto out; ++ } ++ ++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { ++ error = 
-EFAULT; ++ goto out; ++ } ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(gr_auth_expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ /* if non-root trying to do anything other than use a special role, ++ do not attempt authentication, do not count towards authentication ++ locking ++ */ ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS && ++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ current_uid()) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ /* ensure pw and special role name are null terminated */ ++ ++ gr_usermode->pw[GR_PW_LEN - 1] = '\0'; ++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; ++ ++ /* Okay. ++ * We have our enough of the argument structure..(we have yet ++ * to copy_from_user the tables themselves) . Copy the tables ++ * only if we need them, i.e. for loading operations. */ ++ ++ switch (gr_usermode->mode) { ++ case GR_STATUS: ++ if (gr_status & GR_READY) { ++ error = 1; ++ if (!gr_check_secure_terminal(current)) ++ error = 3; ++ } else ++ error = 2; ++ goto out; ++ case GR_SHUTDOWN: ++ if ((gr_status & GR_READY) ++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); ++ free_variables(); ++ memset(gr_usermode, 0, sizeof (struct gr_arg)); ++ memset(gr_system_salt, 0, GR_SALT_LEN); ++ memset(gr_system_sum, 0, GR_SHA_LEN); ++ } else if (gr_status & GR_READY) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); ++ error = -EPERM; ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); ++ error = -EAGAIN; ++ } ++ break; ++ case GR_ENABLE: ++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); ++ else { ++ if (gr_status & GR_READY) ++ error = -EAGAIN; ++ else ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); ++ } ++ break; ++ case GR_RELOAD: ++ if (!(gr_status & GR_READY)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); ++ error = -EAGAIN; ++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ preempt_disable(); ++ ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ free_variables(); ++ if (!(error2 = gracl_init(gr_usermode))) { ++ preempt_enable(); ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); ++ } else { ++ preempt_enable(); ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ } ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ error = -EPERM; ++ } ++ break; ++ case GR_SEGVMOD: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); ++ if (gr_usermode->segv_device && gr_usermode->segv_inode) { ++ struct acl_subject_label *segvacl; ++ segvacl = ++ lookup_acl_subj_label(gr_usermode->segv_inode, ++ gr_usermode->segv_device, ++ current->role); ++ if (segvacl) { ++ segvacl->crashes = 0; ++ segvacl->expires = 0; ++ } ++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { ++ gr_remove_uid(gr_usermode->segv_uid); ++ } ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); ++ error = -EPERM; 
++ } ++ break; ++ case GR_SPROLE: ++ case GR_SPROLEPAM: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { ++ current->role->expires = 0; ++ current->role->auth_attempts = 0; ++ } ++ ++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(current->role->expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ if (lookup_special_role_auth ++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) ++ && ((!sprole_salt && !sprole_sum) ++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { ++ char *p = ""; ++ assign_special_role(gr_usermode->sp_role); ++ read_lock(&tasklist_lock); ++ if (current->real_parent) ++ p = current->real_parent->role->rolename; ++ read_unlock(&tasklist_lock); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, ++ p, acl_sp_role_value); ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); ++ error = -EPERM; ++ if(!(current->role->auth_attempts++)) ++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ goto out; ++ } ++ break; ++ case GR_UNSPROLE: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->roletype & GR_ROLE_SPECIAL) { ++ char *p = ""; ++ int i = 0; ++ ++ read_lock(&tasklist_lock); ++ if (current->real_parent) { ++ p = current->real_parent->role->rolename; ++ i = current->real_parent->acl_role_id; ++ } ++ read_unlock(&tasklist_lock); ++ ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); ++ gr_set_acls(1); ++ } else { ++ error = -EPERM; ++ goto out; ++ } ++ break; ++ default: ++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); ++ error = -EINVAL; ++ break; ++ } ++ ++ if (error != -EPERM) ++ goto out; ++ ++ if(!(gr_auth_attempts++)) ++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ out: ++ mutex_unlock(&gr_dev_mutex); ++ return error; ++} ++ ++/* must be called with ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++*/ ++int gr_apply_subject_to_task(struct task_struct *task) ++{ ++ struct acl_object_label *obj; ++ char *tmpname; ++ struct acl_subject_label *tmpsubj; ++ struct file *filp; ++ struct name_entry *nmatch; ++ ++ filp = task->exec_file; ++ if (filp == NULL) ++ return 0; ++ ++ /* the following is to apply the correct subject ++ on binaries running when the RBAC system ++ is enabled, when the binaries have been ++ replaced or deleted since their execution ++ ----- ++ when the RBAC system starts, the inode/dev ++ from exec_file will be one the RBAC system ++ is unaware of. It only knows the inode/dev ++ of the present file on disk, or the absence ++ of it. 
++ */ ++ preempt_disable(); ++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); ++ ++ nmatch = lookup_name_entry(tmpname); ++ preempt_enable(); ++ tmpsubj = NULL; ++ if (nmatch) { ++ if (nmatch->deleted) ++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); ++ else ++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); ++ if (tmpsubj != NULL) ++ task->acl = tmpsubj; ++ } ++ if (tmpsubj == NULL) ++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role); ++ if (task->acl) { ++ task->is_writable = 0; ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ } else { ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ struct task_struct *task, *task2; ++ struct acl_role_label *role = current->role; ++ __u16 acl_role_id = current->acl_role_id; ++ const struct cred *cred; ++ int ret; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(task2, task) { ++ /* check to see if we're called from the exit handler, ++ if so, only replace ACLs that have inherited the admin ++ ACL */ ++ ++ if (type && (task->role != role || ++ task->acl_role_id != acl_role_id)) ++ continue; ++ ++ task->acl_role_id = 0; ++ task->acl_sp_role = 0; ++ ++ if (task->exec_file) { ++ cred = __task_cred(task); ++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid); ++ ret = gr_apply_subject_to_task(task); ++ if (ret) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); ++ return ret; ++ } ++ } else { ++ // it's a kernel process ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN ++ task->acl->mode &= ~GR_PROCFIND; ++#endif ++ } ++ } while_each_thread(task2, task); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++void ++gr_learn_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ struct acl_subject_label *acl; ++ const struct cred *cred; ++ ++ if (unlikely((gr_status & GR_READY) && ++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) ++ goto skip_reslog; ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ gr_log_resource(task, res, wanted, gt); ++#endif ++ skip_reslog: ++ ++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) ++ return; ++ ++ acl = task->acl; ++ ++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || ++ !(acl->resmask & (1 << (unsigned short) res)))) ++ return; ++ ++ if (wanted >= acl->res[res].rlim_cur) { ++ unsigned long res_add; ++ ++ res_add = wanted; ++ switch (res) { ++ case RLIMIT_CPU: ++ res_add += GR_RLIM_CPU_BUMP; ++ break; ++ case RLIMIT_FSIZE: ++ res_add += GR_RLIM_FSIZE_BUMP; ++ break; ++ case RLIMIT_DATA: ++ res_add += 
GR_RLIM_DATA_BUMP; ++ break; ++ case RLIMIT_STACK: ++ res_add += GR_RLIM_STACK_BUMP; ++ break; ++ case RLIMIT_CORE: ++ res_add += GR_RLIM_CORE_BUMP; ++ break; ++ case RLIMIT_RSS: ++ res_add += GR_RLIM_RSS_BUMP; ++ break; ++ case RLIMIT_NPROC: ++ res_add += GR_RLIM_NPROC_BUMP; ++ break; ++ case RLIMIT_NOFILE: ++ res_add += GR_RLIM_NOFILE_BUMP; ++ break; ++ case RLIMIT_MEMLOCK: ++ res_add += GR_RLIM_MEMLOCK_BUMP; ++ break; ++ case RLIMIT_AS: ++ res_add += GR_RLIM_AS_BUMP; ++ break; ++ case RLIMIT_LOCKS: ++ res_add += GR_RLIM_LOCKS_BUMP; ++ break; ++ case RLIMIT_SIGPENDING: ++ res_add += GR_RLIM_SIGPENDING_BUMP; ++ break; ++ case RLIMIT_MSGQUEUE: ++ res_add += GR_RLIM_MSGQUEUE_BUMP; ++ break; ++ case RLIMIT_NICE: ++ res_add += GR_RLIM_NICE_BUMP; ++ break; ++ case RLIMIT_RTPRIO: ++ res_add += GR_RLIM_RTPRIO_BUMP; ++ break; ++ case RLIMIT_RTTIME: ++ res_add += GR_RLIM_RTTIME_BUMP; ++ break; ++ } ++ ++ acl->res[res].rlim_cur = res_add; ++ ++ if (wanted > acl->res[res].rlim_max) ++ acl->res[res].rlim_max = res_add; ++ ++ /* only log the subject filename, since resource logging is supported for ++ single-subject learning only */ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, cred->gid, acl->filename, ++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, ++ "", (unsigned long) res, &task->signal->saved_ip); ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) ++void ++pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *proc; ++ unsigned long flags; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ flags = pax_get_flags(task); ++ ++ proc = task->acl; ++ ++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) ++ flags &= ~MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) ++ flags &= ~MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) ++ flags &= ~MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) ++ flags &= ~MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) ++ flags &= ~MF_PAX_MPROTECT; ++ ++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) ++ flags |= MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) ++ flags |= MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) ++ flags |= MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) ++ flags |= MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) ++ flags |= MF_PAX_MPROTECT; ++ ++ pax_set_flags(task, flags); ++ ++ return; ++} ++#endif ++ ++#ifdef CONFIG_SYSCTL ++/* Eric Biederman likes breaking userland ABI and every inode-based security ++ system to save 35kb of memory */ ++ ++/* we modify the passed in filename, but adjust it back before returning */ ++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len) ++{ ++ struct name_entry *nmatch; ++ char *p, *lastp = NULL; ++ struct acl_object_label *obj = NULL, *tmp; ++ struct acl_subject_label *tmpsubj; ++ char c = '\0'; ++ ++ read_lock(&gr_inode_lock); ++ ++ p = name + len - 1; ++ do { ++ nmatch = lookup_name_entry(name); ++ if (lastp != NULL) ++ *lastp = c; ++ ++ if (nmatch == NULL) ++ goto next_component; ++ tmpsubj = current->acl; ++ do { ++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj); ++ if (obj != NULL) { ++ tmp = obj->globbed; ++ while (tmp) { ++ if 
(!glob_match(tmp->filename, name)) { ++ obj = tmp; ++ goto found_obj; ++ } ++ tmp = tmp->next; ++ } ++ goto found_obj; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++next_component: ++ /* end case */ ++ if (p == name) ++ break; ++ ++ while (*p != '/') ++ p--; ++ if (p == name) ++ lastp = p + 1; ++ else { ++ lastp = p; ++ p--; ++ } ++ c = *lastp; ++ *lastp = '\0'; ++ } while (1); ++found_obj: ++ read_unlock(&gr_inode_lock); ++ /* obj returned will always be non-null */ ++ return obj; ++} ++ ++/* returns 0 when allowing, non-zero on error ++ op of 0 is used for readdir, so we don't log the names of hidden files ++*/ ++__u32 ++gr_handle_sysctl(const struct ctl_table *table, const int op) ++{ ++ struct ctl_table *tmp; ++ const char *proc_sys = "/proc/sys"; ++ char *path; ++ struct acl_object_label *obj; ++ unsigned short len = 0, pos = 0, depth = 0, i; ++ __u32 err = 0; ++ __u32 mode = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ /* for now, ignore operations on non-sysctl entries if it's not a ++ readdir*/ ++ if (table->child != NULL && op != 0) ++ return 0; ++ ++ mode |= GR_FIND; ++ /* it's only a read if it's an entry, read on dirs is for readdir */ ++ if (op & MAY_READ) ++ mode |= GR_READ; ++ if (op & MAY_WRITE) ++ mode |= GR_WRITE; ++ ++ preempt_disable(); ++ ++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ++ /* it's only a read/write if it's an actual entry, not a dir ++ (which are opened for readdir) ++ */ ++ ++ /* convert the requested sysctl entry into a pathname */ ++ ++ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ len += strlen(tmp->procname); ++ len++; ++ depth++; ++ } ++ ++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) { ++ /* deny */ ++ goto out; ++ } ++ ++ memset(path, 0, PAGE_SIZE); ++ ++ memcpy(path, proc_sys, strlen(proc_sys)); ++ ++ pos += strlen(proc_sys); ++ ++ for (; depth > 0; depth--) { ++ path[pos] = '/'; ++ pos++; ++ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ if (depth == i) { ++ memcpy(path + pos, tmp->procname, ++ strlen(tmp->procname)); ++ pos += strlen(tmp->procname); ++ } ++ i++; ++ } ++ } ++ ++ obj = gr_lookup_by_name(path, pos); ++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); ++ ++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && ++ ((err & mode) != mode))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ err = 0; ++ gr_log_learn_sysctl(path, new_mode); ++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) { ++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path); ++ err = -ENOENT; ++ } else if (!(err & GR_FIND)) { ++ err = -ENOENT; ++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) { ++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? " writing" : ""); ++ err = -EACCES; ++ } else if ((err & mode) != mode) { ++ err = -EACCES; ++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) { ++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? 
" writing" : ""); ++ err = 0; ++ } else ++ err = 0; ++ ++ out: ++ preempt_enable(); ++ ++ return err; ++} ++#endif ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ struct file *filp; ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ filp = task->exec_file; ++ ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 1; ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 0; ++ } ++#endif ++ ++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ ++ if (retmode & GR_NOPTRACE) ++ return 1; ++ ++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) ++ && (current->acl != task->acl || (current->acl != current->role->root_label ++ && current->pid != task->pid))) ++ return 1; ++ ++ return 0; ++} ++ ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ if (!(current->role->roletype & GR_ROLE_GOD)) ++ return; ++ ++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n", ++ p->role->rolename, gr_task_roletype_to_char(p), ++ p->acl->filename); ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ read_unlock(&tasklist_lock); ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ read_unlock(&tasklist_lock); ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) ++ return 0; ++#endif ++ ++ read_lock(&grsec_exec_file_lock); ++ if (unlikely(!task->exec_file)) { ++ read_unlock(&grsec_exec_file_lock); ++ return 0; ++ } ++ ++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ ++ if (retmode & GR_NOPTRACE) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ if (retmode & GR_PTRACERD) { ++ switch (request) { ++ case PTRACE_SEIZE: ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ case PTRACE_POKEUSR: ++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) ++ case PTRACE_SETREGS: ++ case PTRACE_SETFPREGS: ++#endif ++#ifdef CONFIG_X86 ++ case PTRACE_SETFPXREGS: ++#endif ++#ifdef CONFIG_ALTIVEC ++ case PTRACE_SETVRREGS: ++#endif ++ return 1; ++ default: ++ return 0; ++ } ++ } else if 
(!(current->acl->mode & GR_POVERRIDE) && ++ !(current->role->roletype & GR_ROLE_GOD) && ++ (current->acl != task->acl)) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int is_writable_mmap(const struct file *filp) ++{ ++ struct task_struct *task = current; ++ struct acl_object_label *obj, *obj2; ++ ++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && ++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) { ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role->root_label); ++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++void ++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ unsigned long runtime; ++ unsigned long cputime; ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ struct timespec timeval; ++ ++ if (unlikely(!(gr_status & GR_READY) || !task->acl || ++ !(task->acl->mode & GR_PROCACCT))) ++ return; ++ ++ do_posix_clock_monotonic_gettime(&timeval); ++ runtime = timeval.tv_sec - task->start_time.tv_sec; ++ wday = runtime / (3600 * 24); ++ runtime -= wday * (3600 * 24); ++ whr = runtime / 3600; ++ runtime -= whr * 3600; ++ wmin = runtime / 60; ++ runtime -= wmin * 60; ++ wsec = runtime; ++ ++ cputime = (task->utime + task->stime) / HZ; ++ cday = cputime / (3600 * 24); ++ cputime -= cday * (3600 * 24); ++ chr = cputime / 3600; ++ cputime -= chr * 3600; ++ cmin = cputime / 60; ++ cputime -= cmin * 60; ++ csec = cputime; ++ ++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, 
wsec, cday, chr, cmin, csec, code); ++ ++ return; ++} ++ ++void gr_set_kernel_label(struct task_struct *task) ++{ ++ if (gr_status & GR_READY) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ } ++ return; ++} ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ struct task_struct *task; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred; ++#endif ++ int ret = 0; ++ ++ /* restrict taskstats viewing to un-chrooted root users ++ who have the 'view' subject flag if the RBAC system is enabled ++ */ ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ task = find_task_by_vpid(pid); ++ if (task) { ++#ifdef CONFIG_GRKERNSEC_CHROOT ++ if (proc_is_chrooted(task)) ++ ret = -EACCES; ++#endif ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ cred = __task_cred(task); ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->uid != 0) ++ ret = -EACCES; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID)) ++ ret = -EACCES; ++#endif ++#endif ++ if (gr_status & GR_READY) { ++ if (!(task->acl->mode & GR_VIEW)) ++ ret = -EACCES; ++ } ++ } else ++ ret = -ENOENT; ++ ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return ret; ++} ++#endif ++ ++/* AUXV entries are filled via a descendant of search_binary_handler ++ after we've already applied the subject for the target ++*/ ++int gr_acl_enable_at_secure(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & GR_ATSECURE) ++ return 1; ++ ++ return 0; ++} ++ ++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) ++{ ++ struct task_struct *task = current; ++ struct dentry *dentry = file->f_path.dentry; ++ struct vfsmount *mnt = file->f_path.mnt; ++ struct acl_object_label *obj, *tmp; ++ struct acl_subject_label *subj; ++ unsigned int bufsize; ++ int is_not_root; ++ char *path; ++ dev_t dev = __get_dev(dentry); ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 1; ++ ++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return 1; ++ ++ /* ignore Eric Biederman */ ++ if (IS_PRIVATE(dentry->d_inode)) ++ return 1; ++ ++ subj = task->acl; ++ do { ++ obj = lookup_acl_obj_label(ino, dev, subj); ++ if (obj != NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ } while ((subj = subj->parent_subject)); ++ ++ /* this is purely an optimization since we're looking for an object ++ for the directory we're doing a readdir on ++ if it's possible for any globbed object to match the entry we're ++ filling into the directory, then the object we find here will be ++ an anchor point with attached globbed objects ++ */ ++ obj = chk_obj_label_noglob(dentry, mnt, task->acl); ++ if (obj->globbed == NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ ++ is_not_root = ((obj->filename[0] == '/') && ++ (obj->filename[1] == '\0')) ? 
0 : 1; ++ bufsize = PAGE_SIZE - namelen - is_not_root; ++ ++ /* check bufsize > PAGE_SIZE || bufsize == 0 */ ++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) ++ return 1; ++ ++ preempt_disable(); ++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ bufsize); ++ ++ bufsize = strlen(path); ++ ++ /* if base is "/", don't append an additional slash */ ++ if (is_not_root) ++ *(path + bufsize) = '/'; ++ memcpy(path + bufsize + is_not_root, name, namelen); ++ *(path + bufsize + namelen + is_not_root) = '\0'; ++ ++ tmp = obj->globbed; ++ while (tmp) { ++ if (!glob_match(tmp->filename, path)) { ++ preempt_enable(); ++ return (tmp->mode & GR_FIND) ? 1 : 0; ++ } ++ tmp = tmp->next; ++ } ++ preempt_enable(); ++ return (obj->mode & GR_FIND) ? 1 : 0; ++} ++ ++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE ++EXPORT_SYMBOL(gr_acl_is_enabled); ++#endif ++EXPORT_SYMBOL(gr_learn_resource); ++EXPORT_SYMBOL(gr_set_kernel_label); ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL(gr_check_user_change); ++EXPORT_SYMBOL(gr_check_group_change); ++#endif ++ +diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c +new file mode 100644 +index 0000000..34fefda +--- /dev/null ++++ b/grsecurity/gracl_alloc.c +@@ -0,0 +1,105 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++ ++static unsigned long alloc_stack_next = 1; ++static unsigned long alloc_stack_size = 1; ++static void **alloc_stack; ++ ++static __inline__ int ++alloc_pop(void) ++{ ++ if (alloc_stack_next == 1) ++ return 0; ++ ++ kfree(alloc_stack[alloc_stack_next - 2]); ++ ++ alloc_stack_next--; ++ ++ return 1; ++} ++ ++static __inline__ int ++alloc_push(void *buf) ++{ ++ if (alloc_stack_next >= alloc_stack_size) ++ return 1; ++ ++ alloc_stack[alloc_stack_next - 1] = buf; ++ ++ alloc_stack_next++; ++ ++ return 0; ++} ++ ++void * ++acl_alloc(unsigned long len) ++{ ++ void *ret = NULL; ++ ++ if (!len || len > PAGE_SIZE) ++ goto out; ++ ++ ret = kmalloc(len, GFP_KERNEL); ++ ++ if (ret) { ++ if (alloc_push(ret)) { ++ kfree(ret); ++ ret = NULL; ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++void * ++acl_alloc_num(unsigned long num, unsigned long len) ++{ ++ if (!len || (num > (PAGE_SIZE / len))) ++ return NULL; ++ ++ return acl_alloc(num * len); ++} ++ ++void ++acl_free_all(void) ++{ ++ if (gr_acl_is_enabled() || !alloc_stack) ++ return; ++ ++ while (alloc_pop()) ; ++ ++ if (alloc_stack) { ++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) ++ kfree(alloc_stack); ++ else ++ vfree(alloc_stack); ++ } ++ ++ alloc_stack = NULL; ++ alloc_stack_size = 1; ++ alloc_stack_next = 1; ++ ++ return; ++} ++ ++int ++acl_alloc_stack_init(unsigned long size) ++{ ++ if ((size * sizeof (void *)) <= PAGE_SIZE) ++ alloc_stack = ++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); ++ else ++ alloc_stack = (void **) vmalloc(size * sizeof (void *)); ++ ++ alloc_stack_size = size; ++ ++ if (!alloc_stack) ++ return 0; ++ else ++ return 1; ++} +diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c +new file mode 100644 +index 0000000..955ddfb +--- /dev/null ++++ b/grsecurity/gracl_cap.c +@@ -0,0 +1,101 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern const char *captab_log[]; ++extern int captab_log_entries; ++ ++int ++gr_acl_is_capable(const int cap) ++{ ++ struct task_struct *task = 
current; ++ const struct cred *cred = current_cred(); ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ kernel_cap_t cap_audit = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ cap_audit = curracl->cap_invert_audit; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ if (cap_raised(curracl->cap_invert_audit, cap)) ++ cap_raise(cap_audit, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) { ++ if (cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); ++ return 1; ++ } ++ ++ curracl = task->acl; ++ ++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ && cap_raised(cred->cap_effective, cap)) { ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, ++ cred->gid, task->exec_file ? ++ gr_to_filename(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : curracl->filename, ++ curracl->filename, 0UL, ++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip); ++ return 1; ++ } ++ ++ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); ++ return 0; ++} ++ ++int ++gr_acl_is_capable_nolog(const int cap) ++{ ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = current->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) ++ return 1; ++ ++ return 0; ++} ++ +diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c +new file mode 100644 +index 0000000..88d0e87 +--- /dev/null ++++ b/grsecurity/gracl_fs.c +@@ -0,0 +1,435 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/types.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/stat.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++umode_t ++gr_acl_umask(void) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ return current->role->umask; ++} ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return GR_FIND; ++ ++ mode = ++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | 
GR_SUPPRESS, mnt); ++ ++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return mode; ++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_FIND))) ++ return 0; ++ ++ return GR_FIND; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ int acc_mode) ++{ ++ __u32 reqmode = GR_FIND; ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return reqmode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ else if (acc_mode & MAY_WRITE) ++ reqmode |= GR_WRITE; ++ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode, ++ const int imode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ // if a directory was required or the directory already exists, then ++ // don't count this open as a read ++ if ((acc_mode & MAY_READ) && ++ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)))) ++ reqmode |= GR_READ; ++ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID))) ++ reqmode |= GR_SETID; ++ ++ mode = ++ gr_check_create(dentry, p_dentry, p_mnt, ++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? 
" appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, ++ const int fmode) ++{ ++ __u32 mode, reqmode = GR_FIND; ++ ++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_EXEC; ++ if (fmode & S_IWOTH) ++ reqmode |= GR_WRITE; ++ if (fmode & S_IROTH) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, ++ umode_t *modeptr) ++{ ++ umode_t mode; ++ ++ *modeptr &= ~gr_acl_umask(); ++ mode = *modeptr; ++ ++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) ++ return 1; ++ ++ if (unlikely(mode & (S_ISUID | S_ISGID))) { ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, ++ GR_CHMOD_ACL_MSG); ++ } else { ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); ++ } ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, 
mnt, GR_EXEC, GR_EXEC_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, ++ GR_UNIXCONNECT_ACL_MSG); ++} ++ ++/* hardlinks require at minimum create and link permission, ++ any additional privilege required is based on the ++ privilege of the file being linked to ++*/ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const char *to) ++{ ++ __u32 mode; ++ __u32 needmode = GR_CREATE | GR_LINK; ++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; ++ ++ mode = ++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, ++ old_mnt); ++ ++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const char *from) ++{ ++ __u32 needmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ mode = ++ gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_CREATE | GR_AUDIT_CREATE | ++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); ++ ++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { ++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return (GR_WRITE | GR_CREATE); ++} ++ ++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ if (unlikely(mode & (S_ISUID | S_ISGID))) ++ reqmode |= GR_SETID; ++ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ reqmode, GR_MKNOD_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt) ++{ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); ++} ++ ++#define RENAME_CHECK_SUCCESS(old, new) \ ++ (((old & 
(GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ ++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) ++ ++int ++gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname) ++{ ++ __u32 comp1, comp2; ++ int error = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (!new_dentry->d_inode) { ++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | ++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); ++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, old_mnt); ++ } else { ++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | ++ GR_CREATE | GR_DELETE | ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, parent_mnt); ++ comp2 = ++ gr_search_file(old_dentry, ++ GR_READ | GR_WRITE | GR_AUDIT_READ | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); ++ } ++ ++ if (RENAME_CHECK_SUCCESS(comp1, comp2) && ++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) ++ && !(comp2 & GR_SUPPRESS)) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ error = -EACCES; ++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) ++ error = -EACCES; ++ ++ return error; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ u16 id; ++ char *rolename; ++ struct file *exec_file; ++ ++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() && ++ !(current->role->roletype & GR_ROLE_PERSIST))) { ++ id = current->acl_role_id; ++ rolename = current->role->rolename; ++ gr_set_acls(1); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); ++ } ++ ++ write_lock(&grsec_exec_file_lock); ++ exec_file = current->exec_file; ++ current->exec_file = NULL; ++ write_unlock(&grsec_exec_file_lock); ++ ++ if (exec_file) ++ fput(exec_file); ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (task != current && task->acl->mode & GR_PROTPROCFD) ++ return -EACCES; ++ ++ return 0; ++} +diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c +new file mode 100644 +index 0000000..17050ca +--- /dev/null ++++ b/grsecurity/gracl_ip.c +@@ -0,0 +1,381 @@ ++#include <linux/kernel.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/skbuff.h> ++#include <linux/ip.h> ++#include <linux/udp.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/netdevice.h> ++#include <linux/inetdevice.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++#define GR_BIND 0x01 ++#define GR_CONNECT 0x02 ++#define GR_INVERT 0x04 ++#define GR_BINDOVERRIDE 0x08 ++#define GR_CONNECTOVERRIDE 0x10 ++#define GR_SOCK_FAMILY 0x20 ++ ++static const char * gr_protocols[IPPROTO_MAX] = { ++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", ++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", ++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", 
"trunk-1", ++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", ++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", ++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", ++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", ++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", ++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", ++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", ++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", ++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", ++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", ++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", ++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", ++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", ++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", ++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", ++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", ++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", ++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", ++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", ++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", ++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", ++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", ++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", ++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", ++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", ++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", ++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", ++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", ++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", ++ }; ++ ++static const char * gr_socktypes[SOCK_MAX] = { ++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", ++ "unknown:7", "unknown:8", "unknown:9", "packet" ++ }; ++ ++static const char * gr_sockfamilies[AF_MAX+1] = { ++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25", ++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash", ++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28", ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf" ++ }; 
++ ++const char * ++gr_proto_to_name(unsigned char proto) ++{ ++ return gr_protocols[proto]; ++} ++ ++const char * ++gr_socktype_to_name(unsigned char type) ++{ ++ return gr_socktypes[type]; ++} ++ ++const char * ++gr_sockfamily_to_name(unsigned char family) ++{ ++ return gr_sockfamilies[family]; ++} ++ ++int ++gr_search_socket(const int domain, const int type, const int protocol) ++{ ++ struct acl_subject_label *curr; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ goto exit; ++ ++ if ((domain < 0) || (type < 0) || (protocol < 0) || ++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX)) ++ goto exit; // let the kernel handle it ++ ++ curr = current->acl; ++ ++ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) { ++ /* the family is allowed, if this is PF_INET allow it only if ++ the extra sock type/protocol checks pass */ ++ if (domain == PF_INET) ++ goto inet_check; ++ goto exit; ++ } else { ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? ++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, domain, 0, 0, GR_SOCK_FAMILY, ++ &current->signal->saved_ip); ++ goto exit; ++ } ++ goto exit_fail; ++ } ++ ++inet_check: ++ /* the rest of this checking is for IPv4 only */ ++ if (!curr->ips) ++ goto exit; ++ ++ if ((curr->ip_type & (1 << type)) && ++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) ++ goto exit; ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ /* we don't place acls on raw sockets , and sometimes ++ dgram/ip sockets are opened for ioctl and not ++ bind/connect, so we'll fake a bind learn log */ ++ if (type == SOCK_RAW || type == SOCK_PACKET) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? ++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_CONNECT, &current->signal->saved_ip); ++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_BIND, &current->signal->saved_ip); ++ } ++ /* we'll log when they use connect or bind */ ++ goto exit; ++ } ++ ++exit_fail: ++ if (domain == PF_INET) ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), ++ gr_socktype_to_name(type), gr_proto_to_name(protocol)); ++ else ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain), ++ gr_socktype_to_name(type), protocol); ++ ++ return 0; ++exit: ++ return 1; ++} ++ ++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) ++{ ++ if ((ip->mode & mode) && ++ (ip_port >= ip->low) && ++ (ip_port <= ip->high) && ++ ((ntohl(ip_addr) & our_netmask) == ++ (ntohl(our_addr) & our_netmask)) ++ && (ip->proto[protocol / 32] & (1 << (protocol % 32))) ++ && (ip->type & (1 << type))) { ++ if (ip->mode & GR_INVERT) ++ return 2; // specifically denied ++ else ++ return 1; // allowed ++ } ++ ++ return 0; // not specifically allowed, may continue parsing ++} ++ ++static int ++gr_search_connectbind(const int full_mode, struct sock *sk, ++ struct sockaddr_in *addr, const int type) ++{ ++ char iface[IFNAMSIZ] = {0}; ++ struct acl_subject_label *curr; ++ struct acl_ip_label *ip; ++ struct inet_sock *isk; ++ struct net_device *dev; ++ struct in_device *idev; ++ unsigned long i; ++ int ret; ++ int mode = full_mode & (GR_BIND | GR_CONNECT); ++ __u32 ip_addr = 0; ++ __u32 our_addr; ++ __u32 our_netmask; ++ char *p; ++ __u16 ip_port = 0; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) ++ return 0; ++ ++ curr = current->acl; ++ isk = inet_sk(sk); ++ ++ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */ ++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) ++ addr->sin_addr.s_addr = curr->inaddr_any_override; ++ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) { ++ struct sockaddr_in saddr; ++ int err; ++ ++ saddr.sin_family = AF_INET; ++ saddr.sin_addr.s_addr = curr->inaddr_any_override; ++ saddr.sin_port = isk->inet_sport; ++ ++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ ++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ } ++ ++ if (!curr->ips) ++ return 0; ++ ++ ip_addr = addr->sin_addr.s_addr; ++ ip_port = ntohs(addr->sin_port); ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &ip_addr, ip_port, type, ++ sk->sk_protocol, mode, &current->signal->saved_ip); ++ return 0; ++ } ++ ++ for (i = 0; i < curr->ip_num; i++) { ++ ip = *(curr->ips + i); ++ if (ip->iface != NULL) { ++ strncpy(iface, ip->iface, IFNAMSIZ - 1); ++ p = strchr(iface, ':'); ++ if (p != NULL) ++ *p = '\0'; ++ dev = dev_get_by_name(sock_net(sk), iface); ++ if (dev == NULL) ++ continue; ++ idev = in_dev_get(dev); ++ if (idev == NULL) { ++ dev_put(dev); ++ continue; ++ } ++ rcu_read_lock(); ++ for_ifa(idev) { ++ if (!strcmp(ip->iface, ifa->ifa_label)) { ++ our_addr = ifa->ifa_address; ++ our_netmask = 0xffffffff; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ return 0; ++ } else if (ret == 2) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ goto denied; ++ } ++ } ++ } endfor_ifa(idev); ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ } else { ++ our_addr = ip->addr; ++ our_netmask = ip->netmask; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) ++ return 0; ++ else if (ret == 2) ++ goto denied; ++ } ++ } ++ ++denied: ++ if (mode == GR_BIND) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ else if (mode == GR_CONNECT) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ ++ return -EACCES; ++} ++ ++int ++gr_search_connect(struct socket *sock, struct sockaddr_in *addr) ++{ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int ++gr_search_bind(struct socket *sock, struct sockaddr_in *addr) ++{ ++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int gr_search_listen(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; ++ addr.sin_port = inet_sk(sk)->inet_sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int gr_search_accept(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; ++ addr.sin_port = inet_sk(sk)->inet_sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int ++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) ++{ ++ if (addr) ++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); ++ else { ++ struct sockaddr_in sin; ++ const struct inet_sock *inet = inet_sk(sk); ++ ++ sin.sin_addr.s_addr = inet->inet_daddr; ++ sin.sin_port = inet->inet_dport; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++ } ++} ++ ++int ++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) ++{ ++ struct sockaddr_in sin; ++ ++ if (unlikely(skb->len < sizeof (struct udphdr))) ++ return 0; // skip this packet ++ ++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr; ++ sin.sin_port = udp_hdr(skb)->source; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++} +diff --git a/grsecurity/gracl_learn.c
b/grsecurity/gracl_learn.c +new file mode 100644 +index 0000000..25f54ef +--- /dev/null ++++ b/grsecurity/gracl_learn.c +@@ -0,0 +1,207 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/poll.h> ++#include <linux/string.h> ++#include <linux/file.h> ++#include <linux/types.h> ++#include <linux/vmalloc.h> ++#include <linux/grinternal.h> ++ ++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, ++ size_t count, loff_t *ppos); ++extern int gr_acl_is_enabled(void); ++ ++static DECLARE_WAIT_QUEUE_HEAD(learn_wait); ++static int gr_learn_attached; ++ ++/* use a 512k buffer */ ++#define LEARN_BUFFER_SIZE (512 * 1024) ++ ++static DEFINE_SPINLOCK(gr_learn_lock); ++static DEFINE_MUTEX(gr_learn_user_mutex); ++ ++/* we need to maintain two buffers, so that the kernel context of grlearn ++ uses a semaphore around the userspace copying, and the other kernel contexts ++ use a spinlock when copying into the buffer, since they cannot sleep ++*/ ++static char *learn_buffer; ++static char *learn_buffer_user; ++static int learn_buffer_len; ++static int learn_buffer_user_len; ++ ++static ssize_t ++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ ssize_t retval = 0; ++ ++ add_wait_queue(&learn_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ do { ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ if (learn_buffer_len) ++ break; ++ spin_unlock(&gr_learn_lock); ++ mutex_unlock(&gr_learn_user_mutex); ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ goto out; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ goto out; ++ } ++ ++ schedule(); ++ } while (1); ++ ++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); ++ learn_buffer_user_len = learn_buffer_len; ++ retval = learn_buffer_len; ++ learn_buffer_len = 0; ++ ++ spin_unlock(&gr_learn_lock); ++ ++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) ++ retval = -EFAULT; ++ ++ mutex_unlock(&gr_learn_user_mutex); ++out: ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&learn_wait, &wait); ++ return retval; ++} ++ ++static unsigned int ++poll_learn(struct file * file, poll_table * wait) ++{ ++ poll_wait(file, &learn_wait, wait); ++ ++ if (learn_buffer_len) ++ return (POLLIN | POLLRDNORM); ++ ++ return 0; ++} ++ ++void ++gr_clear_learn_entries(void) ++{ ++ char *tmp; ++ ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ ++ return; ++} ++ ++void ++gr_add_learn_entry(const char *fmt, ...) 
++{ ++ va_list args; ++ unsigned int len; ++ ++ if (!gr_learn_attached) ++ return; ++ ++ spin_lock(&gr_learn_lock); ++ ++ /* leave a gap at the end so we know when it's "full" but don't have to ++ compute the exact length of the string we're trying to append ++ */ ++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ return; ++ } ++ if (learn_buffer == NULL) { ++ spin_unlock(&gr_learn_lock); ++ return; ++ } ++ ++ va_start(args, fmt); ++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); ++ va_end(args); ++ ++ learn_buffer_len += len + 1; ++ ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ ++ return; ++} ++ ++static int ++open_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ && gr_learn_attached) ++ return -EBUSY; ++ if (file->f_mode & FMODE_READ) { ++ int retval = 0; ++ mutex_lock(&gr_learn_user_mutex); ++ if (learn_buffer == NULL) ++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer_user == NULL) ++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ if (learn_buffer_user == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 1; ++out_error: ++ mutex_unlock(&gr_learn_user_mutex); ++ return retval; ++ } ++ return 0; ++} ++ ++static int ++close_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ) { ++ char *tmp = NULL; ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ } ++ ++ return 0; ++} ++ ++const struct file_operations grsec_fops = { ++ .read = read_learn, ++ .write = write_grsec_handler, ++ .open = open_learn, ++ .release = close_learn, ++ .poll = poll_learn, ++}; +diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c +new file mode 100644 +index 0000000..39645c9 +--- /dev/null ++++ b/grsecurity/gracl_res.c +@@ -0,0 +1,68 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grinternal.h> ++ ++static const char *restab_log[] = { ++ [RLIMIT_CPU] = "RLIMIT_CPU", ++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE", ++ [RLIMIT_DATA] = "RLIMIT_DATA", ++ [RLIMIT_STACK] = "RLIMIT_STACK", ++ [RLIMIT_CORE] = "RLIMIT_CORE", ++ [RLIMIT_RSS] = "RLIMIT_RSS", ++ [RLIMIT_NPROC] = "RLIMIT_NPROC", ++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE", ++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", ++ [RLIMIT_AS] = "RLIMIT_AS", ++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS", ++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", ++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", ++ [RLIMIT_NICE] = "RLIMIT_NICE", ++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", ++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME", ++ [GR_CRASH_RES] = "RLIMIT_CRASH" ++}; ++ ++void ++gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ const struct cred *cred; ++ unsigned long rlim; ++ ++ if (!gr_acl_is_enabled() && !grsec_resource_logging) ++ return; ++ ++ // not yet supported resource ++ if (unlikely(!restab_log[res])) ++ return; ++ ++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) ++ rlim 
= task_rlimit_max(task, res); ++ else ++ rlim = task_rlimit(task, res); ++ ++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) ++ return; ++ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ ++ if (res == RLIMIT_NPROC && ++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || ++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_MEMLOCK && ++ cap_raised(cred->cap_effective, CAP_IPC_LOCK)) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE)) ++ goto out_rcu_unlock; ++ rcu_read_unlock(); ++ ++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim); ++ ++ return; ++out_rcu_unlock: ++ rcu_read_unlock(); ++ return; ++} +diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c +new file mode 100644 +index 0000000..5556be3 +--- /dev/null ++++ b/grsecurity/gracl_segv.c +@@ -0,0 +1,299 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/timer.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++static struct crash_uid *uid_set; ++static unsigned short uid_used; ++static DEFINE_SPINLOCK(gr_uid_lock); ++extern rwlock_t gr_inode_lock; ++extern struct acl_subject_label * ++ lookup_acl_subj_label(const ino_t inode, const dev_t dev, ++ struct acl_role_label *role); ++ ++#ifdef CONFIG_BTRFS_FS ++extern dev_t get_btrfs_dev_from_inode(struct inode *inode); ++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); ++#endif ++ ++static inline dev_t __get_dev(const struct dentry *dentry) ++{ ++#ifdef CONFIG_BTRFS_FS ++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr) ++ return get_btrfs_dev_from_inode(dentry->d_inode); ++ else ++#endif ++ return dentry->d_inode->i_sb->s_dev; ++} ++ ++int ++gr_init_uidset(void) ++{ ++ uid_set = ++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); ++ uid_used = 0; ++ ++ return uid_set ? 
1 : 0; ++} ++ ++void ++gr_free_uidset(void) ++{ ++ if (uid_set) ++ kfree(uid_set); ++ ++ return; ++} ++ ++int ++gr_find_uid(const uid_t uid) ++{ ++ struct crash_uid *tmp = uid_set; ++ uid_t buid; ++ int low = 0, high = uid_used - 1, mid; ++ ++ while (high >= low) { ++ mid = (low + high) >> 1; ++ buid = tmp[mid].uid; ++ if (buid == uid) ++ return mid; ++ if (buid > uid) ++ high = mid - 1; ++ if (buid < uid) ++ low = mid + 1; ++ } ++ ++ return -1; ++} ++ ++static __inline__ void ++gr_insertsort(void) ++{ ++ unsigned short i, j; ++ struct crash_uid index; ++ ++ for (i = 1; i < uid_used; i++) { ++ index = uid_set[i]; ++ j = i; ++ while ((j > 0) && uid_set[j - 1].uid > index.uid) { ++ uid_set[j] = uid_set[j - 1]; ++ j--; ++ } ++ uid_set[j] = index; ++ } ++ ++ return; ++} ++ ++static __inline__ void ++gr_insert_uid(const uid_t uid, const unsigned long expires) ++{ ++ int loc; ++ ++ if (uid_used == GR_UIDTABLE_MAX) ++ return; ++ ++ loc = gr_find_uid(uid); ++ ++ if (loc >= 0) { ++ uid_set[loc].expires = expires; ++ return; ++ } ++ ++ uid_set[uid_used].uid = uid; ++ uid_set[uid_used].expires = expires; ++ uid_used++; ++ ++ gr_insertsort(); ++ ++ return; ++} ++ ++void ++gr_remove_uid(const unsigned short loc) ++{ ++ unsigned short i; ++ ++ for (i = loc + 1; i < uid_used; i++) ++ uid_set[i - 1] = uid_set[i]; ++ ++ uid_used--; ++ ++ return; ++} ++ ++int ++gr_check_crash_uid(const uid_t uid) ++{ ++ int loc; ++ int ret = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ spin_lock(&gr_uid_lock); ++ loc = gr_find_uid(uid); ++ ++ if (loc < 0) ++ goto out_unlock; ++ ++ if (time_before_eq(uid_set[loc].expires, get_seconds())) ++ gr_remove_uid(loc); ++ else ++ ret = 1; ++ ++out_unlock: ++ spin_unlock(&gr_uid_lock); ++ return ret; ++} ++ ++static __inline__ int ++proc_is_setxid(const struct cred *cred) ++{ ++ if (cred->uid != cred->euid || cred->uid != cred->suid || ++ cred->uid != cred->fsuid) ++ return 1; ++ if (cred->gid != cred->egid || cred->gid != cred->sgid || ++ cred->gid != cred->fsgid) ++ return 1; ++ ++ return 0; ++} ++ ++extern int gr_fake_force_sig(int sig, struct task_struct *t); ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ struct acl_subject_label *curr; ++ struct task_struct *tsk, *tsk2; ++ const struct cred *cred; ++ const struct cred *cred2; ++ ++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) ++ return; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curr = task->acl; ++ ++ if (!(curr->resmask & (1 << GR_CRASH_RES))) ++ return; ++ ++ if (time_before_eq(curr->expires, get_seconds())) { ++ curr->expires = 0; ++ curr->crashes = 0; ++ } ++ ++ curr->crashes++; ++ ++ if (!curr->expires) ++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) { ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ if (cred->uid && proc_is_setxid(cred)) { ++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ spin_lock(&gr_uid_lock); ++ gr_insert_uid(cred->uid, curr->expires); ++ spin_unlock(&gr_uid_lock); ++ curr->expires = 0; ++ curr->crashes = 0; ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != task && cred2->uid == cred->uid) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } else { ++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, 
curr->res[GR_CRASH_RES].rlim_max); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(tsk2, tsk) { ++ if (likely(tsk != task)) { ++ // if this thread has the same subject as the one that triggered ++ // RES_CRASH and it's the same binary, kill it ++ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ } ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ struct acl_subject_label *curr; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ read_lock(&gr_inode_lock); ++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino, ++ __get_dev(filp->f_path.dentry), ++ current->role); ++ read_unlock(&gr_inode_lock); ++ ++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || ++ (!curr->crashes && !curr->expires)) ++ return 0; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) ++ return 1; ++ else if (time_before_eq(curr->expires, get_seconds())) { ++ curr->crashes = 0; ++ curr->expires = 0; ++ } ++ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ struct acl_subject_label *curracl; ++ __u32 curr_ip; ++ struct task_struct *p, *p2; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curracl = task->acl; ++ curr_ip = task->signal->curr_ip; ++ ++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ if (p->signal->curr_ip == curr_ip) ++ gr_fake_force_sig(SIGKILL, p); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ } else if (curracl->mode & GR_KILLPROC) ++ gr_fake_force_sig(SIGKILL, task); ++ ++ return; ++} +diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c +new file mode 100644 +index 0000000..9d83a69 +--- /dev/null ++++ b/grsecurity/gracl_shm.c +@@ -0,0 +1,40 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/ipc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, const int shmid) ++{ ++ struct task_struct *task; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ task = find_task_by_vpid(shm_cprid); ++ ++ if (unlikely(!task)) ++ task = find_task_by_vpid(shm_lapid); ++ ++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || ++ (task->pid == shm_lapid)) && ++ (task->acl->mode & GR_PROTSHM) && ++ (task->acl != current->acl))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 1; ++} +diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c +new file mode 100644 +index 0000000..bc0be01 +--- /dev/null ++++ b/grsecurity/grsec_chdir.c +@@ -0,0 +1,19 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ if 
((grsec_enable_chdir && grsec_enable_group && ++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && ++ !grsec_enable_group)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); ++ } ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c +new file mode 100644 +index 0000000..a2dc675 +--- /dev/null ++++ b/grsecurity/grsec_chroot.c +@@ -0,0 +1,351 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/mount.h> ++#include <linux/types.h> ++#include <linux/pid_namespace.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void gr_set_chroot_entries(struct task_struct *task, struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry && ++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root) ++ task->gr_is_chrooted = 1; ++ else ++ task->gr_is_chrooted = 0; ++ ++ task->gr_chroot_dentry = path->dentry; ++#endif ++ return; ++} ++ ++void gr_clear_chroot_entries(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ task->gr_is_chrooted = 0; ++ task->gr_chroot_dentry = NULL; ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_unix(const pid_t pid) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ struct task_struct *p; ++ ++ if (unlikely(!grsec_enable_chroot_unix)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ p = find_task_by_vpid_unrestricted(pid); ++ if (unlikely(p && !have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_nice(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && (niceval < task_nice(p)) ++ && proc_is_chrooted(current)) { ++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ struct task_struct *p; ++ int ret = 0; ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if (!have_same_root(current, p)) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ return ret; ++#endif ++ return 0; ++} ++ ++int ++gr_pid_is_chrooted(struct task_struct *p) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) ++ return 0; ++ ++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || ++ !have_same_root(current, p)) { ++ return 1; ++ } ++#endif ++ return 0; ++} ++ 
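An illustrative aside, not part of the patch: gr_is_outside_chroot() just below asks whether a dentry/vfsmount pair still lies under the calling task's root, using path_is_under(). A rough userspace analogue of the same containment test, built on realpath() and a hypothetical is_outside_root() helper (string prefixes only, ignoring bind mounts), could look like this:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper (not in the patch): returns 1 if 'path' resolves
 * outside 'root', 0 if it stays inside, -1 if either path cannot be
 * resolved.  Bind mounts and similar cases are not handled here. */
static int is_outside_root(const char *path, const char *root)
{
	char rpath[PATH_MAX], rroot[PATH_MAX];
	size_t rootlen;

	if (!realpath(path, rpath) || !realpath(root, rroot))
		return -1;

	if (strcmp(rroot, "/") == 0)
		return 0;	/* everything is under the real root */

	rootlen = strlen(rroot);
	if (strncmp(rpath, rroot, rootlen) != 0)
		return 1;
	/* reject prefix-only matches such as "/srv/jailfoo" vs "/srv/jail" */
	return (rpath[rootlen] == '/' || rpath[rootlen] == '\0') ? 0 : 1;
}

int main(void)
{
	printf("/etc outside /etc/ssh? %d\n", is_outside_root("/etc", "/etc/ssh"));
	printf("/etc/ssh outside /etc? %d\n", is_outside_root("/etc/ssh", "/etc"));
	return 0;
}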
++EXPORT_SYMBOL(gr_pid_is_chrooted); ++ ++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) ++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) ++{ ++ struct path path, currentroot; ++ int ret = 0; ++ ++ path.dentry = (struct dentry *)u_dentry; ++ path.mnt = (struct vfsmount *)u_mnt; ++ get_fs_root(current->fs, &currentroot); ++ if (path_is_under(&path, &currentroot)) ++ ret = 1; ++ path_put(&currentroot); ++ ++ return ret; ++} ++#endif ++ ++int ++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ if (!grsec_enable_chroot_fchdir) ++ return 1; ++ ++ if (!proc_is_chrooted(current)) ++ return 1; ++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); ++ return 0; ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ struct task_struct *p; ++ time_t starttime; ++ ++ if (unlikely(!grsec_enable_chroot_shmat)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) { ++ starttime = p->start_time.tv_sec; ++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) { ++ if (have_same_root(current, p)) { ++ goto allow; ++ } else { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } ++ /* creator exited, pid reuse, fall through to next check */ ++ } ++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) { ++ if (unlikely(!have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } ++ ++allow: ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++void ++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) ++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, const char *dev_name) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { ++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ?
dev_name : "none", dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_pivot(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ if (grsec_enable_chroot_double && proc_is_chrooted(current) && ++ !gr_is_outside_chroot(dentry, mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++extern const char *captab_log[]; ++extern int captab_log_entries; ++ ++int ++gr_chroot_is_capable(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) { ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; ++ if (cap_raised(chroot_caps, cap)) { ++ const struct cred *creds = current_cred(); ++ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) { ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]); ++ } ++ return 0; ++ } ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_is_capable_nolog(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) { ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; ++ if (cap_raised(chroot_caps, cap)) { ++ return 0; ++ } ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_sysctl(const int op) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) && ++ proc_is_chrooted(current)) ++ return -EACCES; ++#endif ++ return 0; ++} ++ ++void ++gr_handle_chroot_chdir(struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ if (grsec_enable_chroot_chdir) ++ set_fs_pwd(current->fs, path); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ /* allow chmod +s on directories, but not files */ ++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) && ++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c +new file mode 100644 +index 0000000..213ad8b +--- /dev/null ++++ b/grsecurity/grsec_disabled.c +@@ -0,0 +1,437 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/kdev_t.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/ip.h> ++#include <linux/skbuff.h> ++#include <linux/sysctl.h> ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++void ++pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ return; ++} ++#endif ++ ++#ifdef CONFIG_SYSCTL ++__u32 ++gr_handle_sysctl(const struct ctl_table * table, const int op) ++{ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ return 0; ++} ++#endif ++ ++int ++gr_acl_is_enabled(void) ++{ ++ return 0; ++} ++ ++void ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) ++{ ++ return; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++ return 0; ++} ++ ++void 
++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ return; ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ return 0; ++} ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ return 0; ++} ++ ++void ++gr_learn_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ return; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ return 0; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *tsk) ++{ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ return 0; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ return; ++} ++ ++void ++gr_set_pax_flags(struct task_struct *task) ++{ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_share) ++{ ++ return 0; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return; ++} ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ return 0; ++} ++ ++int ++gr_check_crash_uid(const uid_t uid) ++{ ++ return 0; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ return; ++} ++ ++int ++gr_search_socket(const int family, const int type, const int protocol) ++{ ++ return 1; ++} ++ ++int ++gr_search_connectbind(const int mode, const struct socket *sock, ++ const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ return; ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ int acc_mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot, ++ unsigned int *vm_flags) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, ++ const struct vfsmount * mnt, const int fmode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt, ++ umode_t *mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++void ++grsecurity_init(void) ++{ ++ return; ++} ++ ++umode_t gr_acl_umask(void) ++{ ++ return 0; ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * 
new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const char *from) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const char *to) ++{ ++ return 1; ++} ++ ++int ++gr_acl_handle_rename(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct inode *old_parent_inode, ++ const struct vfsmount *old_mnt, const char *newname) ++{ ++ return 0; ++} ++ ++int ++gr_acl_handle_filldir(const struct file *file, const char *name, ++ const int namelen, const ino_t ino) ++{ ++ return 1; ++} ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, const int shmid) ++{ ++ return 1; ++} ++ ++int ++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++int ++gr_search_accept(const struct socket *sock) ++{ ++ return 0; ++} ++ ++int ++gr_search_listen(const struct socket *sock) ++{ ++ return 0; ++} ++ ++int ++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode, ++ const int imode) ++{ ++ return 1; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ return; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ return 1; ++} ++ ++void ++gr_set_role_label(const uid_t uid, const gid_t gid) ++{ ++ return; ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ return 0; ++} ++ ++int ++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) ++{ ++ return 0; ++} ++ ++int ++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++void ++gr_set_kernel_label(struct task_struct *task) ++{ ++ return; ++} ++ ++int ++gr_check_user_change(int real, int effective, int fs) ++{ ++ return 0; ++} ++ ++int ++gr_check_group_change(int real, int effective, int fs) ++{ ++ return 0; ++} ++ ++int gr_acl_enable_at_secure(void) ++{ ++ return 0; ++} ++ ++dev_t gr_get_dev_from_dentry(struct dentry *dentry) ++{ ++ return dentry->d_inode->i_sb->s_dev; ++} ++ ++EXPORT_SYMBOL(gr_learn_resource); ++EXPORT_SYMBOL(gr_set_kernel_label); ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL(gr_check_user_change); ++EXPORT_SYMBOL(gr_check_group_change); ++#endif +diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c +new file mode 100644 +index 0000000..2b05ada +--- /dev/null ++++ b/grsecurity/grsec_exec.c +@@ -0,0 +1,146 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/binfmts.h> ++#include <linux/fs.h> ++#include <linux/types.h> ++#include <linux/grdefs.h> ++#include 
<linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/capability.h> ++#include <linux/module.h> ++ ++#include <asm/uaccess.h> ++ ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++static char gr_exec_arg_buf[132]; ++static DEFINE_MUTEX(gr_exec_arg_mutex); ++#endif ++ ++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr); ++ ++void ++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv) ++{ ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ char *grarg = gr_exec_arg_buf; ++ unsigned int i, x, execlen = 0; ++ char c; ++ ++ if (!((grsec_enable_execlog && grsec_enable_group && ++ in_group_p(grsec_audit_gid)) ++ || (grsec_enable_execlog && !grsec_enable_group))) ++ return; ++ ++ mutex_lock(&gr_exec_arg_mutex); ++ memset(grarg, 0, sizeof(gr_exec_arg_buf)); ++ ++ for (i = 0; i < bprm->argc && execlen < 128; i++) { ++ const char __user *p; ++ unsigned int len; ++ ++ p = get_user_arg_ptr(argv, i); ++ if (IS_ERR(p)) ++ goto log; ++ ++ len = strnlen_user(p, 128 - execlen); ++ if (len > 128 - execlen) ++ len = 128 - execlen; ++ else if (len > 0) ++ len--; ++ if (copy_from_user(grarg + execlen, p, len)) ++ goto log; ++ ++ /* rewrite unprintable characters */ ++ for (x = 0; x < len; x++) { ++ c = *(grarg + execlen + x); ++ if (c < 32 || c > 126) ++ *(grarg + execlen + x) = ' '; ++ } ++ ++ execlen += len; ++ *(grarg + execlen) = ' '; ++ *(grarg + execlen + 1) = '\0'; ++ execlen++; ++ } ++ ++ log: ++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry, ++ bprm->file->f_path.mnt, grarg); ++ mutex_unlock(&gr_exec_arg_mutex); ++#endif ++ return; ++} ++ ++#ifdef CONFIG_GRKERNSEC ++extern int gr_acl_is_capable(const int cap); ++extern int gr_acl_is_capable_nolog(const int cap); ++extern int gr_chroot_is_capable(const int cap); ++extern int gr_chroot_is_capable_nolog(const int cap); ++#endif ++ ++const char *captab_log[] = { ++ "CAP_CHOWN", ++ "CAP_DAC_OVERRIDE", ++ "CAP_DAC_READ_SEARCH", ++ "CAP_FOWNER", ++ "CAP_FSETID", ++ "CAP_KILL", ++ "CAP_SETGID", ++ "CAP_SETUID", ++ "CAP_SETPCAP", ++ "CAP_LINUX_IMMUTABLE", ++ "CAP_NET_BIND_SERVICE", ++ "CAP_NET_BROADCAST", ++ "CAP_NET_ADMIN", ++ "CAP_NET_RAW", ++ "CAP_IPC_LOCK", ++ "CAP_IPC_OWNER", ++ "CAP_SYS_MODULE", ++ "CAP_SYS_RAWIO", ++ "CAP_SYS_CHROOT", ++ "CAP_SYS_PTRACE", ++ "CAP_SYS_PACCT", ++ "CAP_SYS_ADMIN", ++ "CAP_SYS_BOOT", ++ "CAP_SYS_NICE", ++ "CAP_SYS_RESOURCE", ++ "CAP_SYS_TIME", ++ "CAP_SYS_TTY_CONFIG", ++ "CAP_MKNOD", ++ "CAP_LEASE", ++ "CAP_AUDIT_WRITE", ++ "CAP_AUDIT_CONTROL", ++ "CAP_SETFCAP", ++ "CAP_MAC_OVERRIDE", ++ "CAP_MAC_ADMIN", ++ "CAP_SYSLOG", ++ "CAP_WAKE_ALARM" ++}; ++ ++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]); ++ ++int gr_is_capable(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++int gr_is_capable_nolog(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++EXPORT_SYMBOL(gr_is_capable); ++EXPORT_SYMBOL(gr_is_capable_nolog); +diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c +new file mode 100644 +index 0000000..d3ee748 +--- /dev/null ++++ b/grsecurity/grsec_fifo.c +@@ -0,0 +1,24 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, ++ 
const struct dentry *dir, const int flag, const int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_FIFO ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) && ++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) && ++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) && ++ (cred->fsuid != dentry->d_inode->i_uid)) { ++ if (!inode_permission(dentry->d_inode, acc_mode)) ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c +new file mode 100644 +index 0000000..8ca18bf +--- /dev/null ++++ b/grsecurity/grsec_fork.c +@@ -0,0 +1,23 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/errno.h> ++ ++void ++gr_log_forkfail(const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) { ++ switch (retval) { ++ case -EAGAIN: ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN"); ++ break; ++ case -ENOMEM: ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM"); ++ break; ++ } ++ } ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c +new file mode 100644 +index 0000000..01ddde4 +--- /dev/null ++++ b/grsecurity/grsec_init.c +@@ -0,0 +1,277 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/gracl.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/percpu.h> ++#include <linux/module.h> ++ ++int grsec_enable_ptrace_readexec; ++int grsec_enable_setxid; ++int grsec_enable_brute; ++int grsec_enable_link; ++int grsec_enable_dmesg; ++int grsec_enable_harden_ptrace; ++int grsec_enable_fifo; ++int grsec_enable_execlog; ++int grsec_enable_signal; ++int grsec_enable_forkfail; ++int grsec_enable_audit_ptrace; ++int grsec_enable_time; ++int grsec_enable_audit_textrel; ++int grsec_enable_group; ++int grsec_audit_gid; ++int grsec_enable_chdir; ++int grsec_enable_mount; ++int grsec_enable_rofs; ++int grsec_enable_chroot_findtask; ++int grsec_enable_chroot_mount; ++int grsec_enable_chroot_shmat; ++int grsec_enable_chroot_fchdir; ++int grsec_enable_chroot_double; ++int grsec_enable_chroot_pivot; ++int grsec_enable_chroot_chdir; ++int grsec_enable_chroot_chmod; ++int grsec_enable_chroot_mknod; ++int grsec_enable_chroot_nice; ++int grsec_enable_chroot_execlog; ++int grsec_enable_chroot_caps; ++int grsec_enable_chroot_sysctl; ++int grsec_enable_chroot_unix; ++int grsec_enable_tpe; ++int grsec_tpe_gid; ++int grsec_enable_blackhole; ++#ifdef CONFIG_IPV6_MODULE ++EXPORT_SYMBOL(grsec_enable_blackhole); ++#endif ++int grsec_lastack_retries; ++int grsec_enable_tpe_all; ++int grsec_enable_tpe_invert; ++int grsec_enable_socket_all; ++int grsec_socket_all_gid; ++int grsec_enable_socket_client; ++int grsec_socket_client_gid; ++int grsec_enable_socket_server; ++int grsec_socket_server_gid; ++int grsec_resource_logging; ++int grsec_disable_privio; ++int grsec_enable_log_rwxmaps; ++int grsec_lock; ++ ++DEFINE_SPINLOCK(grsec_alert_lock); ++unsigned long grsec_alert_wtime = 0; ++unsigned long grsec_alert_fyet = 0; ++ ++DEFINE_SPINLOCK(grsec_audit_lock); ++ ++DEFINE_RWLOCK(grsec_exec_file_lock); ++ ++char *gr_shared_page[4]; ++ ++char *gr_alert_log_fmt; ++char *gr_audit_log_fmt; ++char *gr_alert_log_buf; ++char *gr_audit_log_buf; ++ ++extern struct gr_arg 
*gr_usermode; ++extern unsigned char *gr_system_salt; ++extern unsigned char *gr_system_sum; ++ ++void __init ++grsecurity_init(void) ++{ ++ int j; ++ /* create the per-cpu shared pages */ ++ ++#ifdef CONFIG_X86 ++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36); ++#endif ++ ++ for (j = 0; j < 4; j++) { ++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long)); ++ if (gr_shared_page[j] == NULL) { ++ panic("Unable to allocate grsecurity shared page"); ++ return; ++ } ++ } ++ ++ /* allocate log buffers */ ++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); ++ if (!gr_alert_log_fmt) { ++ panic("Unable to allocate grsecurity alert log format buffer"); ++ return; ++ } ++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); ++ if (!gr_audit_log_fmt) { ++ panic("Unable to allocate grsecurity audit log format buffer"); ++ return; ++ } ++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); ++ if (!gr_alert_log_buf) { ++ panic("Unable to allocate grsecurity alert log buffer"); ++ return; ++ } ++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); ++ if (!gr_audit_log_buf) { ++ panic("Unable to allocate grsecurity audit log buffer"); ++ return; ++ } ++ ++ /* allocate memory for authentication structure */ ++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); ++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); ++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); ++ ++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) { ++ panic("Unable to allocate grsecurity authentication structure"); ++ return; ++ } ++ ++ ++#ifdef CONFIG_GRKERNSEC_IO ++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO) ++ grsec_disable_privio = 1; ++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON) ++ grsec_disable_privio = 1; ++#else ++ grsec_disable_privio = 0; ++#endif ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ /* for backward compatibility, tpe_invert always defaults to on if ++ enabled in the kernel ++ */ ++ grsec_enable_tpe_invert = 1; ++#endif ++ ++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) ++#ifndef CONFIG_GRKERNSEC_SYSCTL ++ grsec_lock = 1; ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ grsec_enable_audit_textrel = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ grsec_enable_log_rwxmaps = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ grsec_enable_group = 1; ++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID; ++#endif ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ grsec_enable_ptrace_readexec = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ grsec_enable_chdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ grsec_enable_harden_ptrace = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ grsec_enable_mount = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_LINK ++ grsec_enable_link = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ grsec_enable_brute = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ grsec_enable_dmesg = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ grsec_enable_blackhole = 1; ++ grsec_lastack_retries = 4; ++#endif ++#ifdef CONFIG_GRKERNSEC_FIFO ++ grsec_enable_fifo = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ grsec_enable_execlog = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ grsec_enable_setxid = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ grsec_enable_signal = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ grsec_enable_forkfail = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_TIME ++ grsec_enable_time = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ grsec_resource_logging = 1; ++#endif ++#ifdef 
CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ grsec_enable_chroot_findtask = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ grsec_enable_chroot_unix = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ grsec_enable_chroot_mount = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ grsec_enable_chroot_fchdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ grsec_enable_chroot_shmat = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ grsec_enable_audit_ptrace = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ grsec_enable_chroot_double = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ grsec_enable_chroot_pivot = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ grsec_enable_chroot_chdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ grsec_enable_chroot_chmod = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ grsec_enable_chroot_mknod = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ grsec_enable_chroot_nice = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ grsec_enable_chroot_execlog = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ grsec_enable_chroot_caps = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ grsec_enable_chroot_sysctl = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE ++ grsec_enable_tpe = 1; ++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID; ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ grsec_enable_tpe_all = 1; ++#endif ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ grsec_enable_socket_all = 1; ++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID; ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ grsec_enable_socket_client = 1; ++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID; ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ grsec_enable_socket_server = 1; ++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID; ++#endif ++#endif ++ ++ return; ++} +diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c +new file mode 100644 +index 0000000..3efe141 +--- /dev/null ++++ b/grsecurity/grsec_link.c +@@ -0,0 +1,43 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_LINK ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_link && S_ISLNK(inode->i_mode) && ++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) && ++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) { ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, const int mode, const char *to) ++{ ++#ifdef CONFIG_GRKERNSEC_LINK ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_link && cred->fsuid != inode->i_uid && ++ (!S_ISREG(mode) || (mode & S_ISUID) || ++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) || ++ (inode_permission(inode, MAY_READ | MAY_WRITE))) && ++ !capable(CAP_FOWNER) && cred->uid) { ++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c +new file mode 100644 +index 0000000..a45d2e9 +--- /dev/null ++++ b/grsecurity/grsec_log.c +@@ -0,0 +1,322 @@ 
++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/tty.h> ++#include <linux/fs.h> ++#include <linux/grinternal.h> ++ ++#ifdef CONFIG_TREE_PREEMPT_RCU ++#define DISABLE_PREEMPT() preempt_disable() ++#define ENABLE_PREEMPT() preempt_enable() ++#else ++#define DISABLE_PREEMPT() ++#define ENABLE_PREEMPT() ++#endif ++ ++#define BEGIN_LOCKS(x) \ ++ DISABLE_PREEMPT(); \ ++ rcu_read_lock(); \ ++ read_lock(&tasklist_lock); \ ++ read_lock(&grsec_exec_file_lock); \ ++ if (x != GR_DO_AUDIT) \ ++ spin_lock(&grsec_alert_lock); \ ++ else \ ++ spin_lock(&grsec_audit_lock) ++ ++#define END_LOCKS(x) \ ++ if (x != GR_DO_AUDIT) \ ++ spin_unlock(&grsec_alert_lock); \ ++ else \ ++ spin_unlock(&grsec_audit_lock); \ ++ read_unlock(&grsec_exec_file_lock); \ ++ read_unlock(&tasklist_lock); \ ++ rcu_read_unlock(); \ ++ ENABLE_PREEMPT(); \ ++ if (x == GR_DONT_AUDIT) \ ++ gr_handle_alertkill(current) ++ ++enum { ++ FLOODING, ++ NO_FLOODING ++}; ++ ++extern char *gr_alert_log_fmt; ++extern char *gr_audit_log_fmt; ++extern char *gr_alert_log_buf; ++extern char *gr_audit_log_buf; ++ ++static int gr_log_start(int audit) ++{ ++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; ++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0) ++ unsigned long curr_secs = get_seconds(); ++ ++ if (audit == GR_DO_AUDIT) ++ goto set_fmt; ++ ++ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) { ++ grsec_alert_wtime = curr_secs; ++ grsec_alert_fyet = 0; ++ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME) ++ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { ++ grsec_alert_fyet++; ++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { ++ grsec_alert_wtime = curr_secs; ++ grsec_alert_fyet++; ++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); ++ return FLOODING; ++ } ++ else return FLOODING; ++ ++set_fmt: ++#endif ++ memset(buf, 0, PAGE_SIZE); ++ if (current->signal->curr_ip && gr_acl_is_enabled()) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename); ++ } else if (current->signal->curr_ip) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip); ++ } else if (gr_acl_is_enabled()) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); ++ } else { ++ sprintf(fmt, "%s%s", loglevel, "grsec: "); ++ strcpy(buf, fmt); ++ } ++ ++ return NO_FLOODING; ++} ++ ++static void gr_log_middle(int audit, const char *msg, va_list ap) ++ __attribute__ ((format (printf, 2, 0))); ++ ++static void gr_log_middle(int audit, const char *msg, va_list ap) ++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ unsigned int len = strlen(buf); ++ ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); ++ ++ return; ++} ++ ++static void gr_log_middle_varargs(int audit, const char *msg, ...) ++ __attribute__ ((format (printf, 2, 3))); ++ ++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ unsigned int len = strlen(buf); ++ va_list ap; ++ ++ va_start(ap, msg); ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); ++ va_end(ap); ++ ++ return; ++} ++ ++static void gr_log_end(int audit, int append_default) ++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ ++ if (append_default) { ++ unsigned int len = strlen(buf); ++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent))); ++ } ++ ++ printk("%s\n", buf); ++ ++ return; ++} ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...) ++{ ++ int logtype; ++ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied"; ++ char *str1 = NULL, *str2 = NULL, *str3 = NULL; ++ void *voidptr = NULL; ++ int num1 = 0, num2 = 0; ++ unsigned long ulong1 = 0, ulong2 = 0; ++ struct dentry *dentry = NULL; ++ struct vfsmount *mnt = NULL; ++ struct file *file = NULL; ++ struct task_struct *task = NULL; ++ const struct cred *cred, *pcred; ++ va_list ap; ++ ++ BEGIN_LOCKS(audit); ++ logtype = gr_log_start(audit); ++ if (logtype == FLOODING) { ++ END_LOCKS(audit); ++ return; ++ } ++ va_start(ap, argtypes); ++ switch (argtypes) { ++ case GR_TTYSNIFF: ++ task = va_arg(ap, struct task_struct *); ++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid); ++ break; ++ case GR_SYSCTL_HIDDEN: ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, str1); ++ break; ++ case GR_RBAC: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_STR_RBAC: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_MODE2: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); ++ break; ++ case GR_RBAC_MODE3: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ str3 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); ++ break; ++ case GR_FILENAME: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_STR_FILENAME: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_FILENAME_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_FILENAME_TWO_INT: ++ dentry = va_arg(ap, struct dentry *); ++ 
mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); ++ break; ++ case GR_FILENAME_TWO_INT_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); ++ break; ++ case GR_TEXTREL: ++ file = va_arg(ap, struct file *); ++ ulong1 = va_arg(ap, unsigned long); ++ ulong2 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2); ++ break; ++ case GR_PTRACE: ++ task = va_arg(ap, struct task_struct *); ++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid); ++ break; ++ case GR_RESOURCE: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ str1 = va_arg(ap, char *); ++ ulong2 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); ++ break; ++ case GR_CAP: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); ++ break; ++ case GR_SIG: ++ str1 = va_arg(ap, char *); ++ voidptr = va_arg(ap, void *); ++ gr_log_middle_varargs(audit, msg, str1, voidptr); ++ break; ++ case GR_SIG2: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ num1 = va_arg(ap, int); ++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid); ++ break; ++ case GR_CRASH1: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1); ++ break; ++ case GR_CRASH2: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1); ++ break; ++ case GR_RWXMAP: ++ file = va_arg(ap, struct file *); ++ 
gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>"); ++ break; ++ case GR_PSACCT: ++ { ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ char cur_tty[64] = { 0 }; ++ char parent_tty[64] = { 0 }; ++ ++ task = va_arg(ap, struct task_struct *); ++ wday = va_arg(ap, unsigned int); ++ cday = va_arg(ap, unsigned int); ++ whr = va_arg(ap, int); ++ chr = va_arg(ap, int); ++ wmin = va_arg(ap, int); ++ cmin = va_arg(ap, int); ++ wsec = va_arg(ap, int); ++ csec = va_arg(ap, int); ++ ulong1 = va_arg(ap, unsigned long); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid); ++ } ++ break; ++ default: ++ gr_log_middle(audit, msg, ap); ++ } ++ va_end(ap); ++ // these don't need DEFAULTSECARGS printed on the end ++ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2) ++ gr_log_end(audit, 0); ++ else ++ gr_log_end(audit, 1); ++ END_LOCKS(audit); ++} +diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c +new file mode 100644 +index 0000000..f536303 +--- /dev/null ++++ b/grsecurity/grsec_mem.c +@@ -0,0 +1,40 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/mman.h> ++#include <linux/grinternal.h> ++ ++void ++gr_handle_ioperm(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); ++ return; ++} ++ ++void ++gr_handle_iopl(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); ++ return; ++} ++ ++void ++gr_handle_mem_readwrite(u64 from, u64 to) ++{ ++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to); ++ return; ++} ++ ++void ++gr_handle_vm86(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG); ++ return; ++} ++ ++void ++gr_log_badprocpid(const char *entry) ++{ ++ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry); ++ return; ++} +diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c +new file mode 100644 +index 0000000..2131422 +--- /dev/null ++++ b/grsecurity/grsec_mount.c +@@ -0,0 +1,62 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mount.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_remount(const char *devname, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_unmount(const char *devname, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_mount(const char *from, const char *to, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? 
from : "none", to); ++#endif ++ return; ++} ++ ++int ++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && ++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c +new file mode 100644 +index 0000000..a3b12a0 +--- /dev/null ++++ b/grsecurity/grsec_pax.c +@@ -0,0 +1,36 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++#include <linux/grsecurity.h> ++ ++void ++gr_log_textrel(struct vm_area_struct * vma) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ if (grsec_enable_audit_textrel) ++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmmap(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmprotect(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file); ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c +new file mode 100644 +index 0000000..f7f29aa +--- /dev/null ++++ b/grsecurity/grsec_ptrace.c +@@ -0,0 +1,30 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/security.h> ++ ++void ++gr_audit_ptrace(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ if (grsec_enable_audit_ptrace) ++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); ++#endif ++ return; ++} ++ ++int ++gr_ptrace_readexec(struct file *file, int unsafe_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ const struct dentry *dentry = file->f_path.dentry; ++ const struct vfsmount *mnt = file->f_path.mnt; ++ ++ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && ++ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c +new file mode 100644 +index 0000000..7a5b2de +--- /dev/null ++++ b/grsecurity/grsec_sig.c +@@ -0,0 +1,207 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/delay.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/hardirq.h> ++ ++char *signames[] = { ++ [SIGSEGV] = "Segmentation fault", ++ [SIGILL] = "Illegal instruction", ++ [SIGABRT] = "Abort", ++ [SIGBUS] = "Invalid alignment/Bus error" ++}; ++ ++void ++gr_log_signal(const int sig, const void *addr, const struct task_struct *t) ++{ ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || ++ (sig == SIGABRT) || (sig == SIGBUS))) { ++ if 
(t->pid == current->pid) { ++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr); ++ } else { ++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); ++ } ++ } ++#endif ++ return; ++} ++ ++int ++gr_handle_signal(const struct task_struct *p, const int sig) ++{ ++#ifdef CONFIG_GRKERNSEC ++ /* ignore the 0 signal for protected task checks */ ++ if (current->pid > 1 && sig && gr_check_protected_task(p)) { ++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); ++ return -EPERM; ++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) { ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++#ifdef CONFIG_GRKERNSEC ++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); ++ ++int gr_fake_force_sig(int sig, struct task_struct *t) ++{ ++ unsigned long int flags; ++ int ret, blocked, ignored; ++ struct k_sigaction *action; ++ ++ spin_lock_irqsave(&t->sighand->siglock, flags); ++ action = &t->sighand->action[sig-1]; ++ ignored = action->sa.sa_handler == SIG_IGN; ++ blocked = sigismember(&t->blocked, sig); ++ if (blocked || ignored) { ++ action->sa.sa_handler = SIG_DFL; ++ if (blocked) { ++ sigdelset(&t->blocked, sig); ++ recalc_sigpending_and_wake(t); ++ } ++ } ++ if (action->sa.sa_handler == SIG_DFL) ++ t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t); ++ ++ spin_unlock_irqrestore(&t->sighand->siglock, flags); ++ ++ return ret; ++} ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++#define GR_USER_BAN_TIME (15 * 60) ++ ++static int __get_dumpable(unsigned long mm_flags) ++{ ++ int ret; ++ ++ ret = mm_flags & MMF_DUMPABLE_MASK; ++ return (ret >= 2) ? 2 : ret; ++} ++#endif ++ ++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ uid_t uid = 0; ++ ++ if (!grsec_enable_brute) ++ return; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ if (p->real_parent && p->real_parent->exec_file == p->exec_file) ++ p->real_parent->brute = 1; ++ else { ++ const struct cred *cred = __task_cred(p), *cred2; ++ struct task_struct *tsk, *tsk2; ++ ++ if (!__get_dumpable(mm_flags) && cred->uid) { ++ struct user_struct *user; ++ ++ uid = cred->uid; ++ ++ /* this is put upon execution past expiration */ ++ user = find_user(uid); ++ if (user == NULL) ++ goto unlock; ++ user->banned = 1; ++ user->ban_expires = get_seconds() + GR_USER_BAN_TIME; ++ if (user->ban_expires == ~0UL) ++ user->ban_expires--; ++ ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != p && cred2->uid == uid) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ } ++ } ++unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ if (uid) ++ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); ++ ++#endif ++ return; ++} ++ ++void gr_handle_brute_check(void) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ if (current->brute) ++ msleep(30 * 1000); ++#endif ++ return; ++} ++ ++void gr_handle_kernel_exploit(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ const struct cred *cred; ++ struct task_struct *tsk, *tsk2; ++ struct user_struct *user; ++ uid_t uid; ++ ++ if (in_irq() || in_serving_softirq() || in_nmi()) ++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context"); ++ ++ uid = current_uid(); ++ ++ if (uid == 0) ++ panic("grsec: halting the system 
due to suspicious kernel crash caused by root"); ++ else { ++ /* kill all the processes of this user, hold a reference ++ to their creds struct, and prevent them from creating ++ another process until system reset ++ */ ++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid); ++ /* we intentionally leak this ref */ ++ user = get_uid(current->cred->user); ++ if (user) { ++ user->banned = 1; ++ user->ban_expires = ~0UL; ++ } ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred = __task_cred(tsk); ++ if (cred->uid == uid) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } ++#endif ++} ++ ++int __gr_process_user_ban(struct user_struct *user) ++{ ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ if (unlikely(user->banned)) { ++ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) { ++ user->banned = 0; ++ user->ban_expires = 0; ++ free_uid(user); ++ } else ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int gr_process_user_ban(void) ++{ ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ return __gr_process_user_ban(current->cred->user); ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c +new file mode 100644 +index 0000000..4030d57 +--- /dev/null ++++ b/grsecurity/grsec_sock.c +@@ -0,0 +1,244 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/ip.h> ++#include <net/sock.h> ++#include <net/inet_sock.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr); ++ ++EXPORT_SYMBOL(gr_search_udp_recvmsg); ++EXPORT_SYMBOL(gr_search_udp_sendmsg); ++ ++#ifdef CONFIG_UNIX_MODULE ++EXPORT_SYMBOL(gr_acl_handle_unix); ++EXPORT_SYMBOL(gr_acl_handle_mknod); ++EXPORT_SYMBOL(gr_handle_chroot_unix); ++EXPORT_SYMBOL(gr_handle_create); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define gr_conn_table_size 32749 ++struct conn_table_entry { ++ struct conn_table_entry *next; ++ struct signal_struct *sig; ++}; ++ ++struct conn_table_entry *gr_conn_table[gr_conn_table_size]; ++DEFINE_SPINLOCK(gr_conn_table_lock); ++ ++extern const char * gr_socktype_to_name(unsigned char type); ++extern const char * gr_proto_to_name(unsigned char proto); ++extern const char * gr_sockfamily_to_name(unsigned char family); ++ ++static __inline__ int ++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) ++{ ++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); ++} ++ ++static __inline__ int ++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && ++ sig->gr_sport == sport && sig->gr_dport == dport)) ++ return 1; ++ else ++ return 0; ++} ++ ++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) ++{ ++ struct conn_table_entry **match; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ newent->sig = sig; ++ ++ match = &gr_conn_table[index]; ++ 
newent->next = *match; ++ *match = newent; ++ ++ return; ++} ++ ++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) ++{ ++ struct conn_table_entry *match, *last = NULL; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, ++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport, ++ sig->gr_dport)) { ++ last = match; ++ match = match->next; ++ } ++ ++ if (match) { ++ if (last) ++ last->next = match->next; ++ else ++ gr_conn_table[index] = NULL; ++ kfree(match); ++ } ++ ++ return; ++} ++ ++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ struct conn_table_entry *match; ++ unsigned int index; ++ ++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport)) ++ match = match->next; ++ ++ if (match) ++ return match->sig; ++ else ++ return NULL; ++} ++ ++#endif ++ ++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct signal_struct *sig = task->signal; ++ struct conn_table_entry *newent; ++ ++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); ++ if (newent == NULL) ++ return; ++ /* no bh lock needed since we are called with bh disabled */ ++ spin_lock(&gr_conn_table_lock); ++ gr_del_task_from_ip_table_nolock(sig); ++ sig->gr_saddr = inet->inet_rcv_saddr; ++ sig->gr_daddr = inet->inet_daddr; ++ sig->gr_sport = inet->inet_sport; ++ sig->gr_dport = inet->inet_dport; ++ gr_add_to_task_ip_table_nolock(sig, newent); ++ spin_unlock(&gr_conn_table_lock); ++#endif ++ return; ++} ++ ++void gr_del_task_from_ip_table(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ spin_lock_bh(&gr_conn_table_lock); ++ gr_del_task_from_ip_table_nolock(task->signal); ++ spin_unlock_bh(&gr_conn_table_lock); ++#endif ++ return; ++} ++ ++void ++gr_attach_curr_ip(const struct sock *sk) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct signal_struct *p, *set; ++ const struct inet_sock *inet = inet_sk(sk); ++ ++ if (unlikely(sk->sk_protocol != IPPROTO_TCP)) ++ return; ++ ++ set = current->signal; ++ ++ spin_lock_bh(&gr_conn_table_lock); ++ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr, ++ inet->inet_dport, inet->inet_sport); ++ if (unlikely(p != NULL)) { ++ set->curr_ip = p->curr_ip; ++ set->used_accept = 1; ++ gr_del_task_from_ip_table_nolock(p); ++ spin_unlock_bh(&gr_conn_table_lock); ++ return; ++ } ++ spin_unlock_bh(&gr_conn_table_lock); ++ ++ set->curr_ip = inet->inet_daddr; ++ set->used_accept = 1; ++#endif ++ return; ++} ++ ++int ++gr_handle_sock_all(const int family, const int type, const int protocol) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && ++ (family != AF_UNIX)) { ++ if (family == AF_INET) ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol)); ++ else ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_server(const struct sockaddr *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ if (grsec_enable_socket_server && ++ in_group_p(grsec_socket_server_gid) && ++ sck && 
(sck->sa_family != AF_UNIX) && ++ (sck->sa_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_server_other(const struct sock *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ if (grsec_enable_socket_server && ++ in_group_p(grsec_socket_server_gid) && ++ sck && (sck->sk_family != AF_UNIX) && ++ (sck->sk_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_client(const struct sockaddr *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && ++ sck && (sck->sa_family != AF_UNIX) && ++ (sck->sa_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c +new file mode 100644 +index 0000000..a1aedd7 +--- /dev/null ++++ b/grsecurity/grsec_sysctl.c +@@ -0,0 +1,451 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/sysctl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) ++{ ++#ifdef CONFIG_GRKERNSEC_SYSCTL ++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) { ++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++#ifdef CONFIG_GRKERNSEC_ROFS ++static int __maybe_unused one = 1; ++#endif ++ ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) ++struct ctl_table grsecurity_table[] = { ++#ifdef CONFIG_GRKERNSEC_SYSCTL ++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO ++#ifdef CONFIG_GRKERNSEC_IO ++ { ++ .procname = "disable_priv_io", ++ .data = &grsec_disable_privio, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#endif ++#ifdef CONFIG_GRKERNSEC_LINK ++ { ++ .procname = "linking_restrictions", ++ .data = &grsec_enable_link, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ { ++ .procname = "deter_bruteforce", ++ .data = &grsec_enable_brute, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_FIFO ++ { ++ .procname = "fifo_restrictions", ++ .data = &grsec_enable_fifo, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ { ++ .procname = "ptrace_readexec", ++ .data = &grsec_enable_ptrace_readexec, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ { ++ .procname = "consistent_setxid", ++ .data = &grsec_enable_setxid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ { ++ .procname = "ip_blackhole", ++ .data = &grsec_enable_blackhole, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "lastack_retries", ++ .data = &grsec_lastack_retries, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ { ++ .procname = "exec_logging", ++ .data = &grsec_enable_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef 
CONFIG_GRKERNSEC_RWXMAP_LOG ++ { ++ .procname = "rwxmap_logging", ++ .data = &grsec_enable_log_rwxmaps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ { ++ .procname = "signal_logging", ++ .data = &grsec_enable_signal, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ { ++ .procname = "forkfail_logging", ++ .data = &grsec_enable_forkfail, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TIME ++ { ++ .procname = "timechange_logging", ++ .data = &grsec_enable_time, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ { ++ .procname = "chroot_deny_shmat", ++ .data = &grsec_enable_chroot_shmat, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ { ++ .procname = "chroot_deny_unix", ++ .data = &grsec_enable_chroot_unix, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ { ++ .procname = "chroot_deny_mount", ++ .data = &grsec_enable_chroot_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ { ++ .procname = "chroot_deny_fchdir", ++ .data = &grsec_enable_chroot_fchdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ { ++ .procname = "chroot_deny_chroot", ++ .data = &grsec_enable_chroot_double, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ { ++ .procname = "chroot_deny_pivot", ++ .data = &grsec_enable_chroot_pivot, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ { ++ .procname = "chroot_enforce_chdir", ++ .data = &grsec_enable_chroot_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ { ++ .procname = "chroot_deny_chmod", ++ .data = &grsec_enable_chroot_chmod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ { ++ .procname = "chroot_deny_mknod", ++ .data = &grsec_enable_chroot_mknod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ { ++ .procname = "chroot_restrict_nice", ++ .data = &grsec_enable_chroot_nice, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ { ++ .procname = "chroot_execlog", ++ .data = &grsec_enable_chroot_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ { ++ .procname = "chroot_caps", ++ .data = &grsec_enable_chroot_caps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ { ++ .procname = "chroot_deny_sysctl", ++ .data = &grsec_enable_chroot_sysctl, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif 
++#ifdef CONFIG_GRKERNSEC_TPE ++ { ++ .procname = "tpe", ++ .data = &grsec_enable_tpe, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "tpe_gid", ++ .data = &grsec_tpe_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .procname = "tpe_invert", ++ .data = &grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ { ++ .procname = "tpe_restrict_all", ++ .data = &grsec_enable_tpe_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ { ++ .procname = "socket_all", ++ .data = &grsec_enable_socket_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_all_gid", ++ .data = &grsec_socket_all_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ { ++ .procname = "socket_client", ++ .data = &grsec_enable_socket_client, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_client_gid", ++ .data = &grsec_socket_client_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ { ++ .procname = "socket_server", ++ .data = &grsec_enable_socket_server, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_server_gid", ++ .data = &grsec_socket_server_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ { ++ .procname = "audit_group", ++ .data = &grsec_enable_group, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "audit_gid", ++ .data = &grsec_audit_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ { ++ .procname = "audit_chdir", ++ .data = &grsec_enable_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ { ++ .procname = "audit_mount", ++ .data = &grsec_enable_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ { ++ .procname = "audit_textrel", ++ .data = &grsec_enable_audit_textrel, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ { ++ .procname = "dmesg", ++ .data = &grsec_enable_dmesg, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ { ++ .procname = "chroot_findtask", ++ .data = &grsec_enable_chroot_findtask, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ { ++ .procname = "resource_logging", ++ .data = &grsec_resource_logging, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ { ++ .procname = "audit_ptrace", ++ .data = &grsec_enable_audit_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = 
&proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ { ++ .procname = "harden_ptrace", ++ .data = &grsec_enable_harden_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++ { ++ .procname = "grsec_lock", ++ .data = &grsec_lock, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_ROFS ++ { ++ .procname = "romount_protect", ++ .data = &grsec_enable_rofs, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one, ++ }, ++#endif ++ { } ++}; ++#endif +diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c +new file mode 100644 +index 0000000..0dc13c3 +--- /dev/null ++++ b/grsecurity/grsec_time.c +@@ -0,0 +1,16 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/module.h> ++ ++void ++gr_log_timechange(void) ++{ ++#ifdef CONFIG_GRKERNSEC_TIME ++ if (grsec_enable_time) ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); ++#endif ++ return; ++} ++ ++EXPORT_SYMBOL(gr_log_timechange); +diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c +new file mode 100644 +index 0000000..07e0dc0 +--- /dev/null ++++ b/grsecurity/grsec_tpe.c +@@ -0,0 +1,73 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/grinternal.h> ++ ++extern int gr_acl_tpe_check(void); ++ ++int ++gr_tpe_allow(const struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct inode *inode = file->f_path.dentry->d_parent->d_inode; ++ const struct cred *cred = current_cred(); ++ char *msg = NULL; ++ char *msg2 = NULL; ++ ++ // never restrict root ++ if (!cred->uid) ++ return 1; ++ ++ if (grsec_enable_tpe) { ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ++ msg = "not being in trusted group"; ++ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#else ++ if (in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#endif ++ } ++ if (!msg && gr_acl_tpe_check()) ++ msg = "being in untrusted role"; ++ ++ // not in any affected group/role ++ if (!msg) ++ goto next_check; ++ ++ if (inode->i_uid) ++ msg2 = "file in non-root-owned directory"; ++ else if (inode->i_mode & S_IWOTH) ++ msg2 = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg2 = "file in group-writable directory"; ++ ++ if (msg && msg2) { ++ char fullmsg[70] = {0}; ++ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2); ++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++ msg = NULL; ++next_check: ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ if (!grsec_enable_tpe || !grsec_enable_tpe_all) ++ return 1; ++ ++ if (inode->i_uid && (inode->i_uid != cred->uid)) ++ msg = "directory not owned by user"; ++ else if (inode->i_mode & S_IWOTH) ++ msg = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg = "file in group-writable directory"; ++ ++ if (msg) { ++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++#endif ++#endif ++ return 1; ++} +diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c +new file mode 100644 +index 0000000..9f7b1ac +--- /dev/null ++++ b/grsecurity/grsum.c +@@ -0,0 +1,61 @@ ++#include <linux/err.h> ++#include <linux/kernel.h> ++#include 
<linux/sched.h> ++#include <linux/mm.h> ++#include <linux/scatterlist.h> ++#include <linux/crypto.h> ++#include <linux/gracl.h> ++ ++ ++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) ++#error "crypto and sha256 must be built into the kernel" ++#endif ++ ++int ++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) ++{ ++ char *p; ++ struct crypto_hash *tfm; ++ struct hash_desc desc; ++ struct scatterlist sg; ++ unsigned char temp_sum[GR_SHA_LEN]; ++ volatile int retval = 0; ++ volatile int dummy = 0; ++ unsigned int i; ++ ++ sg_init_table(&sg, 1); ++ ++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); ++ if (IS_ERR(tfm)) { ++ /* should never happen, since sha256 should be built in */ ++ return 1; ++ } ++ ++ desc.tfm = tfm; ++ desc.flags = 0; ++ ++ crypto_hash_init(&desc); ++ ++ p = salt; ++ sg_set_buf(&sg, p, GR_SALT_LEN); ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ p = entry->pw; ++ sg_set_buf(&sg, p, strlen(p)); ++ ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ crypto_hash_final(&desc, temp_sum); ++ ++ memset(entry->pw, 0, GR_PW_LEN); ++ ++ for (i = 0; i < GR_SHA_LEN; i++) ++ if (sum[i] != temp_sum[i]) ++ retval = 1; ++ else ++ dummy = 1; // waste a cycle ++ ++ crypto_free_hash(tfm); ++ ++ return retval; ++} +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index 6cd5b64..f620d2d 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -107,7 +107,7 @@ struct acpi_device_ops { + acpi_op_bind bind; + acpi_op_unbind unbind; + acpi_op_notify notify; +-}; ++} __no_const; + + #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ + +diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h +index b7babf0..71e4e74 100644 +--- a/include/asm-generic/atomic-long.h ++++ b/include/asm-generic/atomic-long.h +@@ -22,6 +22,12 @@ + + typedef atomic64_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic64_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic64_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + + static inline long atomic_long_read(atomic_long_t *l) +@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic64_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic64_t *v = (atomic64_t *)l; +@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic64_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic64_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -52,6 +85,15 @@ static inline void 
atomic_long_dec(atomic_long_t *l) + atomic64_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic64_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) + atomic64_sub(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_sub_unchecked(i, v); ++} ++#endif ++ + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic64_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + typedef atomic_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) + static inline long atomic_long_read(atomic_long_t *l) + { +@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic_t *v = (atomic_t *)l; +@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l) + atomic_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t 
*l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) + atomic_sub(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_sub_unchecked(i, v); ++} ++#endif ++ + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + #endif /* BITS_PER_LONG == 64 */ + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void pax_refcount_needs_these_functions(void) ++{ ++ atomic_read_unchecked((atomic_unchecked_t *)NULL); ++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); ++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_inc_unchecked((atomic_unchecked_t *)NULL); ++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); ++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); ++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_dec_unchecked((atomic_unchecked_t *)NULL); ++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); ++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); ++ ++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); ++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); ++} ++#else ++#define atomic_read_unchecked(v) atomic_read(v) ++#define atomic_set_unchecked(v, i) atomic_set((v), (i)) ++#define atomic_add_unchecked(i, v) atomic_add((i), (v)) ++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) ++#define atomic_inc_unchecked(v) atomic_inc(v) ++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v) ++#define atomic_inc_return_unchecked(v) atomic_inc_return(v) ++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) ++#define atomic_dec_unchecked(v) atomic_dec(v) ++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) ++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) ++ ++#define 
atomic_long_read_unchecked(v) atomic_long_read(v) ++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) ++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) ++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v)) ++#define atomic_long_inc_unchecked(v) atomic_long_inc(v) ++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) ++#define atomic_long_dec_unchecked(v) atomic_long_dec(v) ++#endif ++ + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h +index b18ce4f..2ee2843 100644 +--- a/include/asm-generic/atomic64.h ++++ b/include/asm-generic/atomic64.h +@@ -16,6 +16,8 @@ typedef struct { + long long counter; + } atomic64_t; + ++typedef atomic64_t atomic64_unchecked_t; ++ + #define ATOMIC64_INIT(i) { (i) } + + extern long long atomic64_read(const atomic64_t *v); +@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); + #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* _ASM_GENERIC_ATOMIC64_H */ +diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h +index 1bfcfe5..e04c5c9 100644 +--- a/include/asm-generic/cache.h ++++ b/include/asm-generic/cache.h +@@ -6,7 +6,7 @@ + * cache lines need to provide their own cache.h. + */ + +-#define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5UL ++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT) + + #endif /* __ASM_GENERIC_CACHE_H */ +diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h +index 0d68a1e..b74a761 100644 +--- a/include/asm-generic/emergency-restart.h ++++ b/include/asm-generic/emergency-restart.h +@@ -1,7 +1,7 @@ + #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H + #define _ASM_GENERIC_EMERGENCY_RESTART_H + +-static inline void machine_emergency_restart(void) ++static inline __noreturn void machine_emergency_restart(void) + { + machine_restart(NULL); + } +diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h +index 0232ccb..13d9165 100644 +--- a/include/asm-generic/kmap_types.h ++++ b/include/asm-generic/kmap_types.h +@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE, + KMAP_D(17) KM_NMI, + KMAP_D(18) KM_NMI_PTE, + KMAP_D(19) KM_KDB, ++KMAP_D(20) KM_CLEARPAGE, + /* + * Remember to update debug_kmap_atomic() when adding new kmap types! 
+ */ +-KMAP_D(20) KM_TYPE_NR ++KMAP_D(21) KM_TYPE_NR + }; + + #undef KMAP_D +diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h +index 9ceb03b..2efbcbd 100644 +--- a/include/asm-generic/local.h ++++ b/include/asm-generic/local.h +@@ -39,6 +39,7 @@ typedef struct + #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) + #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) + #define local_inc_return(l) atomic_long_inc_return(&(l)->a) ++#define local_dec_return(l) atomic_long_dec_return(&(l)->a) + + #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) + #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) +diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h +index 725612b..9cc513a 100644 +--- a/include/asm-generic/pgtable-nopmd.h ++++ b/include/asm-generic/pgtable-nopmd.h +@@ -1,14 +1,19 @@ + #ifndef _PGTABLE_NOPMD_H + #define _PGTABLE_NOPMD_H + +-#ifndef __ASSEMBLY__ +- + #include <asm-generic/pgtable-nopud.h> + +-struct mm_struct; +- + #define __PAGETABLE_PMD_FOLDED + ++#define PMD_SHIFT PUD_SHIFT ++#define PTRS_PER_PMD 1 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ ++struct mm_struct; ++ + /* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into +@@ -16,11 +21,6 @@ struct mm_struct; + */ + typedef struct { pud_t pud; } pmd_t; + +-#define PMD_SHIFT PUD_SHIFT +-#define PTRS_PER_PMD 1 +-#define PMD_SIZE (1UL << PMD_SHIFT) +-#define PMD_MASK (~(PMD_SIZE-1)) +- + /* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded +diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h +index 810431d..ccc3638 100644 +--- a/include/asm-generic/pgtable-nopud.h ++++ b/include/asm-generic/pgtable-nopud.h +@@ -1,10 +1,15 @@ + #ifndef _PGTABLE_NOPUD_H + #define _PGTABLE_NOPUD_H + +-#ifndef __ASSEMBLY__ +- + #define __PAGETABLE_PUD_FOLDED + ++#define PUD_SHIFT PGDIR_SHIFT ++#define PTRS_PER_PUD 1 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ + /* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into +@@ -12,11 +17,6 @@ + */ + typedef struct { pgd_t pgd; } pud_t; + +-#define PUD_SHIFT PGDIR_SHIFT +-#define PTRS_PER_PUD 1 +-#define PUD_SIZE (1UL << PUD_SHIFT) +-#define PUD_MASK (~(PUD_SIZE-1)) +- + /* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 76bff2b..c7a14e2 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd) + #endif /* __HAVE_ARCH_PMD_WRITE */ + #endif + ++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL ++static inline unsigned long pax_open_kernel(void) { return 0; } ++#endif ++ ++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_GENERIC_PGTABLE_H */ +diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h +index ac68c99..90d3439 100644 +--- a/include/asm-generic/uaccess.h ++++ 
b/include/asm-generic/uaccess.h +@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long); + */ + #ifndef __copy_from_user + static inline __must_check long __copy_from_user(void *to, ++ const void __user * from, unsigned long n) __size_overflow(3); ++static inline __must_check long __copy_from_user(void *to, + const void __user * from, unsigned long n) + { + if (__builtin_constant_p(n)) { +@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to, + + #ifndef __copy_to_user + static inline __must_check long __copy_to_user(void __user *to, ++ const void *from, unsigned long n) __size_overflow(3); ++static inline __must_check long __copy_to_user(void __user *to, + const void *from, unsigned long n) + { + if (__builtin_constant_p(n)) { +@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); + -EFAULT; \ + }) + ++static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1); + static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) + { + size = __copy_from_user(x, ptr, size); +@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn)); + #define __copy_to_user_inatomic __copy_to_user + #endif + ++static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3); + static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) + { +@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to, + return n; + } + ++static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3); + static inline long copy_to_user(void __user *to, + const void *from, unsigned long n) + { +@@ -309,6 +316,8 @@ static inline long strlen_user(const char __user *src) + */ + #ifndef __clear_user + static inline __must_check unsigned long ++__clear_user(void __user *to, unsigned long n) __size_overflow(2); ++static inline __must_check unsigned long + __clear_user(void __user *to, unsigned long n) + { + memset((void __force *)to, 0, n); +@@ -317,6 +326,8 @@ __clear_user(void __user *to, unsigned long n) + #endif + + static inline __must_check unsigned long ++clear_user(void __user *to, unsigned long n) __size_overflow(2); ++static inline __must_check unsigned long + clear_user(void __user *to, unsigned long n) + { + might_sleep(); +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index b5e2e4c..6a5373e 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -217,6 +217,7 @@ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rodata) = .; \ + *(.rodata) *(.rodata.*) \ ++ *(.data..read_only) \ + *(__vermagic) /* Kernel version magic */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ +@@ -722,17 +723,18 @@ + * section in the linker script will go there too. @phdr should have + * a leading colon. + * +- * Note that this macros defines __per_cpu_load as an absolute symbol. ++ * Note that this macros defines per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU_SECTION. + */ + #define PERCPU_VADDR(cacheline, vaddr, phdr) \ +- VMLINUX_SYMBOL(__per_cpu_load) = .; \ +- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ ++ per_cpu_load = .; \ ++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ + - LOAD_OFFSET) { \ ++ VMLINUX_SYMBOL(__per_cpu_load) = . 
+ per_cpu_load; \ + PERCPU_INPUT(cacheline) \ + } phdr \ +- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); ++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu); + + /** + * PERCPU_SECTION - define output section for percpu area, simple version +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index bf4b2dc..2d0762f 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -72,6 +72,7 @@ + #include <linux/workqueue.h> + #include <linux/poll.h> + #include <asm/pgalloc.h> ++#include <asm/local.h> + #include "drm.h" + + #include <linux/idr.h> +@@ -1038,7 +1039,7 @@ struct drm_device { + + /** \name Usage Counters */ + /*@{ */ +- int open_count; /**< Outstanding files open */ ++ local_t open_count; /**< Outstanding files open */ + atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ + atomic_t vma_count; /**< Outstanding vma areas open */ + int buf_use; /**< Buffers in use -- cannot alloc */ +@@ -1049,7 +1050,7 @@ struct drm_device { + /*@{ */ + unsigned long counters; + enum drm_stat_type types[15]; +- atomic_t counts[15]; ++ atomic_unchecked_t counts[15]; + /*@} */ + + struct list_head filelist; +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index 73b0712..0b7ef2f 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs { + + /* disable crtc when not in use - more explicit than dpms off */ + void (*disable)(struct drm_crtc *crtc); +-}; ++} __no_const; + + struct drm_encoder_helper_funcs { + void (*dpms)(struct drm_encoder *encoder, int mode); +@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs { + struct drm_connector *connector); + /* disable encoder when not in use - more explicit than dpms off */ + void (*disable)(struct drm_encoder *encoder); +-}; ++} __no_const; + + struct drm_connector_helper_funcs { + int (*get_modes)(struct drm_connector *connector); +diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h +index 26c1f78..6722682 100644 +--- a/include/drm/ttm/ttm_memory.h ++++ b/include/drm/ttm/ttm_memory.h +@@ -47,7 +47,7 @@ + + struct ttm_mem_shrink { + int (*do_shrink) (struct ttm_mem_shrink *); +-}; ++} __no_const; + + /** + * struct ttm_mem_global - Global memory accounting structure. 
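
The asm-generic hunks above introduce the PAX_REFCOUNT split: reference counters keep the checked atomic_t operations, while counters that are allowed to wrap (statistics such as the drm_device counts[] array) are converted to atomic_unchecked_t so they bypass the overflow check. The following is only a minimal plain-C sketch of that idea; the names mirror the patch but the explicit branch is illustrative, the actual patch implements the check with architecture-specific overflow handling rather than a comparison.

    /* Sketch only, not kernel code: a "checked" increment refuses to overflow,
     * the "unchecked" variant is an ordinary wrap-around counter for statistics. */
    #include <limits.h>
    #include <stdio.h>

    typedef struct { volatile int counter; } atomic_t;           /* refcount: overflow is a bug    */
    typedef struct { volatile int counter; } atomic_unchecked_t; /* statistic: wrap-around is fine */

    static void atomic_inc(atomic_t *v)
    {
            if (v->counter == INT_MAX) {                 /* would overflow: report and saturate */
                    fprintf(stderr, "refcount overflow detected\n");
                    return;
            }
            v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            v->counter++;                                /* plain increment, may wrap */
    }

    int main(void)
    {
            atomic_t refs = { 1 };                       /* e.g. an object reference count        */
            atomic_unchecked_t ios = { 0 };              /* e.g. a drm_device-style I/O statistic */

            atomic_inc(&refs);
            atomic_inc_unchecked(&ios);
            printf("refs=%d ios=%d\n", refs.counter, ios.counter);
            return 0;
    }
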
+diff --git a/include/linux/a.out.h b/include/linux/a.out.h +index e86dfca..40cc55f 100644 +--- a/include/linux/a.out.h ++++ b/include/linux/a.out.h +@@ -39,6 +39,14 @@ enum machine_type { + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ + }; + ++/* Constants for the N_FLAGS field */ ++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ ++ + #if !defined (N_MAGIC) + #define N_MAGIC(exec) ((exec).a_info & 0xffff) + #endif +diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h +index 49a83ca..df96b54 100644 +--- a/include/linux/atmdev.h ++++ b/include/linux/atmdev.h +@@ -237,7 +237,7 @@ struct compat_atm_iobuf { + #endif + + struct k_atm_aal_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + }; +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h +index fd88a39..8a801b4 100644 +--- a/include/linux/binfmts.h ++++ b/include/linux/binfmts.h +@@ -18,7 +18,7 @@ struct pt_regs; + #define BINPRM_BUF_SIZE 128 + + #ifdef __KERNEL__ +-#include <linux/list.h> ++#include <linux/sched.h> + + #define CORENAME_MAX_SIZE 128 + +@@ -58,6 +58,7 @@ struct linux_binprm { + unsigned interp_flags; + unsigned interp_data; + unsigned long loader, exec; ++ char tcomm[TASK_COMM_LEN]; + }; + + #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 +@@ -88,6 +89,7 @@ struct linux_binfmt { + int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *cprm); ++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags); + unsigned long min_coredump; /* minimal dump size */ + }; + +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 0ed1eb0..3ab569b 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -1315,7 +1315,7 @@ struct block_device_operations { + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + struct module *owner; +-}; ++} __do_const; + + extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h +index 4d1a074..88f929a 100644 +--- a/include/linux/blktrace_api.h ++++ b/include/linux/blktrace_api.h +@@ -162,7 +162,7 @@ struct blk_trace { + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; +- atomic_t dropped; ++ atomic_unchecked_t dropped; + }; + + extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h +index 83195fb..0b0f77d 100644 +--- a/include/linux/byteorder/little_endian.h ++++ b/include/linux/byteorder/little_endian.h +@@ -42,51 +42,51 @@ + + static inline __le64 __cpu_to_le64p(const __u64 *p) + { +- return (__force __le64)*p; ++ return (__force const __le64)*p; + } + static inline __u64 __le64_to_cpup(const __le64 *p) + { +- return (__force __u64)*p; ++ return (__force const __u64)*p; + } + static inline __le32 __cpu_to_le32p(const __u32 *p) + { +- return (__force __le32)*p; ++ return (__force const __le32)*p; + } + static inline 
__u32 __le32_to_cpup(const __le32 *p) + { +- return (__force __u32)*p; ++ return (__force const __u32)*p; + } + static inline __le16 __cpu_to_le16p(const __u16 *p) + { +- return (__force __le16)*p; ++ return (__force const __le16)*p; + } + static inline __u16 __le16_to_cpup(const __le16 *p) + { +- return (__force __u16)*p; ++ return (__force const __u16)*p; + } + static inline __be64 __cpu_to_be64p(const __u64 *p) + { +- return (__force __be64)__swab64p(p); ++ return (__force const __be64)__swab64p(p); + } + static inline __u64 __be64_to_cpup(const __be64 *p) + { +- return __swab64p((__u64 *)p); ++ return __swab64p((const __u64 *)p); + } + static inline __be32 __cpu_to_be32p(const __u32 *p) + { +- return (__force __be32)__swab32p(p); ++ return (__force const __be32)__swab32p(p); + } + static inline __u32 __be32_to_cpup(const __be32 *p) + { +- return __swab32p((__u32 *)p); ++ return __swab32p((const __u32 *)p); + } + static inline __be16 __cpu_to_be16p(const __u16 *p) + { +- return (__force __be16)__swab16p(p); ++ return (__force const __be16)__swab16p(p); + } + static inline __u16 __be16_to_cpup(const __be16 *p) + { +- return __swab16p((__u16 *)p); ++ return __swab16p((const __u16 *)p); + } + #define __cpu_to_le64s(x) do { (void)(x); } while (0) + #define __le64_to_cpus(x) do { (void)(x); } while (0) +diff --git a/include/linux/cache.h b/include/linux/cache.h +index 4c57065..4307975 100644 +--- a/include/linux/cache.h ++++ b/include/linux/cache.h +@@ -16,6 +16,10 @@ + #define __read_mostly + #endif + ++#ifndef __read_only ++#define __read_only __read_mostly ++#endif ++ + #ifndef ____cacheline_aligned + #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) + #endif +diff --git a/include/linux/capability.h b/include/linux/capability.h +index a63d13d..069bfd5 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -548,6 +548,9 @@ extern bool capable(int cap); + extern bool ns_capable(struct user_namespace *ns, int cap); + extern bool task_ns_capable(struct task_struct *t, int cap); + extern bool nsown_capable(int cap); ++extern bool task_ns_capable_nolog(struct task_struct *t, int cap); ++extern bool ns_capable_nolog(struct user_namespace *ns, int cap); ++extern bool capable_nolog(int cap); + + /* audit system wants to get cap info from files as well */ + extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); +diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h +index 04ffb2e..6799180 100644 +--- a/include/linux/cleancache.h ++++ b/include/linux/cleancache.h +@@ -31,7 +31,7 @@ struct cleancache_ops { + void (*flush_page)(int, struct cleancache_filekey, pgoff_t); + void (*flush_inode)(int, struct cleancache_filekey); + void (*flush_fs)(int); +-}; ++} __no_const; + + extern struct cleancache_ops + cleancache_register_ops(struct cleancache_ops *ops); +diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h +index dfadc96..d90deca 100644 +--- a/include/linux/compiler-gcc4.h ++++ b/include/linux/compiler-gcc4.h +@@ -31,6 +31,15 @@ + + + #if __GNUC_MINOR__ >= 5 ++ ++#ifdef CONSTIFY_PLUGIN ++#define __no_const __attribute__((no_const)) ++#define __do_const __attribute__((do_const)) ++#endif ++ ++#ifdef SIZE_OVERFLOW_PLUGIN ++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__))) ++#endif + /* + * Mark a position in code as unreachable. 
This can be used to + * suppress control flow warnings after asm blocks that transfer +@@ -46,6 +55,11 @@ + #define __noclone __attribute__((__noclone__)) + + #endif ++ ++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__))) ++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg)) ++#define __bos0(ptr) __bos((ptr), 0) ++#define __bos1(ptr) __bos((ptr), 1) + #endif + + #if __GNUC_MINOR__ > 0 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 320d6c9..1221a6b 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -5,31 +5,62 @@ + + #ifdef __CHECKER__ + # define __user __attribute__((noderef, address_space(1))) ++# define __force_user __force __user + # define __kernel __attribute__((address_space(0))) ++# define __force_kernel __force __kernel + # define __safe __attribute__((safe)) + # define __force __attribute__((force)) + # define __nocast __attribute__((nocast)) + # define __iomem __attribute__((noderef, address_space(2))) ++# define __force_iomem __force __iomem + # define __acquires(x) __attribute__((context(x,0,1))) + # define __releases(x) __attribute__((context(x,1,0))) + # define __acquire(x) __context__(x,1) + # define __release(x) __context__(x,-1) + # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) + # define __percpu __attribute__((noderef, address_space(3))) ++# define __force_percpu __force __percpu + #ifdef CONFIG_SPARSE_RCU_POINTER + # define __rcu __attribute__((noderef, address_space(4))) ++# define __force_rcu __force __rcu + #else + # define __rcu ++# define __force_rcu + #endif + extern void __chk_user_ptr(const volatile void __user *); + extern void __chk_io_ptr(const volatile void __iomem *); ++#elif defined(CHECKER_PLUGIN) ++//# define __user ++//# define __force_user ++//# define __kernel ++//# define __force_kernel ++# define __safe ++# define __force ++# define __nocast ++# define __iomem ++# define __force_iomem ++# define __chk_user_ptr(x) (void)0 ++# define __chk_io_ptr(x) (void)0 ++# define __builtin_warning(x, y...) (1) ++# define __acquires(x) ++# define __releases(x) ++# define __acquire(x) (void)0 ++# define __release(x) (void)0 ++# define __cond_lock(x,c) (c) ++# define __percpu ++# define __force_percpu ++# define __rcu ++# define __force_rcu + #else + # define __user ++# define __force_user + # define __kernel ++# define __force_kernel + # define __safe + # define __force + # define __nocast + # define __iomem ++# define __force_iomem + # define __chk_user_ptr(x) (void)0 + # define __chk_io_ptr(x) (void)0 + # define __builtin_warning(x, y...) (1) +@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); + # define __release(x) (void)0 + # define __cond_lock(x,c) (c) + # define __percpu ++# define __force_percpu + # define __rcu ++# define __force_rcu + #endif + + #ifdef __KERNEL__ +@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + # define __attribute_const__ /* unimplemented */ + #endif + ++#ifndef __no_const ++# define __no_const ++#endif ++ ++#ifndef __do_const ++# define __do_const ++#endif ++ ++#ifndef __size_overflow ++# define __size_overflow(...) ++#endif + /* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. +@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + #define __cold + #endif + ++#ifndef __alloc_size ++#define __alloc_size(...) 
++#endif ++ ++#ifndef __bos ++#define __bos(ptr, arg) ++#endif ++ ++#ifndef __bos0 ++#define __bos0(ptr) ++#endif ++ ++#ifndef __bos1 ++#define __bos1(ptr) ++#endif ++ + /* Simple shorthand for a section definition */ + #ifndef __section + # define __section(S) __attribute__ ((__section__(#S))) +@@ -306,6 +366,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. + */ +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x)) ++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x)) + + #endif /* __LINUX_COMPILER_H */ +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h +index e9eaec5..bfeb9bb 100644 +--- a/include/linux/cpuset.h ++++ b/include/linux/cpuset.h +@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void) + * nodemask. + */ + smp_mb(); +- --ACCESS_ONCE(current->mems_allowed_change_disable); ++ --ACCESS_ONCE_RW(current->mems_allowed_change_disable); + } + + static inline void set_mems_allowed(nodemask_t nodemask) +diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h +index b936763..48685ee 100644 +--- a/include/linux/crash_dump.h ++++ b/include/linux/crash_dump.h +@@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr; + extern unsigned long long elfcorehdr_size; + + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, +- unsigned long, int); ++ unsigned long, int) __size_overflow(3); + + /* Architecture code defines this if there are other possible ELF + * machine types, e.g. on bi-arch capable hardware. */ +diff --git a/include/linux/cred.h b/include/linux/cred.h +index 4030896..8d6f342 100644 +--- a/include/linux/cred.h ++++ b/include/linux/cred.h +@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk) + static inline void validate_process_creds(void) + { + } ++static inline void validate_task_creds(struct task_struct *task) ++{ ++} + #endif + + /** +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index 8a94217..15d49e3 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -365,7 +365,7 @@ struct cipher_tfm { + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +-}; ++} __no_const; + + struct hash_tfm { + int (*init)(struct hash_desc *desc); +@@ -386,13 +386,13 @@ struct compress_tfm { + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +-}; ++} __no_const; + + struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +-}; ++} __no_const; + + #define crt_ablkcipher crt_u.ablkcipher + #define crt_aead crt_u.aead +diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h +index 7925bf0..d5143d2 100644 +--- a/include/linux/decompress/mm.h ++++ b/include/linux/decompress/mm.h +@@ -77,7 +77,7 @@ static void free(void *where) + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + + #define large_malloc(a) vmalloc(a) +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h 
+index e13117c..e9fc938 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -46,7 +46,7 @@ struct dma_map_ops { + u64 (*get_required_mask)(struct device *dev); + #endif + int is_phys; +-}; ++} __do_const; + + #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) + +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 2362a0b..cfaf8fcc 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -446,7 +446,7 @@ struct efivar_operations { + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; +-}; ++} __no_const; + + struct efivars { + /* +diff --git a/include/linux/elf.h b/include/linux/elf.h +index 31f0508..5421c01 100644 +--- a/include/linux/elf.h ++++ b/include/linux/elf.h +@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword; + #define PT_GNU_EH_FRAME 0x6474e550 + + #define PT_GNU_STACK (PT_LOOS + 0x474e551) ++#define PT_GNU_RELRO (PT_LOOS + 0x474e552) ++ ++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) ++ ++/* Constants for the e_flags field */ ++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + + /* + * Extended Numbering +@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword; + #define DT_DEBUG 21 + #define DT_TEXTREL 22 + #define DT_JMPREL 23 ++#define DT_FLAGS 30 ++ #define DF_TEXTREL 0x00000004 + #define DT_ENCODING 32 + #define OLD_DT_LOOS 0x60000000 + #define DT_LOOS 0x6000000d +@@ -252,6 +265,19 @@ typedef struct elf64_hdr { + #define PF_W 0x2 + #define PF_X 0x1 + ++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ ++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ ++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ ++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ ++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ ++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ ++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ ++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ ++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ ++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */ ++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ ++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ ++ + typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; +@@ -344,6 +370,8 @@ typedef struct elf64_shdr { + #define EI_OSABI 7 + #define EI_PAD 8 + ++#define EI_PAX 14 ++ + #define ELFMAG0 0x7f /* EI_MAG */ + #define ELFMAG1 'E' + #define ELFMAG2 'L' +@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC []; + #define elf_note elf32_note + #define elf_addr_t Elf32_Off + #define Elf_Half Elf32_Half ++#define elf_dyn Elf32_Dyn + + #else + +@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC []; + #define elf_note elf64_note + #define elf_addr_t Elf64_Off + #define Elf_Half Elf64_Half ++#define elf_dyn Elf64_Dyn + + #endif + +diff --git a/include/linux/filter.h b/include/linux/filter.h +index 8eeb205..d59bfa2 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. 
*/ + + struct sk_buff; + struct sock; ++struct bpf_jit_work; + + struct sk_filter + { +@@ -141,6 +142,9 @@ struct sk_filter + unsigned int len; /* Number of filter blocks */ + unsigned int (*bpf_func)(const struct sk_buff *skb, + const struct sock_filter *filter); ++#ifdef CONFIG_BPF_JIT ++ struct bpf_jit_work *work; ++#endif + struct rcu_head rcu; + struct sock_filter insns[0]; + }; +diff --git a/include/linux/firewire.h b/include/linux/firewire.h +index 84ccf8e..2e9b14c 100644 +--- a/include/linux/firewire.h ++++ b/include/linux/firewire.h +@@ -428,7 +428,7 @@ struct fw_iso_context { + union { + fw_iso_callback_t sc; + fw_iso_mc_callback_t mc; +- } callback; ++ } __no_const callback; + void *callback_data; + }; + +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 10b2288..09180e4 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1609,7 +1609,8 @@ struct file_operations { + int (*setlease)(struct file *, long, struct file_lock **); + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); +-}; ++} __do_const; ++typedef struct file_operations __no_const file_operations_no_const; + + struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); +diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h +index 003dc0f..3c4ea97 100644 +--- a/include/linux/fs_struct.h ++++ b/include/linux/fs_struct.h +@@ -6,7 +6,7 @@ + #include <linux/seqlock.h> + + struct fs_struct { +- int users; ++ atomic_t users; + spinlock_t lock; + seqcount_t seq; + int umask; +diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h +index ce31408..b1ad003 100644 +--- a/include/linux/fscache-cache.h ++++ b/include/linux/fscache-cache.h +@@ -102,7 +102,7 @@ struct fscache_operation { + fscache_operation_release_t release; + }; + +-extern atomic_t fscache_op_debug_id; ++extern atomic_unchecked_t fscache_op_debug_id; + extern void fscache_op_work_func(struct work_struct *work); + + extern void fscache_enqueue_operation(struct fscache_operation *); +@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, + { + INIT_WORK(&op->work, fscache_op_work_func); + atomic_set(&op->usage, 1); +- op->debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + op->processor = processor; + op->release = release; + INIT_LIST_HEAD(&op->pend_link); +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h +index 2a53f10..0187fdf 100644 +--- a/include/linux/fsnotify.h ++++ b/include/linux/fsnotify.h +@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) + */ + static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) + { +- return kstrdup(name, GFP_KERNEL); ++ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL); + } + + /* +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index 91d0e0a3..035666b 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -105,6 +105,7 @@ struct fsnotify_ops { + void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); + void (*free_event_priv)(struct fsnotify_event_private_data *priv); + }; ++typedef struct fsnotify_ops __no_const fsnotify_ops_no_const; + + /* + * A group is a "thing" that wants to receive notification about filesystem +diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h 
+index c3da42d..c70e0df 100644 +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h +@@ -97,7 +97,7 @@ struct trace_event_functions { + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +-}; ++} __no_const; + + struct trace_event { + struct hlist_node node; +@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, + extern int trace_add_event_call(struct ftrace_event_call *call); + extern void trace_remove_event_call(struct ftrace_event_call *call); + +-#define is_signed_type(type) (((type)(-1)) < 0) ++#define is_signed_type(type) (((type)(-1)) < (type)1) + + int trace_set_clr_event(const char *system, const char *event, int set); + +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index c6f7f6a..aa0f7d3 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -185,7 +185,7 @@ struct gendisk { + struct kobject *slave_dir; + + struct timer_rand_state *random; +- atomic_t sync_io; /* RAID */ ++ atomic_unchecked_t sync_io; /* RAID */ + struct disk_events *ev; + #ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +diff --git a/include/linux/gracl.h b/include/linux/gracl.h +new file mode 100644 +index 0000000..8a130b6 +--- /dev/null ++++ b/include/linux/gracl.h +@@ -0,0 +1,319 @@ ++#ifndef GR_ACL_H ++#define GR_ACL_H ++ ++#include <linux/grdefs.h> ++#include <linux/resource.h> ++#include <linux/capability.h> ++#include <linux/dcache.h> ++#include <asm/resource.h> ++ ++/* Major status information */ ++ ++#define GR_VERSION "grsecurity 2.9" ++#define GRSECURITY_VERSION 0x2900 ++ ++enum { ++ GR_SHUTDOWN = 0, ++ GR_ENABLE = 1, ++ GR_SPROLE = 2, ++ GR_RELOAD = 3, ++ GR_SEGVMOD = 4, ++ GR_STATUS = 5, ++ GR_UNSPROLE = 6, ++ GR_PASSSET = 7, ++ GR_SPROLEPAM = 8, ++}; ++ ++/* Password setup definitions ++ * kernel/grhash.c */ ++enum { ++ GR_PW_LEN = 128, ++ GR_SALT_LEN = 16, ++ GR_SHA_LEN = 32, ++}; ++ ++enum { ++ GR_SPROLE_LEN = 64, ++}; ++ ++enum { ++ GR_NO_GLOB = 0, ++ GR_REG_GLOB, ++ GR_CREATE_GLOB ++}; ++ ++#define GR_NLIMITS 32 ++ ++/* Begin Data Structures */ ++ ++struct sprole_pw { ++ unsigned char *rolename; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ ++}; ++ ++struct name_entry { ++ __u32 key; ++ ino_t inode; ++ dev_t device; ++ char *name; ++ __u16 len; ++ __u8 deleted; ++ struct name_entry *prev; ++ struct name_entry *next; ++}; ++ ++struct inodev_entry { ++ struct name_entry *nentry; ++ struct inodev_entry *prev; ++ struct inodev_entry *next; ++}; ++ ++struct acl_role_db { ++ struct acl_role_label **r_hash; ++ __u32 r_size; ++}; ++ ++struct inodev_db { ++ struct inodev_entry **i_hash; ++ __u32 i_size; ++}; ++ ++struct name_db { ++ struct name_entry **n_hash; ++ __u32 n_size; ++}; ++ ++struct crash_uid { ++ uid_t uid; ++ unsigned long expires; ++}; ++ ++struct gr_hash_struct { ++ void **table; ++ void **nametable; ++ void *first; ++ __u32 table_size; ++ __u32 used_size; ++ int type; ++}; ++ ++/* Userspace Grsecurity ACL data structures */ ++ ++struct acl_subject_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ kernel_cap_t cap_mask; ++ kernel_cap_t cap_lower; ++ kernel_cap_t cap_invert_audit; ++ ++ struct rlimit res[GR_NLIMITS]; ++ __u32 resmask; ++ ++ __u8 user_trans_type; ++ __u8 group_trans_type; ++ uid_t *user_transitions; ++ gid_t *group_transitions; ++ __u16 user_trans_num; ++ __u16 group_trans_num; ++ ++ __u32 sock_families[2]; ++ __u32 ip_proto[8]; ++ __u32 ip_type; ++ 
struct acl_ip_label **ips; ++ __u32 ip_num; ++ __u32 inaddr_any_override; ++ ++ __u32 crashes; ++ unsigned long expires; ++ ++ struct acl_subject_label *parent_subject; ++ struct gr_hash_struct *hash; ++ struct acl_subject_label *prev; ++ struct acl_subject_label *next; ++ ++ struct acl_object_label **obj_hash; ++ __u32 obj_hash_size; ++ __u16 pax_flags; ++}; ++ ++struct role_allowed_ip { ++ __u32 addr; ++ __u32 netmask; ++ ++ struct role_allowed_ip *prev; ++ struct role_allowed_ip *next; ++}; ++ ++struct role_transition { ++ char *rolename; ++ ++ struct role_transition *prev; ++ struct role_transition *next; ++}; ++ ++struct acl_role_label { ++ char *rolename; ++ uid_t uidgid; ++ __u16 roletype; ++ ++ __u16 auth_attempts; ++ unsigned long expires; ++ ++ struct acl_subject_label *root_label; ++ struct gr_hash_struct *hash; ++ ++ struct acl_role_label *prev; ++ struct acl_role_label *next; ++ ++ struct role_transition *transitions; ++ struct role_allowed_ip *allowed_ips; ++ uid_t *domain_children; ++ __u16 domain_child_num; ++ ++ umode_t umask; ++ ++ struct acl_subject_label **subj_hash; ++ __u32 subj_hash_size; ++}; ++ ++struct user_acl_role_db { ++ struct acl_role_label **r_table; ++ __u32 num_pointers; /* Number of allocations to track */ ++ __u32 num_roles; /* Number of roles */ ++ __u32 num_domain_children; /* Number of domain children */ ++ __u32 num_subjects; /* Number of subjects */ ++ __u32 num_objects; /* Number of objects */ ++}; ++ ++struct acl_object_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ ++ struct acl_subject_label *nested; ++ struct acl_object_label *globbed; ++ ++ /* next two structures not used */ ++ ++ struct acl_object_label *prev; ++ struct acl_object_label *next; ++}; ++ ++struct acl_ip_label { ++ char *iface; ++ __u32 addr; ++ __u32 netmask; ++ __u16 low, high; ++ __u8 mode; ++ __u32 type; ++ __u32 proto[8]; ++ ++ /* next two structures not used */ ++ ++ struct acl_ip_label *prev; ++ struct acl_ip_label *next; ++}; ++ ++struct gr_arg { ++ struct user_acl_role_db role_db; ++ unsigned char pw[GR_PW_LEN]; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++ unsigned char sp_role[GR_SPROLE_LEN]; ++ struct sprole_pw *sprole_pws; ++ dev_t segv_device; ++ ino_t segv_inode; ++ uid_t segv_uid; ++ __u16 num_sprole_pws; ++ __u16 mode; ++}; ++ ++struct gr_arg_wrapper { ++ struct gr_arg *arg; ++ __u32 version; ++ __u32 size; ++}; ++ ++struct subject_map { ++ struct acl_subject_label *user; ++ struct acl_subject_label *kernel; ++ struct subject_map *prev; ++ struct subject_map *next; ++}; ++ ++struct acl_subj_map_db { ++ struct subject_map **s_hash; ++ __u32 s_size; ++}; ++ ++/* End Data Structures Section */ ++ ++/* Hash functions generated by empirical testing by Brad Spengler ++ Makes good use of the low bits of the inode. Generally 0-1 times ++ in loop for successful match. 0-3 for unsuccessful match. 
++ Shift/add algorithm with modulus of table size and an XOR*/ ++ ++static __inline__ unsigned int ++rhash(const uid_t uid, const __u16 type, const unsigned int sz) ++{ ++ return ((((uid + type) << (16 + type)) ^ uid) % sz); ++} ++ ++ static __inline__ unsigned int ++shash(const struct acl_subject_label *userp, const unsigned int sz) ++{ ++ return ((const unsigned long)userp % sz); ++} ++ ++static __inline__ unsigned int ++fhash(const ino_t ino, const dev_t dev, const unsigned int sz) ++{ ++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); ++} ++ ++static __inline__ unsigned int ++nhash(const char *name, const __u16 len, const unsigned int sz) ++{ ++ return full_name_hash((const unsigned char *)name, len) % sz; ++} ++ ++#define FOR_EACH_ROLE_START(role) \ ++ role = role_list; \ ++ while (role) { ++ ++#define FOR_EACH_ROLE_END(role) \ ++ role = role->prev; \ ++ } ++ ++#define FOR_EACH_SUBJECT_START(role,subj,iter) \ ++ subj = NULL; \ ++ iter = 0; \ ++ while (iter < role->subj_hash_size) { \ ++ if (subj == NULL) \ ++ subj = role->subj_hash[iter]; \ ++ if (subj == NULL) { \ ++ iter++; \ ++ continue; \ ++ } ++ ++#define FOR_EACH_SUBJECT_END(subj,iter) \ ++ subj = subj->next; \ ++ if (subj == NULL) \ ++ iter++; \ ++ } ++ ++ ++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ ++ subj = role->hash->first; \ ++ while (subj != NULL) { ++ ++#define FOR_EACH_NESTED_SUBJECT_END(subj) \ ++ subj = subj->next; \ ++ } ++ ++#endif ++ +diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h +new file mode 100644 +index 0000000..323ecf2 +--- /dev/null ++++ b/include/linux/gralloc.h +@@ -0,0 +1,9 @@ ++#ifndef __GRALLOC_H ++#define __GRALLOC_H ++ ++void acl_free_all(void); ++int acl_alloc_stack_init(unsigned long size); ++void *acl_alloc(unsigned long len); ++void *acl_alloc_num(unsigned long num, unsigned long len); ++ ++#endif +diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h +new file mode 100644 +index 0000000..b30e9bc +--- /dev/null ++++ b/include/linux/grdefs.h +@@ -0,0 +1,140 @@ ++#ifndef GRDEFS_H ++#define GRDEFS_H ++ ++/* Begin grsecurity status declarations */ ++ ++enum { ++ GR_READY = 0x01, ++ GR_STATUS_INIT = 0x00 // disabled state ++}; ++ ++/* Begin ACL declarations */ ++ ++/* Role flags */ ++ ++enum { ++ GR_ROLE_USER = 0x0001, ++ GR_ROLE_GROUP = 0x0002, ++ GR_ROLE_DEFAULT = 0x0004, ++ GR_ROLE_SPECIAL = 0x0008, ++ GR_ROLE_AUTH = 0x0010, ++ GR_ROLE_NOPW = 0x0020, ++ GR_ROLE_GOD = 0x0040, ++ GR_ROLE_LEARN = 0x0080, ++ GR_ROLE_TPE = 0x0100, ++ GR_ROLE_DOMAIN = 0x0200, ++ GR_ROLE_PAM = 0x0400, ++ GR_ROLE_PERSIST = 0x0800 ++}; ++ ++/* ACL Subject and Object mode flags */ ++enum { ++ GR_DELETED = 0x80000000 ++}; ++ ++/* ACL Object-only mode flags */ ++enum { ++ GR_READ = 0x00000001, ++ GR_APPEND = 0x00000002, ++ GR_WRITE = 0x00000004, ++ GR_EXEC = 0x00000008, ++ GR_FIND = 0x00000010, ++ GR_INHERIT = 0x00000020, ++ GR_SETID = 0x00000040, ++ GR_CREATE = 0x00000080, ++ GR_DELETE = 0x00000100, ++ GR_LINK = 0x00000200, ++ GR_AUDIT_READ = 0x00000400, ++ GR_AUDIT_APPEND = 0x00000800, ++ GR_AUDIT_WRITE = 0x00001000, ++ GR_AUDIT_EXEC = 0x00002000, ++ GR_AUDIT_FIND = 0x00004000, ++ GR_AUDIT_INHERIT= 0x00008000, ++ GR_AUDIT_SETID = 0x00010000, ++ GR_AUDIT_CREATE = 0x00020000, ++ GR_AUDIT_DELETE = 0x00040000, ++ GR_AUDIT_LINK = 0x00080000, ++ GR_PTRACERD = 0x00100000, ++ GR_NOPTRACE = 0x00200000, ++ GR_SUPPRESS = 0x00400000, ++ GR_NOLEARN = 0x00800000, ++ GR_INIT_TRANSFER= 0x01000000 ++}; ++ ++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | 
GR_AUDIT_EXEC | \ ++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) ++ ++/* ACL subject-only mode flags */ ++enum { ++ GR_KILL = 0x00000001, ++ GR_VIEW = 0x00000002, ++ GR_PROTECTED = 0x00000004, ++ GR_LEARN = 0x00000008, ++ GR_OVERRIDE = 0x00000010, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_DUMMY = 0x00000020, ++ GR_PROTSHM = 0x00000040, ++ GR_KILLPROC = 0x00000080, ++ GR_KILLIPPROC = 0x00000100, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_NOTROJAN = 0x00000200, ++ GR_PROTPROCFD = 0x00000400, ++ GR_PROCACCT = 0x00000800, ++ GR_RELAXPTRACE = 0x00001000, ++ GR_NESTED = 0x00002000, ++ GR_INHERITLEARN = 0x00004000, ++ GR_PROCFIND = 0x00008000, ++ GR_POVERRIDE = 0x00010000, ++ GR_KERNELAUTH = 0x00020000, ++ GR_ATSECURE = 0x00040000, ++ GR_SHMEXEC = 0x00080000 ++}; ++ ++enum { ++ GR_PAX_ENABLE_SEGMEXEC = 0x0001, ++ GR_PAX_ENABLE_PAGEEXEC = 0x0002, ++ GR_PAX_ENABLE_MPROTECT = 0x0004, ++ GR_PAX_ENABLE_RANDMMAP = 0x0008, ++ GR_PAX_ENABLE_EMUTRAMP = 0x0010, ++ GR_PAX_DISABLE_SEGMEXEC = 0x0100, ++ GR_PAX_DISABLE_PAGEEXEC = 0x0200, ++ GR_PAX_DISABLE_MPROTECT = 0x0400, ++ GR_PAX_DISABLE_RANDMMAP = 0x0800, ++ GR_PAX_DISABLE_EMUTRAMP = 0x1000, ++}; ++ ++enum { ++ GR_ID_USER = 0x01, ++ GR_ID_GROUP = 0x02, ++}; ++ ++enum { ++ GR_ID_ALLOW = 0x01, ++ GR_ID_DENY = 0x02, ++}; ++ ++#define GR_CRASH_RES 31 ++#define GR_UIDTABLE_MAX 500 ++ ++/* begin resource learning section */ ++enum { ++ GR_RLIM_CPU_BUMP = 60, ++ GR_RLIM_FSIZE_BUMP = 50000, ++ GR_RLIM_DATA_BUMP = 10000, ++ GR_RLIM_STACK_BUMP = 1000, ++ GR_RLIM_CORE_BUMP = 10000, ++ GR_RLIM_RSS_BUMP = 500000, ++ GR_RLIM_NPROC_BUMP = 1, ++ GR_RLIM_NOFILE_BUMP = 5, ++ GR_RLIM_MEMLOCK_BUMP = 50000, ++ GR_RLIM_AS_BUMP = 500000, ++ GR_RLIM_LOCKS_BUMP = 2, ++ GR_RLIM_SIGPENDING_BUMP = 5, ++ GR_RLIM_MSGQUEUE_BUMP = 10000, ++ GR_RLIM_NICE_BUMP = 1, ++ GR_RLIM_RTPRIO_BUMP = 1, ++ GR_RLIM_RTTIME_BUMP = 1000000 ++}; ++ ++#endif +diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h +new file mode 100644 +index 0000000..da390f1 +--- /dev/null ++++ b/include/linux/grinternal.h +@@ -0,0 +1,221 @@ ++#ifndef __GRINTERNAL_H ++#define __GRINTERNAL_H ++ ++#ifdef CONFIG_GRKERNSEC ++ ++#include <linux/fs.h> ++#include <linux/mnt_namespace.h> ++#include <linux/nsproxy.h> ++#include <linux/gracl.h> ++#include <linux/grdefs.h> ++#include <linux/grmsg.h> ++ ++void gr_add_learn_entry(const char *fmt, ...) 
++ __attribute__ ((format (printf, 1, 2))); ++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, ++ const struct vfsmount *mnt); ++__u32 gr_check_create(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt, const __u32 mode); ++int gr_check_protected_task(const struct task_struct *task); ++__u32 to_gr_audit(const __u32 reqmode); ++int gr_set_acls(const int type); ++int gr_apply_subject_to_task(struct task_struct *task); ++int gr_acl_is_enabled(void); ++char gr_roletype_to_char(void); ++ ++void gr_handle_alertkill(struct task_struct *task); ++char *gr_to_filename(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename1(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename2(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename3(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++ ++extern int grsec_enable_ptrace_readexec; ++extern int grsec_enable_harden_ptrace; ++extern int grsec_enable_link; ++extern int grsec_enable_fifo; ++extern int grsec_enable_execve; ++extern int grsec_enable_shm; ++extern int grsec_enable_execlog; ++extern int grsec_enable_signal; ++extern int grsec_enable_audit_ptrace; ++extern int grsec_enable_forkfail; ++extern int grsec_enable_time; ++extern int grsec_enable_rofs; ++extern int grsec_enable_chroot_shmat; ++extern int grsec_enable_chroot_mount; ++extern int grsec_enable_chroot_double; ++extern int grsec_enable_chroot_pivot; ++extern int grsec_enable_chroot_chdir; ++extern int grsec_enable_chroot_chmod; ++extern int grsec_enable_chroot_mknod; ++extern int grsec_enable_chroot_fchdir; ++extern int grsec_enable_chroot_nice; ++extern int grsec_enable_chroot_execlog; ++extern int grsec_enable_chroot_caps; ++extern int grsec_enable_chroot_sysctl; ++extern int grsec_enable_chroot_unix; ++extern int grsec_enable_tpe; ++extern int grsec_tpe_gid; ++extern int grsec_enable_tpe_all; ++extern int grsec_enable_tpe_invert; ++extern int grsec_enable_socket_all; ++extern int grsec_socket_all_gid; ++extern int grsec_enable_socket_client; ++extern int grsec_socket_client_gid; ++extern int grsec_enable_socket_server; ++extern int grsec_socket_server_gid; ++extern int grsec_audit_gid; ++extern int grsec_enable_group; ++extern int grsec_enable_audit_textrel; ++extern int grsec_enable_log_rwxmaps; ++extern int grsec_enable_mount; ++extern int grsec_enable_chdir; ++extern int grsec_resource_logging; ++extern int grsec_enable_blackhole; ++extern int grsec_lastack_retries; ++extern int grsec_enable_brute; ++extern int grsec_lock; ++ ++extern spinlock_t grsec_alert_lock; ++extern unsigned long grsec_alert_wtime; ++extern unsigned long grsec_alert_fyet; ++ ++extern spinlock_t grsec_audit_lock; ++ ++extern rwlock_t grsec_exec_file_lock; ++ ++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \ ++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") ++ ++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? 
\ ++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/") ++ ++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted) ++ ++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) ++ ++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \ ++ (task)->pid, (cred)->uid, \ ++ (cred)->euid, (cred)->gid, (cred)->egid, \ ++ gr_parent_task_fullpath(task), \ ++ (task)->real_parent->comm, (task)->real_parent->pid, \ ++ (pcred)->uid, (pcred)->euid, \ ++ (pcred)->gid, (pcred)->egid ++ ++#define GR_CHROOT_CAPS {{ \ ++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ ++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ ++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ ++ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \ ++ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }} ++ ++#define security_learn(normal_msg,args...) \ ++({ \ ++ read_lock(&grsec_exec_file_lock); \ ++ gr_add_learn_entry(normal_msg "\n", ## args); \ ++ read_unlock(&grsec_exec_file_lock); \ ++}) ++ ++enum { ++ GR_DO_AUDIT, ++ GR_DONT_AUDIT, ++ /* used for non-audit messages that we shouldn't kill the task on */ ++ GR_DONT_AUDIT_GOOD ++}; ++ ++enum { ++ GR_TTYSNIFF, ++ GR_RBAC, ++ GR_RBAC_STR, ++ GR_STR_RBAC, ++ GR_RBAC_MODE2, ++ GR_RBAC_MODE3, ++ GR_FILENAME, ++ GR_SYSCTL_HIDDEN, ++ GR_NOARGS, ++ GR_ONE_INT, ++ GR_ONE_INT_TWO_STR, ++ GR_ONE_STR, ++ GR_STR_INT, ++ GR_TWO_STR_INT, ++ GR_TWO_INT, ++ GR_TWO_U64, ++ GR_THREE_INT, ++ GR_FIVE_INT_TWO_STR, ++ GR_TWO_STR, ++ GR_THREE_STR, ++ GR_FOUR_STR, ++ GR_STR_FILENAME, ++ GR_FILENAME_STR, ++ GR_FILENAME_TWO_INT, ++ GR_FILENAME_TWO_INT_STR, ++ GR_TEXTREL, ++ GR_PTRACE, ++ GR_RESOURCE, ++ GR_CAP, ++ GR_SIG, ++ GR_SIG2, ++ GR_CRASH1, ++ GR_CRASH2, ++ GR_PSACCT, ++ GR_RWXMAP ++}; ++ ++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) ++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) ++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) ++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) ++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) ++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) ++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) ++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) ++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) ++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) ++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) ++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) ++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) ++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) ++#define gr_log_two_u64(audit, msg, num1, num2) 
gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2) ++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) ++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) ++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) ++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num) ++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) ++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) ++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) ++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) ++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) ++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) ++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) ++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) ++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) ++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) ++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) ++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) ++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) ++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) ++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) ++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str) ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...); ++ ++#endif ++ ++#endif +diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h +new file mode 100644 +index 0000000..ae576a1 +--- /dev/null ++++ b/include/linux/grmsg.h +@@ -0,0 +1,109 @@ ++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " ++#define GR_STOPMOD_MSG "denied modification of module state by " ++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " ++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " ++#define GR_IOPERM_MSG "denied use of ioperm() by " ++#define GR_IOPL_MSG "denied use of iopl() by " ++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " ++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract 
AF_UNIX socket outside of chroot by " ++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " ++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by " ++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " ++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" ++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" ++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " ++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " ++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " ++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " ++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " ++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " ++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " ++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " ++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " ++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " ++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " ++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " ++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " ++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " ++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " ++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " ++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " ++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " ++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " ++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by " ++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" ++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" ++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " ++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " ++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " ++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " ++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " ++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " ++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " ++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " ++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " ++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " ++#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by " ++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " ++#define GR_INITF_ACL_MSG "init_variables() failed %s by " ++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use <kernel image name> gracl=off from your boot loader" ++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by " ++#define GR_SHUTS_ACL_MSG "shutdown auth success for " ++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " ++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " ++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " ++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " ++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " ++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " ++#define GR_ENABLEF_ACL_MSG "unable to load %s for " ++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" ++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " ++#define GR_RELOADF_ACL_MSG "failed reload of %s for " ++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " ++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " ++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " ++#define GR_SPROLEF_ACL_MSG "special role %s failure for " ++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " ++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " ++#define GR_INVMODE_ACL_MSG "invalid mode %d by " ++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " ++#define GR_FAILFORK_MSG "failed fork with errno %s by " ++#define GR_NICE_CHROOT_MSG "denied priority change by " ++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " ++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " ++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " ++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " ++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " ++#define GR_TIME_MSG "time set by " ++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " ++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " ++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " ++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " ++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by " ++#define GR_BIND_MSG "denied bind() by " ++#define GR_CONNECT_MSG "denied connect() by " ++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" ++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " ++#define GR_CAP_ACL_MSG "use of %s denied for " ++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for " ++#define GR_CAP_ACL_MSG2 "use of %s permitted for " ++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " ++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " ++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " ++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " ++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " ++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " ++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " ++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " ++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by " ++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of 
%.950s by " ++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " ++#define GR_VM86_MSG "denied use of vm86 by " ++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " ++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by " ++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " ++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h +new file mode 100644 +index 0000000..2ccf677 +--- /dev/null ++++ b/include/linux/grsecurity.h +@@ -0,0 +1,229 @@ ++#ifndef GR_SECURITY_H ++#define GR_SECURITY_H ++#include <linux/fs.h> ++#include <linux/fs_struct.h> ++#include <linux/binfmts.h> ++#include <linux/gracl.h> ++ ++/* notify of brain-dead configs */ ++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled." ++#endif ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) ++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." ++#endif ++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) ++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." ++#endif ++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) ++#error "CONFIG_PAX enabled, but no PaX options are enabled." ++#endif ++ ++#include <linux/compat.h> ++ ++struct user_arg_ptr { ++#ifdef CONFIG_COMPAT ++ bool is_compat; ++#endif ++ union { ++ const char __user *const __user *native; ++#ifdef CONFIG_COMPAT ++ compat_uptr_t __user *compat; ++#endif ++ } ptr; ++}; ++ ++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); ++void gr_handle_brute_check(void); ++void gr_handle_kernel_exploit(void); ++int gr_process_user_ban(void); ++ ++char gr_roletype_to_char(void); ++ ++int gr_acl_enable_at_secure(void); ++ ++int gr_check_user_change(int real, int effective, int fs); ++int gr_check_group_change(int real, int effective, int fs); ++ ++void gr_del_task_from_ip_table(struct task_struct *p); ++ ++int gr_pid_is_chrooted(struct task_struct *p); ++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); ++int gr_handle_chroot_nice(void); ++int gr_handle_chroot_sysctl(const int op); ++int gr_handle_chroot_setpriority(struct task_struct *p, ++ const int niceval); ++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); ++int gr_handle_chroot_chroot(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_chroot_chdir(struct path *path); ++int gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const char *dev_name); ++int gr_handle_chroot_pivot(void); ++int gr_handle_chroot_unix(const pid_t pid); ++ ++int gr_handle_rawio(const struct inode *inode); ++ ++void gr_handle_ioperm(void); ++void gr_handle_iopl(void); ++ ++umode_t gr_acl_umask(void); ++ ++int gr_tpe_allow(const struct file *file); ++ ++void gr_set_chroot_entries(struct task_struct *task, 
struct path *path); ++void gr_clear_chroot_entries(struct task_struct *task); ++ ++void gr_log_forkfail(const int retval); ++void gr_log_timechange(void); ++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t); ++void gr_log_chdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_log_chroot_exec(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv); ++void gr_log_remount(const char *devname, const int retval); ++void gr_log_unmount(const char *devname, const int retval); ++void gr_log_mount(const char *from, const char *to, const int retval); ++void gr_log_textrel(struct vm_area_struct *vma); ++void gr_log_rwxmmap(struct file *file); ++void gr_log_rwxmprotect(struct file *file); ++ ++int gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_fifo(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const struct dentry *dir, const int flag, ++ const int acc_mode); ++int gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, ++ const int mode, const char *to); ++ ++int gr_is_capable(const int cap); ++int gr_is_capable_nolog(const int cap); ++void gr_learn_resource(const struct task_struct *task, const int limit, ++ const unsigned long wanted, const int gt); ++void gr_copy_label(struct task_struct *tsk); ++void gr_handle_crash(struct task_struct *task, const int sig); ++int gr_handle_signal(const struct task_struct *p, const int sig); ++int gr_check_crash_uid(const uid_t uid); ++int gr_check_protected_task(const struct task_struct *task); ++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); ++int gr_acl_handle_mmap(const struct file *file, ++ const unsigned long prot); ++int gr_acl_handle_mprotect(const struct file *file, ++ const unsigned long prot); ++int gr_check_hidden_task(const struct task_struct *tsk); ++__u32 gr_acl_handle_truncate(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_utime(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_access(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int fmode); ++__u32 gr_acl_handle_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, umode_t *mode); ++__u32 gr_acl_handle_chown(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_setxattr(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_ptrace(struct task_struct *task, const long request); ++int gr_handle_proc_ptrace(struct task_struct *task); ++__u32 gr_acl_handle_execve(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_check_crash_exec(const struct file *filp); ++int gr_acl_is_enabled(void); ++void gr_set_kernel_label(struct task_struct *task); ++void gr_set_role_label(struct task_struct *task, const uid_t uid, ++ const gid_t gid); ++int gr_set_proc_label(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const int unsafe_flags); ++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_open(const struct dentry *dentry, ++ const struct vfsmount *mnt, int acc_mode); ++__u32 gr_acl_handle_creat(const struct dentry *dentry, ++ const struct dentry *p_dentry, ++ const struct vfsmount *p_mnt, ++ int open_flags, int acc_mode, const int imode); 
++void gr_handle_create(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_proc_create(const struct dentry *dentry, ++ const struct inode *inode); ++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const int mode); ++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt); ++__u32 gr_acl_handle_rmdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_delete(const ino_t ino, const dev_t dev); ++__u32 gr_acl_handle_unlink(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const char *from); ++__u32 gr_acl_handle_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt, const char *to); ++int gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname); ++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace); ++__u32 gr_check_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt); ++int gr_acl_handle_filldir(const struct file *file, const char *name, ++ const unsigned int namelen, const ino_t ino); ++ ++__u32 gr_acl_handle_unix(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_acl_handle_exit(void); ++void gr_acl_handle_psacct(struct task_struct *task, const long code); ++int gr_acl_handle_procpidmem(const struct task_struct *task); ++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); ++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); ++void gr_audit_ptrace(struct task_struct *task); ++dev_t gr_get_dev_from_dentry(struct dentry *dentry); ++ ++int gr_ptrace_readexec(struct file *file, int unsafe_flags); ++ ++#ifdef CONFIG_GRKERNSEC ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p); ++void gr_handle_vm86(void); ++void gr_handle_mem_readwrite(u64 from, u64 to); ++ ++void gr_log_badprocpid(const char *entry); ++ ++extern int grsec_enable_dmesg; ++extern int grsec_disable_privio; ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++extern int grsec_enable_chroot_findtask; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int grsec_enable_setxid; ++#endif ++#endif ++ ++#endif +diff --git a/include/linux/grsock.h b/include/linux/grsock.h +new file mode 100644 +index 0000000..e7ffaaf +--- /dev/null ++++ b/include/linux/grsock.h +@@ -0,0 +1,19 @@ ++#ifndef __GRSOCK_H ++#define __GRSOCK_H ++ ++extern void gr_attach_curr_ip(const struct sock *sk); ++extern int gr_handle_sock_all(const int family, const int type, ++ const int protocol); ++extern int gr_handle_sock_server(const struct sockaddr *sck); ++extern int gr_handle_sock_server_other(const struct sock *sck); ++extern int gr_handle_sock_client(const struct sockaddr *sck); ++extern int gr_search_connect(struct socket * sock, ++ struct 
sockaddr_in * addr); ++extern int gr_search_bind(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_listen(struct socket * sock); ++extern int gr_search_accept(struct socket * sock); ++extern int gr_search_socket(const int domain, const int type, ++ const int protocol); ++ ++#endif +diff --git a/include/linux/hid.h b/include/linux/hid.h +index c235e4e..f0cf7a0 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -679,7 +679,7 @@ struct hid_ll_driver { + unsigned int code, int value); + + int (*parse)(struct hid_device *hdev); +-}; ++} __no_const; + + #define PM_HINT_FULLON 1<<5 + #define PM_HINT_NORMAL 1<<1 +diff --git a/include/linux/highmem.h b/include/linux/highmem.h +index 3a93f73..b19d0b3 100644 +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page) + kunmap_atomic(kaddr, KM_USER0); + } + ++static inline void sanitize_highpage(struct page *page) ++{ ++ void *kaddr; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ kaddr = kmap_atomic(page, KM_CLEARPAGE); ++ clear_page(kaddr); ++ kunmap_atomic(kaddr, KM_CLEARPAGE); ++ local_irq_restore(flags); ++} ++ + static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +diff --git a/include/linux/i2c.h b/include/linux/i2c.h +index 07d103a..04ec65b 100644 +--- a/include/linux/i2c.h ++++ b/include/linux/i2c.h +@@ -364,6 +364,7 @@ struct i2c_algorithm { + /* To determine what the adapter supports */ + u32 (*functionality) (struct i2c_adapter *); + }; ++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const; + + /* + * i2c_adapter is the structure used to identify a physical i2c bus along +diff --git a/include/linux/i2o.h b/include/linux/i2o.h +index a6deef4..c56a7f2 100644 +--- a/include/linux/i2o.h ++++ b/include/linux/i2o.h +@@ -564,7 +564,7 @@ struct i2o_controller { + struct i2o_device *exec; /* Executive */ + #if BITS_PER_LONG == 64 + spinlock_t context_list_lock; /* lock for context_list */ +- atomic_t context_list_counter; /* needed for unique contexts */ ++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */ + struct list_head context_list; /* list of context id's + and pointers */ + #endif +diff --git a/include/linux/init.h b/include/linux/init.h +index 9146f39..885354d 100644 +--- a/include/linux/init.h ++++ b/include/linux/init.h +@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline); + + /* Each module must use one module_init(). */ + #define module_init(initfn) \ +- static inline initcall_t __inittest(void) \ ++ static inline __used initcall_t __inittest(void) \ + { return initfn; } \ + int init_module(void) __attribute__((alias(#initfn))); + + /* This is only required if you want to be unloadable. */ + #define module_exit(exitfn) \ +- static inline exitcall_t __exittest(void) \ ++ static inline __used exitcall_t __exittest(void) \ + { return exitfn; } \ + void cleanup_module(void) __attribute__((alias(#exitfn))); + +diff --git a/include/linux/init_task.h b/include/linux/init_task.h +index 32574ee..00d4ef1 100644 +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -128,6 +128,12 @@ extern struct cred init_cred; + + #define INIT_TASK_COMM "swapper" + ++#ifdef CONFIG_X86 ++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO, ++#else ++#define INIT_TASK_THREAD_INFO ++#endif ++ + /* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. 
Base=0, limit=0x1fffff (=2MB) +@@ -166,6 +172,7 @@ extern struct cred init_cred; + RCU_INIT_POINTER(.cred, &init_cred), \ + .comm = INIT_TASK_COMM, \ + .thread = INIT_THREAD, \ ++ INIT_TASK_THREAD_INFO \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index e6ca56d..8583707 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -296,7 +296,7 @@ struct iommu_flush { + u8 fm, u64 type); + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +-}; ++} __no_const; + + enum { + SR_DMAR_FECTL_REG, +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index a64b00e..464d8bc 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -441,7 +441,7 @@ enum + /* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +-extern char *softirq_to_name[NR_SOFTIRQS]; ++extern const char * const softirq_to_name[NR_SOFTIRQS]; + + /* softirq mask and active fields moved to irq_cpustat_t in + * asm/hardirq.h to get better cache usage. KAO +@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS]; + + struct softirq_action + { +- void (*action)(struct softirq_action *); ++ void (*action)(void); + }; + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +-extern void open_softirq(int nr, void (*action)(struct softirq_action *)); ++extern void open_softirq(int nr, void (*action)(void)); + extern void softirq_init(void); + static inline void __raise_softirq_irqoff(unsigned int nr) + { +diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h +index 3875719..4cd454c 100644 +--- a/include/linux/kallsyms.h ++++ b/include/linux/kallsyms.h +@@ -15,7 +15,8 @@ + + struct module; + +-#ifdef CONFIG_KALLSYMS ++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS) ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* Lookup the address for a symbol. Returns 0 if not found. */ + unsigned long kallsyms_lookup_name(const char *name); + +@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u + /* Stupid that this does nothing, but I didn't create this mess. 
*/ + #define __print_symbol(fmt, addr) + #endif /*CONFIG_KALLSYMS*/ ++#else /* when included by kallsyms.c, vsnprintf.c, or ++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */ ++extern void __print_symbol(const char *fmt, unsigned long address); ++extern int sprint_backtrace(char *buffer, unsigned long address); ++extern int sprint_symbol(char *buffer, unsigned long address); ++const char *kallsyms_lookup(unsigned long addr, ++ unsigned long *symbolsize, ++ unsigned long *offset, ++ char **modname, char *namebuf); ++#endif + + /* This macro allows us to keep printk typechecking */ + static __printf(1, 2) +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index fa39183..40160be 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -53,7 +53,7 @@ extern int kgdb_connected; + extern int kgdb_io_module_registered; + + extern atomic_t kgdb_setting_breakpoint; +-extern atomic_t kgdb_cpu_doing_single_step; ++extern atomic_unchecked_t kgdb_cpu_doing_single_step; + + extern struct task_struct *kgdb_usethread; + extern struct task_struct *kgdb_contthread; +@@ -251,7 +251,7 @@ struct kgdb_arch { + void (*disable_hw_break)(struct pt_regs *regs); + void (*remove_all_hw_break)(void); + void (*correct_hw_break)(void); +-}; ++} __do_const; + + /** + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. +@@ -276,7 +276,7 @@ struct kgdb_io { + void (*pre_exception) (void); + void (*post_exception) (void); + int is_console; +-}; ++} __do_const; + + extern struct kgdb_arch arch_kgdb_ops; + +diff --git a/include/linux/kmod.h b/include/linux/kmod.h +index b16f653..eb908f4 100644 +--- a/include/linux/kmod.h ++++ b/include/linux/kmod.h +@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */ + * usually useless though. */ + extern __printf(2, 3) + int __request_module(bool wait, const char *name, ...); ++extern __printf(3, 4) ++int ___request_module(bool wait, char *param_name, const char *name, ...); + #define request_module(mod...) __request_module(true, mod) + #define request_module_nowait(mod...) __request_module(false, mod) + #define try_then_request_module(x, mod...) 
\ +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index d526231..c9599fc 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + void vcpu_load(struct kvm_vcpu *vcpu); + void vcpu_put(struct kvm_vcpu *vcpu); + +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module); + void kvm_exit(void); + +@@ -385,20 +385,20 @@ void kvm_get_pfn(pfn_t pfn); + int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, + int len); + int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, +- unsigned long len); +-int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); ++ unsigned long len) __size_overflow(4); ++int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4); + int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, +- void *data, unsigned long len); ++ void *data, unsigned long len) __size_overflow(4); + int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, + int offset, int len); + int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, +- unsigned long len); ++ unsigned long len) __size_overflow(2,4); + int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, +- void *data, unsigned long len); ++ void *data, unsigned long len) __size_overflow(4); + int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa); + int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); +-int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); ++int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3); + struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); + int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); + unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); +@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +-int kvm_arch_init(void *opaque); ++int kvm_arch_init(const void *opaque); + void kvm_arch_exit(void); + + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +@@ -690,7 +690,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm); + int kvm_set_irq_routing(struct kvm *kvm, + const struct kvm_irq_routing_entry *entries, + unsigned nr, +- unsigned flags); ++ unsigned flags) __size_overflow(3); + void kvm_free_irq_routing(struct kvm *kvm); + + #else +diff --git a/include/linux/libata.h b/include/linux/libata.h +index cafc09a..d7e7829 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -909,7 +909,7 @@ struct ata_port_operations { + * fields must be pointers. 
+ */ + const struct ata_port_operations *inherits; +-}; ++} __do_const; + + struct ata_port_info { + unsigned long flags; +diff --git a/include/linux/mca.h b/include/linux/mca.h +index 3797270..7765ede 100644 +--- a/include/linux/mca.h ++++ b/include/linux/mca.h +@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions { + int region); + void * (*mca_transform_memory)(struct mca_device *, + void *memory); +-}; ++} __no_const; + + struct mca_bus { + u64 default_dma_mask; +diff --git a/include/linux/memory.h b/include/linux/memory.h +index 935699b..11042cc 100644 +--- a/include/linux/memory.h ++++ b/include/linux/memory.h +@@ -144,7 +144,7 @@ struct memory_accessor { + size_t count); + ssize_t (*write)(struct memory_accessor *, const char *buf, + off_t offset, size_t count); +-}; ++} __no_const; + + /* + * Kernel text modification mutex, used for code patching. Users of this lock +diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h +index 9970337..9444122 100644 +--- a/include/linux/mfd/abx500.h ++++ b/include/linux/mfd/abx500.h +@@ -188,6 +188,7 @@ struct abx500_ops { + int (*event_registers_startup_state_get) (struct device *, u8 *); + int (*startup_irq_enabled) (struct device *, unsigned int); + }; ++typedef struct abx500_ops __no_const abx500_ops_no_const; + + int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops); + void abx500_remove_ops(struct device *dev); +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 4baadd1..2e0b45e 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp); + + #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ + #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */ ++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */ ++#else + #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ ++#endif ++ + #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ + #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page); + int set_page_dirty_lock(struct page *page); + int clear_page_dirty_for_io(struct page *page); + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- +-static inline int stack_guard_page_start(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_growsdown(vma->vm_prev, addr); +-} +- +-/* Is the vma a continuation of the stack vma below it? 
*/ +-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +-} +- +-static inline int stack_guard_page_end(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSUP) && +- (vma->vm_end == addr) && +- !vma_growsup(vma->vm_next, addr); +-} +- + extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len); +@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) + } + #endif + ++#ifdef CONFIG_MMU ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags); ++#else ++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) ++{ ++ return __pgprot(0); ++} ++#endif ++ + int vma_wants_writenotify(struct vm_area_struct *vma); + + extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, +@@ -1419,6 +1407,7 @@ out: + } + + extern int do_munmap(struct mm_struct *, unsigned long, size_t); ++extern int __do_munmap(struct mm_struct *, unsigned long, size_t); + + extern unsigned long do_brk(unsigned long, unsigned long); + +@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add + extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + ++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); ++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); ++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); ++ + /* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. 
*/ + static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma) + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + +-#ifdef CONFIG_MMU +-pgprot_t vm_get_page_prot(unsigned long vm_flags); +-#else +-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +-{ +- return __pgprot(0); +-} +-#endif +- + struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); + int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn); + extern int sysctl_memory_failure_early_kill; + extern int sysctl_memory_failure_recovery; + extern void shake_page(struct page *p, int access); +-extern atomic_long_t mce_bad_pages; ++extern atomic_long_unchecked_t mce_bad_pages; + extern int soft_offline_page(struct page *page, int flags); + + extern void dump_page(struct page *page); +@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, + unsigned int pages_per_huge_page); + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ + ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); ++#else ++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} ++#endif ++ + #endif /* __KERNEL__ */ + #endif /* _LINUX_MM_H */ +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 5b42f1b..759e4b4 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -253,6 +253,8 @@ struct vm_area_struct { + #ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif ++ ++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ + }; + + struct core_thread { +@@ -389,6 +391,24 @@ struct mm_struct { + #ifdef CONFIG_CPUMASK_OFFSTACK + struct cpumask cpumask_allocation; + #endif ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ unsigned long pax_flags; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ unsigned long call_dl_resolve; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ unsigned long call_syscall; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ unsigned long delta_mmap; /* randomized offset */ ++ unsigned long delta_stack; /* randomized offset */ ++#endif ++ + }; + + static inline void mm_init_cpumask(struct mm_struct *mm) +diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h +index 1d1b1e1..2a13c78 100644 +--- a/include/linux/mmu_notifier.h ++++ b/include/linux/mmu_notifier.h +@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) + */ + #define ptep_clear_flush_notify(__vma, __address, __ptep) \ + ({ \ +- pte_t __pte; \ ++ pte_t ___pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ +- __pte = ptep_clear_flush(___vma, ___address, __ptep); \ ++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ +- __pte; \ ++ ___pte; \ + }) + + #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \ +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 188cb2f..d78409b 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -369,7 +369,7 @@ 
struct zone { + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index 468819c..17b9db3 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -12,7 +12,7 @@ + typedef unsigned long kernel_ulong_t; + #endif + +-#define PCI_ANY_ID (~0) ++#define PCI_ANY_ID ((__u16)~0) + + struct pci_device_id { + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ +@@ -131,7 +131,7 @@ struct usb_device_id { + #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 + #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 + +-#define HID_ANY_ID (~0) ++#define HID_ANY_ID (~0U) + + struct hid_device_id { + __u16 bus; +diff --git a/include/linux/module.h b/include/linux/module.h +index 3cb7839..511cb87 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -17,6 +17,7 @@ + #include <linux/moduleparam.h> + #include <linux/tracepoint.h> + #include <linux/export.h> ++#include <linux/fs.h> + + #include <linux/percpu.h> + #include <asm/module.h> +@@ -261,19 +262,16 @@ struct module + int (*init)(void); + + /* If this is non-NULL, vfree after init() returns */ +- void *module_init; ++ void *module_init_rx, *module_init_rw; + + /* Here is the actual code + data, vfree'd on unload. */ +- void *module_core; ++ void *module_core_rx, *module_core_rw; + + /* Here are the sizes of the init and core sections */ +- unsigned int init_size, core_size; ++ unsigned int init_size_rw, core_size_rw; + + /* The size of the executable code in each section. */ +- unsigned int init_text_size, core_text_size; +- +- /* Size of RO sections of the module (text+rodata) */ +- unsigned int init_ro_size, core_ro_size; ++ unsigned int init_size_rx, core_size_rx; + + /* Arch-specific module values */ + struct mod_arch_specific arch; +@@ -329,6 +327,10 @@ struct module + #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call **trace_events; + unsigned int num_trace_events; ++ struct file_operations trace_id; ++ struct file_operations trace_enable; ++ struct file_operations trace_format; ++ struct file_operations trace_filter; + #endif + #ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned int num_ftrace_callsites; +@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr); + bool is_module_percpu_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); + ++static inline int within_module_range(unsigned long addr, void *start, unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (ktla_ktva(addr) >= (unsigned long)start && ++ ktla_ktva(addr) < (unsigned long)start + size) ++ return 1; ++#endif ++ ++ return ((void *)addr >= start && (void *)addr < start + size); ++} ++ ++static inline int within_module_core_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx); ++} ++ ++static inline int within_module_core_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw); ++} ++ ++static inline int within_module_init_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx); ++} ++ ++static inline int within_module_init_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, 
mod->module_init_rw, mod->init_size_rw); ++} ++ + static inline int within_module_core(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_core <= addr && +- addr < (unsigned long)mod->module_core + mod->core_size; ++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod); + } + + static inline int within_module_init(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_init <= addr && +- addr < (unsigned long)mod->module_init + mod->init_size; ++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod); + } + + /* Search for module by name: must hold module_mutex. */ +diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h +index b2be02e..edb10c9 100644 +--- a/include/linux/moduleloader.h ++++ b/include/linux/moduleloader.h +@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + + /* Allocator used for allocating struct module, core sections and init + sections. Returns NULL on failure. */ +-void *module_alloc(unsigned long size); ++void *module_alloc(unsigned long size) __size_overflow(1); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++void *module_alloc_exec(unsigned long size); ++#else ++#define module_alloc_exec(x) module_alloc(x) ++#endif + + /* Free memory returned from module_alloc. */ + void module_free(struct module *mod, void *module_region); + ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region); ++#else ++#define module_free_exec(x, y) module_free((x), (y)) ++#endif ++ + /* Apply the given relocation to the (simplified) ELF. Return -error + or 0. */ + int apply_relocate(Elf_Shdr *sechdrs, +diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h +index 7939f63..ec6df57 100644 +--- a/include/linux/moduleparam.h ++++ b/include/linux/moduleparam.h +@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void) + * @len is usually just sizeof(string). + */ + #define module_param_string(name, string, len, perm) \ +- static const struct kparam_string __param_string_##name \ ++ static const struct kparam_string __param_string_##name __used \ + = { len, string }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + &param_ops_string, \ +@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); + * module_param_named() for why this might be necessary. 
+ */ + #define module_param_array_named(name, array, type, nump, perm) \ +- static const struct kparam_array __param_arr_##name \ ++ static const struct kparam_array __param_arr_##name __used \ + = { .max = ARRAY_SIZE(array), .num = nump, \ + .ops = &param_ops_##type, \ + .elemsize = sizeof(array[0]), .elem = array }; \ +diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h +index a9e6ba4..0f9e29b 100644 +--- a/include/linux/mtd/map.h ++++ b/include/linux/mtd/map.h +@@ -25,6 +25,7 @@ + #include <linux/types.h> + #include <linux/list.h> + #include <linux/string.h> ++#include <linux/kernel.h> + #include <linux/bug.h> + + +diff --git a/include/linux/namei.h b/include/linux/namei.h +index ffc0213..2c1f2cb 100644 +--- a/include/linux/namei.h ++++ b/include/linux/namei.h +@@ -24,7 +24,7 @@ struct nameidata { + unsigned seq; + int last_type; + unsigned depth; +- char *saved_names[MAX_NESTED_LINKS + 1]; ++ const char *saved_names[MAX_NESTED_LINKS + 1]; + + /* Intent data */ + union { +@@ -94,12 +94,12 @@ extern int follow_up(struct path *); + extern struct dentry *lock_rename(struct dentry *, struct dentry *); + extern void unlock_rename(struct dentry *, struct dentry *); + +-static inline void nd_set_link(struct nameidata *nd, char *path) ++static inline void nd_set_link(struct nameidata *nd, const char *path) + { + nd->saved_names[nd->depth] = path; + } + +-static inline char *nd_get_link(struct nameidata *nd) ++static inline const char *nd_get_link(const struct nameidata *nd) + { + return nd->saved_names[nd->depth]; + } +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index a82ad4d..90d15b7 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -949,6 +949,7 @@ struct net_device_ops { + int (*ndo_set_features)(struct net_device *dev, + u32 features); + }; ++typedef struct net_device_ops __no_const net_device_ops_no_const; + + /* + * The DEVICE structure. +@@ -1088,7 +1089,7 @@ struct net_device { + int iflink; + + struct net_device_stats stats; +- atomic_long_t rx_dropped; /* dropped packets by core network ++ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network + * Do not use this in drivers. + */ + +diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h +new file mode 100644 +index 0000000..33f4af8 +--- /dev/null ++++ b/include/linux/netfilter/xt_gradm.h +@@ -0,0 +1,9 @@ ++#ifndef _LINUX_NETFILTER_XT_GRADM_H ++#define _LINUX_NETFILTER_XT_GRADM_H 1 ++ ++struct xt_gradm_mtinfo { ++ __u16 flags; ++ __u16 invflags; ++}; ++ ++#endif +diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h +index c65a18a..0c05f3a 100644 +--- a/include/linux/of_pdt.h ++++ b/include/linux/of_pdt.h +@@ -32,7 +32,7 @@ struct of_pdt_ops { + + /* return 0 on success; fill in 'len' with number of bytes in path */ + int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); +-}; ++} __no_const; + + extern void *prom_early_alloc(unsigned long size); + +diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h +index a4c5624..2dabfb7 100644 +--- a/include/linux/oprofile.h ++++ b/include/linux/oprofile.h +@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, + int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, + char const * name, ulong * val); + +-/** Create a file for read-only access to an atomic_t. */ ++/** Create a file for read-only access to an atomic_unchecked_t. 
*/ + int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, +- char const * name, atomic_t * val); ++ char const * name, atomic_unchecked_t * val); + + /** create a directory */ + struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, +@@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co + * Read an ASCII string for a number from a userspace buffer and fill *val on success. + * Returns 0 on success, < 0 on error. + */ +-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); ++int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3); + + /** lock for read/write safety */ + extern raw_spinlock_t oprofilefs_lock; +diff --git a/include/linux/padata.h b/include/linux/padata.h +index 4633b2f..988bc08 100644 +--- a/include/linux/padata.h ++++ b/include/linux/padata.h +@@ -129,7 +129,7 @@ struct parallel_data { + struct padata_instance *pinst; + struct padata_parallel_queue __percpu *pqueue; + struct padata_serial_queue __percpu *squeue; +- atomic_t seq_nr; ++ atomic_unchecked_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index b1f8912..c955bff 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -748,8 +748,8 @@ struct perf_event { + + enum perf_event_active_state state; + unsigned int attach_state; +- local64_t count; +- atomic64_t child_count; ++ local64_t count; /* PaX: fix it one day */ ++ atomic64_unchecked_t child_count; + + /* + * These are the total time in nanoseconds that the event +@@ -800,8 +800,8 @@ struct perf_event { + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. 
+ */ +- atomic64_t child_total_time_enabled; +- atomic64_t child_total_time_running; ++ atomic64_unchecked_t child_total_time_enabled; ++ atomic64_unchecked_t child_total_time_running; + + /* + * Protect attach/detach and child_list: +diff --git a/include/linux/personality.h b/include/linux/personality.h +index 8fc7dd1a..c19d89e 100644 +--- a/include/linux/personality.h ++++ b/include/linux/personality.h +@@ -44,6 +44,7 @@ enum { + #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ + ADDR_NO_RANDOMIZE | \ + ADDR_COMPAT_LAYOUT | \ ++ ADDR_LIMIT_3GB | \ + MMAP_PAGE_ZERO) + + /* +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h +index 77257c9..51d473a 100644 +--- a/include/linux/pipe_fs_i.h ++++ b/include/linux/pipe_fs_i.h +@@ -46,9 +46,9 @@ struct pipe_buffer { + struct pipe_inode_info { + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; +- unsigned int readers; +- unsigned int writers; +- unsigned int waiting_writers; ++ atomic_t readers; ++ atomic_t writers; ++ atomic_t waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; +diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h +index d3085e7..fd01052 100644 +--- a/include/linux/pm_runtime.h ++++ b/include/linux/pm_runtime.h +@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) + + static inline void pm_runtime_mark_last_busy(struct device *dev) + { +- ACCESS_ONCE(dev->power.last_busy) = jiffies; ++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies; + } + + #else /* !CONFIG_PM_RUNTIME */ +diff --git a/include/linux/poison.h b/include/linux/poison.h +index 79159de..f1233a9 100644 +--- a/include/linux/poison.h ++++ b/include/linux/poison.h +@@ -19,8 +19,8 @@ + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. 
+ */ +-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) +-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) ++#define LIST_POISON1 ((void *) (long)0xFFFFFF01) ++#define LIST_POISON2 ((void *) (long)0xFFFFFF02) + + /********** include/linux/timer.h **********/ + /* +diff --git a/include/linux/preempt.h b/include/linux/preempt.h +index 58969b2..ead129b 100644 +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -123,7 +123,7 @@ struct preempt_ops { + void (*sched_in)(struct preempt_notifier *notifier, int cpu); + void (*sched_out)(struct preempt_notifier *notifier, + struct task_struct *next); +-}; ++} __no_const; + + /** + * preempt_notifier - key for installing preemption notifiers +diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h +index 643b96c..ef55a9c 100644 +--- a/include/linux/proc_fs.h ++++ b/include/linux/proc_fs.h +@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, + return proc_create_data(name, mode, parent, proc_fops, NULL); + } + ++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode, ++ struct proc_dir_entry *parent, const struct file_operations *proc_fops) ++{ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); ++#else ++ return proc_create_data(name, mode, parent, proc_fops, NULL); ++#endif ++} ++ ++ + static inline struct proc_dir_entry *create_proc_read_entry(const char *name, + mode_t mode, struct proc_dir_entry *base, + read_proc_t *read_proc, void * data) +@@ -258,7 +271,7 @@ union proc_op { + int (*proc_show)(struct seq_file *m, + struct pid_namespace *ns, struct pid *pid, + struct task_struct *task); +-}; ++} __no_const; + + struct ctl_table_header; + struct ctl_table; +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index 800f113..e9ee2e3 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child); + extern void exit_ptrace(struct task_struct *tracer); + #define PTRACE_MODE_READ 1 + #define PTRACE_MODE_ATTACH 2 +-/* Returns 0 on success, -errno on denial. */ +-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); + /* Returns true on success, false on denial. */ + extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); ++/* Returns true on success, false on denial. */ ++extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode); ++/* Returns true on success, false on denial. */ ++extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode); + + static inline int ptrace_reparented(struct task_struct *child) + { +diff --git a/include/linux/random.h b/include/linux/random.h +index 8f74538..02a1012 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -69,12 +69,17 @@ void srandom32(u32 seed); + + u32 prandom32(struct rnd_state *); + ++static inline unsigned long pax_get_random_long(void) ++{ ++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0); ++} ++ + /* + * Handle minimum values for seeds + */ + static inline u32 __seed(u32 x, u32 m) + { +- return (x < m) ? x + m : x; ++ return (x <= m) ? 
x + m + 1 : x; + } + + /** +diff --git a/include/linux/reboot.h b/include/linux/reboot.h +index e0879a7..a12f962 100644 +--- a/include/linux/reboot.h ++++ b/include/linux/reboot.h +@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *); + * Architecture-specific implementations of sys_reboot commands. + */ + +-extern void machine_restart(char *cmd); +-extern void machine_halt(void); +-extern void machine_power_off(void); ++extern void machine_restart(char *cmd) __noreturn; ++extern void machine_halt(void) __noreturn; ++extern void machine_power_off(void) __noreturn; + + extern void machine_shutdown(void); + struct pt_regs; +@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *); + */ + + extern void kernel_restart_prepare(char *cmd); +-extern void kernel_restart(char *cmd); +-extern void kernel_halt(void); +-extern void kernel_power_off(void); ++extern void kernel_restart(char *cmd) __noreturn; ++extern void kernel_halt(void) __noreturn; ++extern void kernel_power_off(void) __noreturn; + + extern int C_A_D; /* for sysctl */ + void ctrl_alt_del(void); +@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force); + * Emergency restart, callable from an interrupt handler. + */ + +-extern void emergency_restart(void); ++extern void emergency_restart(void) __noreturn; + #include <asm/emergency-restart.h> + + #endif +diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h +index 96d465f..b084e05 100644 +--- a/include/linux/reiserfs_fs.h ++++ b/include/linux/reiserfs_fs.h +@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode) + #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ + + #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) +-#define get_generation(s) atomic_read (&fs_generation(s)) ++#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) + #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) + #define __fs_changed(gen,s) (gen != get_generation (s)) + #define fs_changed(gen,s) \ +diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h +index 52c83b6..18ed7eb 100644 +--- a/include/linux/reiserfs_fs_sb.h ++++ b/include/linux/reiserfs_fs_sb.h +@@ -386,7 +386,7 @@ struct reiserfs_sb_info { + /* Comment? -Hans */ + wait_queue_head_t s_wait; + /* To be obsoleted soon by per buffer seals.. -Hans */ +- atomic_t s_generation_counter; // increased by one every time the ++ atomic_unchecked_t s_generation_counter; // increased by one every time the + // tree gets re-balanced + unsigned long s_properties; /* File system properties. Currently holds + on-disk FS format */ +diff --git a/include/linux/relay.h b/include/linux/relay.h +index 14a86bc..17d0700 100644 +--- a/include/linux/relay.h ++++ b/include/linux/relay.h +@@ -159,7 +159,7 @@ struct rchan_callbacks + * The callback should return 0 if successful, negative if not. 
+ */ + int (*remove_buf_file)(struct dentry *dentry); +-}; ++} __no_const; + + /* + * CONFIG_RELAY kernel API, kernel/relay.c +diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h +index c6c6084..5bf1212 100644 +--- a/include/linux/rfkill.h ++++ b/include/linux/rfkill.h +@@ -147,6 +147,7 @@ struct rfkill_ops { + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); + }; ++typedef struct rfkill_ops __no_const rfkill_ops_no_const; + + #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) + /** +diff --git a/include/linux/rio.h b/include/linux/rio.h +index 4d50611..c6858a2 100644 +--- a/include/linux/rio.h ++++ b/include/linux/rio.h +@@ -315,7 +315,7 @@ struct rio_ops { + int mbox, void *buffer, size_t len); + int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); + void *(*get_inb_message)(struct rio_mport *mport, int mbox); +-}; ++} __no_const; + + #define RIO_RESOURCE_MEM 0x00000100 + #define RIO_RESOURCE_DOORBELL 0x00000200 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h +index 2148b12..519b820 100644 +--- a/include/linux/rmap.h ++++ b/include/linux/rmap.h +@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma) + void anon_vma_init(void); /* create anon_vma_cachep */ + int anon_vma_prepare(struct vm_area_struct *); + void unlink_anon_vmas(struct vm_area_struct *); +-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); ++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *); ++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *); + void __anon_vma_link(struct vm_area_struct *); + + static inline void anon_vma_merge(struct vm_area_struct *vma, +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 1c4f3e9..342eb1f 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -101,6 +101,7 @@ struct bio_list; + struct fs_struct; + struct perf_event_context; + struct blk_plug; ++struct linux_binprm; + + /* + * List of flags we want to share for kernel threads, +@@ -380,10 +381,13 @@ struct user_namespace; + #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + #include <linux/aio.h> + + #ifdef CONFIG_MMU ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len); ++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len); + extern void arch_pick_mmap_layout(struct mm_struct *mm); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, +@@ -629,6 +633,17 @@ struct signal_struct { + #ifdef CONFIG_TASKSTATS + struct taskstats *stats; + #endif ++ ++#ifdef CONFIG_GRKERNSEC ++ u32 curr_ip; ++ u32 saved_ip; ++ u32 gr_saddr; ++ u32 gr_daddr; ++ u16 gr_sport; ++ u16 gr_dport; ++ u8 used_accept:1; ++#endif ++ + #ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +@@ -710,6 +725,11 @@ struct user_struct { + struct key *session_keyring; /* UID's default session keyring */ + #endif + ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE) ++ unsigned int banned; ++ unsigned long ban_expires; ++#endif ++ + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + uid_t uid; +@@ -1337,8 +1357,8 @@ struct task_struct { + struct list_head thread_group; + + struct 
completion *vfork_done; /* for vfork() */ +- int __user *set_child_tid; /* CLONE_CHILD_SETTID */ +- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ ++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ ++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; +@@ -1354,13 +1374,6 @@ struct task_struct { + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + +-/* process credentials */ +- const struct cred __rcu *real_cred; /* objective and real subjective task +- * credentials (COW) */ +- const struct cred __rcu *cred; /* effective (overridable) subjective task +- * credentials (COW) */ +- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ +- + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) +@@ -1377,8 +1390,16 @@ struct task_struct { + #endif + /* CPU-specific state of this task */ + struct thread_struct thread; ++/* thread_info moved to task_struct */ ++#ifdef CONFIG_X86 ++ struct thread_info tinfo; ++#endif + /* filesystem information */ + struct fs_struct *fs; ++ ++ const struct cred __rcu *cred; /* effective (overridable) subjective task ++ * credentials (COW) */ ++ + /* open file information */ + struct files_struct *files; + /* namespaces */ +@@ -1425,6 +1446,11 @@ struct task_struct { + struct rt_mutex_waiter *pi_blocked_on; + #endif + ++/* process credentials */ ++ const struct cred __rcu *real_cred; /* objective and real subjective task ++ * credentials (COW) */ ++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ ++ + #ifdef CONFIG_DEBUG_MUTEXES + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; +@@ -1540,6 +1566,27 @@ struct task_struct { + unsigned long default_timer_slack_ns; + + struct list_head *scm_work_list; ++ ++#ifdef CONFIG_GRKERNSEC ++ /* grsecurity */ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ const struct cred *delayed_cred; ++#endif ++ struct dentry *gr_chroot_dentry; ++ struct acl_subject_label *acl; ++ struct acl_role_label *role; ++ struct file *exec_file; ++ u16 acl_role_id; ++ /* is this the task that authenticated to the special role */ ++ u8 acl_sp_role; ++ u8 is_writable; ++ u8 brute; ++ u8 gr_is_chrooted; ++#endif ++ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored address in ret_stack */ + int curr_ret_stack; +@@ -1574,6 +1621,51 @@ struct task_struct { + #endif + }; + ++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ ++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ ++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ ++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ ++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ ++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ ++ ++#ifdef CONFIG_PAX_SOFTMODE ++extern int pax_softmode; ++#endif ++ ++extern int pax_check_flags(unsigned long *); ++ ++/* if tsk != current then task_lock must be held on it */ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline unsigned long pax_get_flags(struct task_struct *tsk) ++{ ++ if (likely(tsk->mm)) ++ return tsk->mm->pax_flags; ++ else ++ return 0UL; ++} ++ ++/* if tsk != current then task_lock must be held on it */ ++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) ++{ ++ if 
(likely(tsk->mm)) { ++ tsk->mm->pax_flags = flags; ++ return 0; ++ } ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++extern void pax_set_initial_flags(struct linux_binprm *bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++#endif ++ ++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_refcount_overflow(struct pt_regs *regs); ++extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type); ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. */ + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +@@ -2081,7 +2173,9 @@ void yield(void); + extern struct exec_domain default_exec_domain; + + union thread_union { ++#ifndef CONFIG_X86 + struct thread_info thread_info; ++#endif + unsigned long stack[THREAD_SIZE/sizeof(long)]; + }; + +@@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns; + */ + + extern struct task_struct *find_task_by_vpid(pid_t nr); ++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr); + extern struct task_struct *find_task_by_pid_ns(pid_t nr, + struct pid_namespace *ns); + +@@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm) + extern void mmput(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ + extern struct mm_struct *get_task_mm(struct task_struct *task); ++/* ++ * Grab a reference to a task's mm, if it is not already going away ++ * and ptrace_may_access with the mode parameter passed to it ++ * succeeds. ++ */ ++extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); + /* Remove the current tasks stale references to the old mm_struct */ + extern void mm_release(struct task_struct *, struct mm_struct *); + /* Allocate a new mm structure and copy contents from tsk->mm */ +@@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *); + extern void exit_itimers(struct signal_struct *); + extern void flush_itimer_signals(void); + +-extern NORET_TYPE void do_group_exit(int); ++extern __noreturn void do_group_exit(int); + + extern void daemonize(const char *, ...); + extern int allow_signal(int); +@@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p) + + #endif + +-static inline int object_is_on_stack(void *obj) ++static inline int object_starts_on_stack(void *obj) + { +- void *stack = task_stack_page(current); ++ const void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); + } + ++#ifdef CONFIG_PAX_USERCOPY ++extern int object_is_on_stack(const void *obj, unsigned long len); ++#endif ++ + extern void thread_info_cache_init(void); + + #ifdef CONFIG_DEBUG_STACK_USAGE +diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h +index 899fbb4..1cb4138 100644 +--- a/include/linux/screen_info.h ++++ b/include/linux/screen_info.h +@@ -43,7 +43,8 @@ struct screen_info { + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + __u32 capabilities; /* 0x36 */ +- __u8 _reserved[6]; /* 0x3a */ ++ __u16 vesapm_size; /* 0x3a */ ++ __u8 _reserved[4]; /* 0x3c */ + } __attribute__((packed)); + + #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +diff --git a/include/linux/security.h b/include/linux/security.h +index e8c619d..e0cbd1c 100644 +--- a/include/linux/security.h ++++ 
b/include/linux/security.h +@@ -37,6 +37,7 @@ + #include <linux/xfrm.h> + #include <linux/slab.h> + #include <linux/xattr.h> ++#include <linux/grsecurity.h> + #include <net/flow.h> + + /* Maximum number of letters for an LSM name string */ +diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h +index 0b69a46..b2ffa4c 100644 +--- a/include/linux/seq_file.h ++++ b/include/linux/seq_file.h +@@ -24,6 +24,9 @@ struct seq_file { + struct mutex lock; + const struct seq_operations *op; + int poll_event; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif + void *private; + }; + +@@ -33,6 +36,7 @@ struct seq_operations { + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); + }; ++typedef struct seq_operations __no_const seq_operations_no_const; + + #define SEQ_SKIP 1 + +diff --git a/include/linux/shm.h b/include/linux/shm.h +index 92808b8..c28cac4 100644 +--- a/include/linux/shm.h ++++ b/include/linux/shm.h +@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */ + + /* The task created the shm object. NULL if the task is dead. */ + struct task_struct *shm_creator; ++#ifdef CONFIG_GRKERNSEC ++ time_t shm_createtime; ++ pid_t shm_lapid; ++#endif + }; + + /* shm_mode upper byte flags */ +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 6cf8b53..bcce844 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) + */ + static inline int skb_queue_empty(const struct sk_buff_head *list) + { +- return list->next == (struct sk_buff *)list; ++ return list->next == (const struct sk_buff *)list; + } + + /** +@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) + static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) + { +- return skb->next == (struct sk_buff *)list; ++ return skb->next == (const struct sk_buff *)list; + } + + /** +@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, + static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) + { +- return skb->prev == (struct sk_buff *)list; ++ return skb->prev == (const struct sk_buff *)list; + } + + /** +@@ -1533,7 +1533,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ + #ifndef NET_SKB_PAD +-#define NET_SKB_PAD max(32, L1_CACHE_BYTES) ++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES) + #endif + + extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 573c809..07e1f43 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -11,12 +11,20 @@ + + #include <linux/gfp.h> + #include <linux/types.h> ++#include <linux/err.h> + + /* + * Flags to pass to kmem_cache_create(). + * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. 
+ */ + #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ ++ ++#ifdef CONFIG_PAX_USERCOPY ++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */ ++#else ++#define SLAB_USERCOPY 0x00000000UL ++#endif ++ + #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ + #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ + #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ +@@ -87,10 +95,13 @@ + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. + * Both make kfree a no-op. + */ +-#define ZERO_SIZE_PTR ((void *)16) ++#define ZERO_SIZE_PTR \ ++({ \ ++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ ++ (void *)(-MAX_ERRNO-1L); \ ++}) + +-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ +- (unsigned long)ZERO_SIZE_PTR) ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) + + /* + * struct kmem_cache related prototypes +@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *); + /* + * Common kmalloc functions provided by all allocators + */ +-void * __must_check __krealloc(const void *, size_t, gfp_t); +-void * __must_check krealloc(const void *, size_t, gfp_t); ++void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2); ++void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2); + void kfree(const void *); + void kzfree(const void *); + size_t ksize(const void *); ++void check_object_size(const void *ptr, unsigned long n, bool to); + + /* + * Allocator specific definitions. These are mainly used to establish optimized +@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, + */ + #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) +-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); ++extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1); + #define kmalloc_track_caller(size, flags) \ + __kmalloc_track_caller(size, flags, _RET_IP_) + #else +@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); + */ + #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) +-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); ++extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1); + #define kmalloc_node_track_caller(size, flags, node) \ + __kmalloc_node_track_caller(size, flags, node, \ + _RET_IP_) +diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h +index d00e0ba..d61fb1f 100644 +--- a/include/linux/slab_def.h ++++ b/include/linux/slab_def.h +@@ -68,10 +68,10 @@ struct kmem_cache { + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; +- atomic_t allochit; +- atomic_t allocmiss; +- atomic_t freehit; +- atomic_t freemiss; ++ atomic_unchecked_t allochit; ++ atomic_unchecked_t allocmiss; ++ atomic_unchecked_t freehit; ++ atomic_unchecked_t freemiss; + + /* + * If debugging is enabled, then the allocator can add additional +@@ -109,7 +109,7 @@ struct cache_sizes { + extern struct cache_sizes malloc_sizes[]; + + void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +-void *__kmalloc(size_t size, gfp_t flags); ++void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1); + + #ifdef CONFIG_TRACING + extern void *kmem_cache_alloc_trace(size_t size, +@@ -127,6 
+127,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep) + } + #endif + ++static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *kmalloc(size_t size, gfp_t flags) + { + struct kmem_cache *cachep; +@@ -162,7 +163,7 @@ found: + } + + #ifdef CONFIG_NUMA +-extern void *__kmalloc_node(size_t size, gfp_t flags, int node); ++extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + + #ifdef CONFIG_TRACING +@@ -181,6 +182,7 @@ kmem_cache_alloc_node_trace(size_t size, + } + #endif + ++static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) + { + struct kmem_cache *cachep; +diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h +index 0ec00b3..65e7e0e 100644 +--- a/include/linux/slob_def.h ++++ b/include/linux/slob_def.h +@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, + return kmem_cache_alloc_node(cachep, flags, -1); + } + +-void *__kmalloc_node(size_t size, gfp_t flags, int node); ++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + ++static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) + { + return __kmalloc_node(size, flags, node); +@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) + * kmalloc is the normal method of allocating memory + * in the kernel. + */ ++static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *kmalloc(size_t size, gfp_t flags) + { + return __kmalloc_node(size, flags, -1); + } + ++static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *__kmalloc(size_t size, gfp_t flags) + { + return kmalloc(size, flags); +diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h +index a32bcfd..d26bd6e 100644 +--- a/include/linux/slub_def.h ++++ b/include/linux/slub_def.h +@@ -89,7 +89,7 @@ struct kmem_cache { + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ +- int refcount; /* Refcount for slab cache destroy */ ++ atomic_t refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ +@@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size) + * This ought to end up with a global pointer to the right cache + * in kmalloc_caches. 
+ */ ++static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1); + static __always_inline struct kmem_cache *kmalloc_slab(size_t size) + { + int index = kmalloc_index(size); +@@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) + } + + void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +-void *__kmalloc(size_t size, gfp_t flags); ++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1); + + static __always_inline void * ++kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1); ++static __always_inline void * + kmalloc_order(size_t size, gfp_t flags, unsigned int order) + { + void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); +@@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) + } + #endif + ++static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *kmalloc_large(size_t size, gfp_t flags) + { + unsigned int order = get_order(size); + return kmalloc_order_trace(size, flags, order); + } + ++static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1); + static __always_inline void *kmalloc(size_t size, gfp_t flags) + { + if (__builtin_constant_p(size)) { +@@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) + } + + #ifdef CONFIG_NUMA +-void *__kmalloc_node(size_t size, gfp_t flags, int node); ++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + + #ifdef CONFIG_TRACING +@@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, + } + #endif + ++static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1); + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) + { + if (__builtin_constant_p(size) && +diff --git a/include/linux/sonet.h b/include/linux/sonet.h +index de8832d..0147b46 100644 +--- a/include/linux/sonet.h ++++ b/include/linux/sonet.h +@@ -61,7 +61,7 @@ struct sonet_stats { + #include <linux/atomic.h> + + struct k_sonet_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __SONET_ITEMS + #undef __HANDLE_ITEM + }; +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h +index 3d8f9c4..69f1c0a 100644 +--- a/include/linux/sunrpc/clnt.h ++++ b/include/linux/sunrpc/clnt.h +@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap) + { + switch (sap->sa_family) { + case AF_INET: +- return ntohs(((struct sockaddr_in *)sap)->sin_port); ++ return ntohs(((const struct sockaddr_in *)sap)->sin_port); + case AF_INET6: +- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); ++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; + } +@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) + { +- const struct sockaddr_in *ssin = (struct sockaddr_in *) src; ++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; +@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) + if (sa->sa_family != AF_INET6) + return 0; + +- return ((struct sockaddr_in6 *) sa)->sin6_scope_id; ++ return ((const 
struct sockaddr_in6 *) sa)->sin6_scope_id; + } + + #endif /* __KERNEL__ */ +diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h +index e775689..9e206d9 100644 +--- a/include/linux/sunrpc/sched.h ++++ b/include/linux/sunrpc/sched.h +@@ -105,6 +105,7 @@ struct rpc_call_ops { + void (*rpc_call_done)(struct rpc_task *, void *); + void (*rpc_release)(void *); + }; ++typedef struct rpc_call_ops __no_const rpc_call_ops_no_const; + + struct rpc_task_setup { + struct rpc_task *task; +diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h +index c14fe86..393245e 100644 +--- a/include/linux/sunrpc/svc_rdma.h ++++ b/include/linux/sunrpc/svc_rdma.h +@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord; + extern unsigned int svcrdma_max_requests; + extern unsigned int svcrdma_max_req_size; + +-extern atomic_t rdma_stat_recv; +-extern atomic_t rdma_stat_read; +-extern atomic_t rdma_stat_write; +-extern atomic_t rdma_stat_sq_starve; +-extern atomic_t rdma_stat_rq_starve; +-extern atomic_t rdma_stat_rq_poll; +-extern atomic_t rdma_stat_rq_prod; +-extern atomic_t rdma_stat_sq_poll; +-extern atomic_t rdma_stat_sq_prod; ++extern atomic_unchecked_t rdma_stat_recv; ++extern atomic_unchecked_t rdma_stat_read; ++extern atomic_unchecked_t rdma_stat_write; ++extern atomic_unchecked_t rdma_stat_sq_starve; ++extern atomic_unchecked_t rdma_stat_rq_starve; ++extern atomic_unchecked_t rdma_stat_rq_poll; ++extern atomic_unchecked_t rdma_stat_rq_prod; ++extern atomic_unchecked_t rdma_stat_sq_poll; ++extern atomic_unchecked_t rdma_stat_sq_prod; + + #define RPCRDMA_VERSION 1 + +diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h +index 703cfa33..0b8ca72ac 100644 +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -155,7 +155,11 @@ enum + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + }; + +- ++#ifdef CONFIG_PAX_SOFTMODE ++enum { ++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */ ++}; ++#endif + + /* CTL_VM names: */ + enum +@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write, + + extern int proc_dostring(struct ctl_table *, int, + void __user *, size_t *, loff_t *); ++extern int proc_dostring_modpriv(struct ctl_table *, int, ++ void __user *, size_t *, loff_t *); + extern int proc_dointvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + extern int proc_dointvec_minmax(struct ctl_table *, int, +diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h +index a71a292..51bd91d 100644 +--- a/include/linux/tracehook.h ++++ b/include/linux/tracehook.h +@@ -54,12 +54,12 @@ struct linux_binprm; + /* + * ptrace report for syscall entry and exit looks identical. + */ +-static inline void ptrace_report_syscall(struct pt_regs *regs) ++static inline int ptrace_report_syscall(struct pt_regs *regs) + { + int ptrace = current->ptrace; + + if (!(ptrace & PT_PTRACED)) +- return; ++ return 0; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 
0x80 : 0)); + +@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs) + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } ++ ++ return fatal_signal_pending(current); + } + + /** +@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs) + static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) + { +- ptrace_report_syscall(regs); +- return 0; ++ return ptrace_report_syscall(regs); + } + + /** +diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h +index ff7dc08..893e1bd 100644 +--- a/include/linux/tty_ldisc.h ++++ b/include/linux/tty_ldisc.h +@@ -148,7 +148,7 @@ struct tty_ldisc_ops { + + struct module *owner; + +- int refcount; ++ atomic_t refcount; + }; + + struct tty_ldisc { +diff --git a/include/linux/types.h b/include/linux/types.h +index 57a9723..dbe234a 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -213,10 +213,26 @@ typedef struct { + int counter; + } atomic_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ int counter; ++} atomic_unchecked_t; ++#else ++typedef atomic_t atomic_unchecked_t; ++#endif ++ + #ifdef CONFIG_64BIT + typedef struct { + long counter; + } atomic64_t; ++ ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ long counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif + #endif + + struct list_head { +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index 5ca0951..53a2fff 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to, + long ret; \ + mm_segment_t old_fs = get_fs(); \ + \ +- set_fs(KERNEL_DS); \ + pagefault_disable(); \ +- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ set_fs(KERNEL_DS); \ ++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \ + set_fs(old_fs); \ ++ pagefault_enable(); \ + ret; \ + }) + +@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size); + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); ++extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3); + extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); + + #endif /* __LINUX_UACCESS_H__ */ +diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h +index 99c1b4d..bb94261 100644 +--- a/include/linux/unaligned/access_ok.h ++++ b/include/linux/unaligned/access_ok.h +@@ -6,32 +6,32 @@ + + static inline u16 get_unaligned_le16(const void *p) + { +- return le16_to_cpup((__le16 *)p); ++ return le16_to_cpup((const __le16 *)p); + } + + static inline u32 get_unaligned_le32(const void *p) + { +- return le32_to_cpup((__le32 *)p); ++ return le32_to_cpup((const __le32 *)p); + } + + static inline u64 get_unaligned_le64(const void *p) + { +- return le64_to_cpup((__le64 *)p); ++ return le64_to_cpup((const __le64 *)p); + } + + static inline u16 get_unaligned_be16(const void *p) + { +- return be16_to_cpup((__be16 *)p); ++ return be16_to_cpup((const __be16 *)p); + } + + static inline u32 get_unaligned_be32(const void *p) + { +- return be32_to_cpup((__be32 *)p); ++ return be32_to_cpup((const __be32 *)p); + } + + static inline u64 get_unaligned_be64(const void *p) + { +- return be64_to_cpup((__be64 *)p); ++ return be64_to_cpup((const __be64 *)p); + } + + static inline void put_unaligned_le16(u16 val, void *p) +diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h +index e5a40c3..20ab0f6 100644 +--- a/include/linux/usb/renesas_usbhs.h ++++ b/include/linux/usb/renesas_usbhs.h +@@ -39,7 +39,7 @@ enum { + */ + struct renesas_usbhs_driver_callback { + int (*notify_hotplug)(struct platform_device *pdev); +-}; ++} __no_const; + + /* + * callback functions for platform +@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback { + * VBUS control is needed for Host + */ + int (*set_vbus)(struct platform_device *pdev, int enable); +-}; ++} __no_const; + + /* + * parameters for renesas usbhs +diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h +index 6f8fbcf..8259001 100644 +--- a/include/linux/vermagic.h ++++ b/include/linux/vermagic.h +@@ -25,9 +25,35 @@ + #define MODULE_ARCH_VERMAGIC "" + #endif + ++#ifdef CONFIG_PAX_REFCOUNT ++#define MODULE_PAX_REFCOUNT "REFCOUNT " ++#else ++#define MODULE_PAX_REFCOUNT "" ++#endif ++ ++#ifdef CONSTIFY_PLUGIN ++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " ++#else ++#define MODULE_CONSTIFY_PLUGIN "" ++#endif ++ ++#ifdef STACKLEAK_PLUGIN ++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " ++#else ++#define MODULE_STACKLEAK_PLUGIN "" ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define MODULE_GRSEC "GRSEC " ++#else ++#define MODULE_GRSEC "" ++#endif ++ + #define VERMAGIC_STRING \ + UTS_RELEASE " " \ + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ +- MODULE_ARCH_VERMAGIC ++ MODULE_ARCH_VERMAGIC \ ++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \ ++ MODULE_GRSEC + +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h +index 4bde182..c42a656 100644 +--- a/include/linux/vmalloc.h ++++ b/include/linux/vmalloc.h +@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ + #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ + #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ + #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ ++ 
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */ ++#endif ++ + /* bits [20..32] reserved for arch specific ioremap internals */ + + /* +@@ -51,18 +56,18 @@ static inline void vmalloc_init(void) + } + #endif + +-extern void *vmalloc(unsigned long size); +-extern void *vzalloc(unsigned long size); +-extern void *vmalloc_user(unsigned long size); +-extern void *vmalloc_node(unsigned long size, int node); +-extern void *vzalloc_node(unsigned long size, int node); +-extern void *vmalloc_exec(unsigned long size); +-extern void *vmalloc_32(unsigned long size); +-extern void *vmalloc_32_user(unsigned long size); +-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); ++extern void *vmalloc(unsigned long size) __size_overflow(1); ++extern void *vzalloc(unsigned long size) __size_overflow(1); ++extern void *vmalloc_user(unsigned long size) __size_overflow(1); ++extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1); ++extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1); ++extern void *vmalloc_exec(unsigned long size) __size_overflow(1); ++extern void *vmalloc_32(unsigned long size) __size_overflow(1); ++extern void *vmalloc_32_user(unsigned long size) __size_overflow(1); ++extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1); + extern void *__vmalloc_node_range(unsigned long size, unsigned long align, + unsigned long start, unsigned long end, gfp_t gfp_mask, +- pgprot_t prot, int node, void *caller); ++ pgprot_t prot, int node, void *caller) __size_overflow(1); + extern void vfree(const void *addr); + + extern void *vmap(struct page **pages, unsigned int count, +@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); + extern void free_vm_area(struct vm_struct *area); + + /* for /dev/kmem */ +-extern long vread(char *buf, char *addr, unsigned long count); +-extern long vwrite(char *buf, char *addr, unsigned long count); ++extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3); ++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3); + + /* + * Internals. Dont't use.. +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h +index 65efb92..137adbb 100644 +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu) + /* + * Zone based page accounting with per cpu differentials. 
+ */ +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) + { +- atomic_long_add(x, &zone->vm_stat[item]); +- atomic_long_add(x, &vm_stat[item]); ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]); ++ atomic_long_add_unchecked(x, &vm_stat[item]); + } + + static inline unsigned long global_page_state(enum zone_stat_item item) + { +- long x = atomic_long_read(&vm_stat[item]); ++ long x = atomic_long_read_unchecked(&vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item) + static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone, + static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + + #ifdef CONFIG_SMP + int cpu; +@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone, + + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_inc(&zone->vm_stat[item]); +- atomic_long_inc(&vm_stat[item]); ++ atomic_long_inc_unchecked(&zone->vm_stat[item]); ++ atomic_long_inc_unchecked(&vm_stat[item]); + } + + static inline void __inc_zone_page_state(struct page *page, +@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page, + + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_dec(&zone->vm_stat[item]); +- atomic_long_dec(&vm_stat[item]); ++ atomic_long_dec_unchecked(&zone->vm_stat[item]); ++ atomic_long_dec_unchecked(&vm_stat[item]); + } + + static inline void __dec_zone_page_state(struct page *page, +diff --git a/include/linux/xattr.h b/include/linux/xattr.h +index e5d1220..ef6e406 100644 +--- a/include/linux/xattr.h ++++ b/include/linux/xattr.h +@@ -57,6 +57,11 @@ + #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" + #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT + ++/* User namespace */ ++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax." 
++#define XATTR_PAX_FLAGS_SUFFIX "flags" ++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX ++ + #ifdef __KERNEL__ + + #include <linux/types.h> +diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h +index 4aeff96..b378cdc 100644 +--- a/include/media/saa7146_vv.h ++++ b/include/media/saa7146_vv.h +@@ -163,7 +163,7 @@ struct saa7146_ext_vv + int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *); + + /* the extension can override this */ +- struct v4l2_ioctl_ops ops; ++ v4l2_ioctl_ops_no_const ops; + /* pointer to the saa7146 core ops */ + const struct v4l2_ioctl_ops *core_ops; + +diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h +index c7c40f1..4f01585 100644 +--- a/include/media/v4l2-dev.h ++++ b/include/media/v4l2-dev.h +@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local); + + + struct v4l2_file_operations { +- struct module *owner; ++ struct module * const owner; + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + unsigned int (*poll) (struct file *, struct poll_table_struct *); +@@ -68,6 +68,7 @@ struct v4l2_file_operations { + int (*open) (struct file *); + int (*release) (struct file *); + }; ++typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const; + + /* + * Newer version of video_device, handled by videodev2.c +diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h +index 4d1c74a..65e1221 100644 +--- a/include/media/v4l2-ioctl.h ++++ b/include/media/v4l2-ioctl.h +@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops { + long (*vidioc_default) (struct file *file, void *fh, + bool valid_prio, int cmd, void *arg); + }; +- ++typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const; + + /* v4l debugging and diagnostics */ + +diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h +index 8d55251..dfe5b0a 100644 +--- a/include/net/caif/caif_hsi.h ++++ b/include/net/caif/caif_hsi.h +@@ -98,7 +98,7 @@ struct cfhsi_drv { + void (*rx_done_cb) (struct cfhsi_drv *drv); + void (*wake_up_cb) (struct cfhsi_drv *drv); + void (*wake_down_cb) (struct cfhsi_drv *drv); +-}; ++} __no_const; + + /* Structure implemented by HSI device. */ + struct cfhsi_dev { +diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h +index 9e5425b..8136ffc 100644 +--- a/include/net/caif/cfctrl.h ++++ b/include/net/caif/cfctrl.h +@@ -52,7 +52,7 @@ struct cfctrl_rsp { + void (*radioset_rsp)(void); + void (*reject_rsp)(struct cflayer *layer, u8 linkid, + struct cflayer *client_layer); +-}; ++} __no_const; + + /* Link Setup Parameters for CAIF-Links. 
*/ + struct cfctrl_link_param { +@@ -101,8 +101,8 @@ struct cfctrl_request_info { + struct cfctrl { + struct cfsrvl serv; + struct cfctrl_rsp res; +- atomic_t req_seq_no; +- atomic_t rsp_seq_no; ++ atomic_unchecked_t req_seq_no; ++ atomic_unchecked_t rsp_seq_no; + struct list_head list; + /* Protects from simultaneous access to first_req list */ + spinlock_t info_list_lock; +diff --git a/include/net/flow.h b/include/net/flow.h +index 2a7eefd..3250f3b 100644 +--- a/include/net/flow.h ++++ b/include/net/flow.h +@@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup( + + extern void flow_cache_flush(void); + extern void flow_cache_flush_deferred(void); +-extern atomic_t flow_cache_genid; ++extern atomic_unchecked_t flow_cache_genid; + + #endif +diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h +index e9ff3fc..9d3e5c7 100644 +--- a/include/net/inetpeer.h ++++ b/include/net/inetpeer.h +@@ -48,8 +48,8 @@ struct inet_peer { + */ + union { + struct { +- atomic_t rid; /* Frag reception counter */ +- atomic_t ip_id_count; /* IP ID for the next packet */ ++ atomic_unchecked_t rid; /* Frag reception counter */ ++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */ + __u32 tcp_ts; + __u32 tcp_ts_stamp; + }; +@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more) + more++; + inet_peer_refcheck(p); + do { +- old = atomic_read(&p->ip_id_count); ++ old = atomic_read_unchecked(&p->ip_id_count); + new = old + more; + if (!new) + new = 1; +- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old); ++ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old); + return new; + } + +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index 10422ef..662570f 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); + + #define FIB_RES_SADDR(net, res) \ + ((FIB_RES_NH(res).nh_saddr_genid == \ +- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ ++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \ + FIB_RES_NH(res).nh_saddr : \ + fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) + #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h +index e5a7b9a..f4fc44b 100644 +--- a/include/net/ip_vs.h ++++ b/include/net/ip_vs.h +@@ -509,7 +509,7 @@ struct ip_vs_conn { + struct ip_vs_conn *control; /* Master control connection */ + atomic_t n_control; /* Number of controlled ones */ + struct ip_vs_dest *dest; /* real server */ +- atomic_t in_pkts; /* incoming packet counter */ ++ atomic_unchecked_t in_pkts; /* incoming packet counter */ + + /* packet transmitter for different forwarding methods. 
If it + mangles the packet, it must return NF_DROP or better NF_STOLEN, +@@ -647,7 +647,7 @@ struct ip_vs_dest { + __be16 port; /* port number of the server */ + union nf_inet_addr addr; /* IP address of the server */ + volatile unsigned flags; /* dest status flags */ +- atomic_t conn_flags; /* flags to copy to conn */ ++ atomic_unchecked_t conn_flags; /* flags to copy to conn */ + atomic_t weight; /* server weight */ + + atomic_t refcnt; /* reference counter */ +diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h +index 69b610a..fe3962c 100644 +--- a/include/net/irda/ircomm_core.h ++++ b/include/net/irda/ircomm_core.h +@@ -51,7 +51,7 @@ typedef struct { + int (*connect_response)(struct ircomm_cb *, struct sk_buff *); + int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *, + struct ircomm_info *); +-} call_t; ++} __no_const call_t; + + struct ircomm_cb { + irda_queue_t queue; +diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h +index 59ba38bc..d515662 100644 +--- a/include/net/irda/ircomm_tty.h ++++ b/include/net/irda/ircomm_tty.h +@@ -35,6 +35,7 @@ + #include <linux/termios.h> + #include <linux/timer.h> + #include <linux/tty.h> /* struct tty_struct */ ++#include <asm/local.h> + + #include <net/irda/irias_object.h> + #include <net/irda/ircomm_core.h> +@@ -105,8 +106,8 @@ struct ircomm_tty_cb { + unsigned short close_delay; + unsigned short closing_wait; /* time to wait before closing */ + +- int open_count; +- int blocked_open; /* # of blocked opens */ ++ local_t open_count; ++ local_t blocked_open; /* # of blocked opens */ + + /* Protect concurent access to : + * o self->open_count +diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h +index f2419cf..473679f 100644 +--- a/include/net/iucv/af_iucv.h ++++ b/include/net/iucv/af_iucv.h +@@ -139,7 +139,7 @@ struct iucv_sock { + struct iucv_sock_list { + struct hlist_head head; + rwlock_t lock; +- atomic_t autobind_name; ++ atomic_unchecked_t autobind_name; + }; + + unsigned int iucv_sock_poll(struct file *file, struct socket *sock, +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 2720884..3aa5c25 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -122,7 +122,7 @@ struct neigh_ops { + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +-}; ++} __do_const; + + struct pneigh_entry { + struct pneigh_entry *next; +diff --git a/include/net/netlink.h b/include/net/netlink.h +index cb1f350..3279d2c 100644 +--- a/include/net/netlink.h ++++ b/include/net/netlink.h +@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) + static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) + { + if (mark) +- skb_trim(skb, (unsigned char *) mark - skb->data); ++ skb_trim(skb, (const unsigned char *) mark - skb->data); + } + + /** +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index d786b4f..4c3dd41 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -56,8 +56,8 @@ struct netns_ipv4 { + + unsigned int sysctl_ping_group_range[2]; + +- atomic_t rt_genid; +- atomic_t dev_addr_genid; ++ atomic_unchecked_t rt_genid; ++ atomic_unchecked_t dev_addr_genid; + + #ifdef CONFIG_IP_MROUTE + #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h +index 6a72a58..e6a127d 100644 +--- a/include/net/sctp/sctp.h ++++ 
b/include/net/sctp/sctp.h +@@ -318,9 +318,9 @@ do { \ + + #else /* SCTP_DEBUG */ + +-#define SCTP_DEBUG_PRINTK(whatever...) +-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) +-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) ++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0) ++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0) ++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0) + #define SCTP_ENABLE_DEBUG + #define SCTP_DISABLE_DEBUG + #define SCTP_ASSERT(expr, str, func) +diff --git a/include/net/sock.h b/include/net/sock.h +index 32e3937..87a1dbc 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -277,7 +277,7 @@ struct sock { + #ifdef CONFIG_RPS + __u32 sk_rxhash; + #endif +- atomic_t sk_drops; ++ atomic_unchecked_t sk_drops; + int sk_rcvbuf; + + struct sk_filter __rcu *sk_filter; +@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags) + } + + static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, +- char __user *from, char *to, ++ char __user *from, unsigned char *to, + int copy, int offset) + { + if (skb->ip_summed == CHECKSUM_NONE) { +diff --git a/include/net/tcp.h b/include/net/tcp.h +index bb18c4d..bb87972 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo { + char *name; + sa_family_t family; + const struct file_operations *seq_fops; +- struct seq_operations seq_ops; ++ seq_operations_no_const seq_ops; + }; + + struct tcp_iter_state { +diff --git a/include/net/udp.h b/include/net/udp.h +index 3b285f4..0219639 100644 +--- a/include/net/udp.h ++++ b/include/net/udp.h +@@ -237,7 +237,7 @@ struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; + const struct file_operations *seq_fops; +- struct seq_operations seq_ops; ++ seq_operations_no_const seq_ops; + }; + + struct udp_iter_state { +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index b203e14..1df3991 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -505,7 +505,7 @@ struct xfrm_policy { + struct timer_list timer; + + struct flow_cache_object flo; +- atomic_t genid; ++ atomic_unchecked_t genid; + u32 priority; + u32 index; + struct xfrm_mark mark; +diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h +index 1a046b1..ee0bef0 100644 +--- a/include/rdma/iw_cm.h ++++ b/include/rdma/iw_cm.h +@@ -122,7 +122,7 @@ struct iw_cm_verbs { + int backlog); + + int (*destroy_listen)(struct iw_cm_id *cm_id); +-}; ++} __no_const; + + /** + * iw_create_cm_id - Create an IW CM identifier. 
+diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h +index 5d1a758..1dbf795 100644 +--- a/include/scsi/libfc.h ++++ b/include/scsi/libfc.h +@@ -748,6 +748,7 @@ struct libfc_function_template { + */ + void (*disc_stop_final) (struct fc_lport *); + }; ++typedef struct libfc_function_template __no_const libfc_function_template_no_const; + + /** + * struct fc_disc - Discovery context +@@ -851,7 +852,7 @@ struct fc_lport { + struct fc_vport *vport; + + /* Operational Information */ +- struct libfc_function_template tt; ++ libfc_function_template_no_const tt; + u8 link_up; + u8 qfull; + enum fc_lport_state state; +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index 5591ed5..13eb457 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -161,9 +161,9 @@ struct scsi_device { + unsigned int max_device_blocked; /* what device_blocked counts down from */ + #define SCSI_DEFAULT_DEVICE_BLOCKED 3 + +- atomic_t iorequest_cnt; +- atomic_t iodone_cnt; +- atomic_t ioerr_cnt; ++ atomic_unchecked_t iorequest_cnt; ++ atomic_unchecked_t iodone_cnt; ++ atomic_unchecked_t ioerr_cnt; + + struct device sdev_gendev, + sdev_dev; +diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h +index 2a65167..91e01f8 100644 +--- a/include/scsi/scsi_transport_fc.h ++++ b/include/scsi/scsi_transport_fc.h +@@ -711,7 +711,7 @@ struct fc_function_template { + unsigned long show_host_system_hostname:1; + + unsigned long disable_target_scan:1; +-}; ++} __do_const; + + + /** +diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h +index 030b87c..98a6954 100644 +--- a/include/sound/ak4xxx-adda.h ++++ b/include/sound/ak4xxx-adda.h +@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops { + void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg, + unsigned char val); + void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate); +-}; ++} __no_const; + + #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */ + +diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h +index 8c05e47..2b5df97 100644 +--- a/include/sound/hwdep.h ++++ b/include/sound/hwdep.h +@@ -49,7 +49,7 @@ struct snd_hwdep_ops { + struct snd_hwdep_dsp_status *status); + int (*dsp_load)(struct snd_hwdep *hw, + struct snd_hwdep_dsp_image *image); +-}; ++} __no_const; + + struct snd_hwdep { + struct snd_card *card; +diff --git a/include/sound/info.h b/include/sound/info.h +index 5492cc4..1a65278 100644 +--- a/include/sound/info.h ++++ b/include/sound/info.h +@@ -44,7 +44,7 @@ struct snd_info_entry_text { + struct snd_info_buffer *buffer); + void (*write)(struct snd_info_entry *entry, + struct snd_info_buffer *buffer); +-}; ++} __no_const; + + struct snd_info_entry_ops { + int (*open)(struct snd_info_entry *entry, +diff --git a/include/sound/pcm.h b/include/sound/pcm.h +index 0cf91b2..b70cae4 100644 +--- a/include/sound/pcm.h ++++ b/include/sound/pcm.h +@@ -81,6 +81,7 @@ struct snd_pcm_ops { + int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma); + int (*ack)(struct snd_pcm_substream *substream); + }; ++typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const; + + /* + * +diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h +index af1b49e..a5d55a5 100644 +--- a/include/sound/sb16_csp.h ++++ b/include/sound/sb16_csp.h +@@ -146,7 +146,7 @@ struct snd_sb_csp_ops { + int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels); + int (*csp_stop) (struct snd_sb_csp * p); + int (*csp_qsound_transfer) (struct snd_sb_csp * p); 
+-}; ++} __no_const; + + /* + * CSP private data +diff --git a/include/sound/soc.h b/include/sound/soc.h +index 11cfb59..e3f93f4 100644 +--- a/include/sound/soc.h ++++ b/include/sound/soc.h +@@ -683,7 +683,7 @@ struct snd_soc_platform_driver { + /* platform IO - used for platform DAPM */ + unsigned int (*read)(struct snd_soc_platform *, unsigned int); + int (*write)(struct snd_soc_platform *, unsigned int, unsigned int); +-}; ++} __do_const; + + struct snd_soc_platform { + const char *name; +diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h +index 444cd6b..3327cc5 100644 +--- a/include/sound/ymfpci.h ++++ b/include/sound/ymfpci.h +@@ -358,7 +358,7 @@ struct snd_ymfpci { + spinlock_t reg_lock; + spinlock_t voice_lock; + wait_queue_head_t interrupt_sleep; +- atomic_t interrupt_sleep_count; ++ atomic_unchecked_t interrupt_sleep_count; + struct snd_info_entry *proc_entry; + const struct firmware *dsp_microcode; + const struct firmware *controller_microcode; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 94bbec3..3a8c6b0 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -346,7 +346,7 @@ struct t10_reservation_ops { + int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); + int (*t10_pr_register)(struct se_cmd *); + int (*t10_pr_clear)(struct se_cmd *); +-}; ++} __no_const; + + struct t10_reservation { + /* Reservation effects all target ports */ +@@ -465,8 +465,8 @@ struct se_cmd { + atomic_t t_se_count; + atomic_t t_task_cdbs_left; + atomic_t t_task_cdbs_ex_left; +- atomic_t t_task_cdbs_sent; +- atomic_t t_transport_aborted; ++ atomic_unchecked_t t_task_cdbs_sent; ++ atomic_unchecked_t t_transport_aborted; + atomic_t t_transport_active; + atomic_t t_transport_complete; + atomic_t t_transport_queue_active; +@@ -705,7 +705,7 @@ struct se_device { + /* Active commands on this virtual SE device */ + atomic_t simple_cmds; + atomic_t depth_left; +- atomic_t dev_ordered_id; ++ atomic_unchecked_t dev_ordered_id; + atomic_t execute_tasks; + atomic_t dev_ordered_sync; + atomic_t dev_qf_count; +diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h +index 1c09820..7f5ec79 100644 +--- a/include/trace/events/irq.h ++++ b/include/trace/events/irq.h +@@ -36,7 +36,7 @@ struct softirq_action; + */ + TRACE_EVENT(irq_handler_entry, + +- TP_PROTO(int irq, struct irqaction *action), ++ TP_PROTO(int irq, const struct irqaction *action), + + TP_ARGS(irq, action), + +@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry, + */ + TRACE_EVENT(irq_handler_exit, + +- TP_PROTO(int irq, struct irqaction *action, int ret), ++ TP_PROTO(int irq, const struct irqaction *action, int ret), + + TP_ARGS(irq, action, ret), + +diff --git a/include/video/udlfb.h b/include/video/udlfb.h +index c41f308..6918de3 100644 +--- a/include/video/udlfb.h ++++ b/include/video/udlfb.h +@@ -52,10 +52,10 @@ struct dlfb_data { + u32 pseudo_palette[256]; + int blank_mode; /*one of FB_BLANK_ */ + /* blit-only rendering path metrics, exposed through sysfs */ +- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */ +- atomic_t bytes_identical; /* saved effort with backbuffer comparison */ +- atomic_t bytes_sent; /* to usb, after compression including overhead */ +- atomic_t cpu_kcycles_used; /* transpired during pixel processing */ ++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */ ++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */ ++ atomic_unchecked_t 
bytes_sent; /* to usb, after compression including overhead */ ++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */ + }; + + #define NR_USB_REQUEST_I2C_SUB_IO 0x02 +diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h +index 0993a22..32ba2fe 100644 +--- a/include/video/uvesafb.h ++++ b/include/video/uvesafb.h +@@ -177,6 +177,7 @@ struct uvesafb_par { + u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */ + u8 pmi_setpal; /* PMI for palette changes */ + u16 *pmi_base; /* protected mode interface location */ ++ u8 *pmi_code; /* protected mode code location */ + void *pmi_start; + void *pmi_pal; + u8 *vbe_state_orig; /* +diff --git a/init/Kconfig b/init/Kconfig +index 43298f9..2f56c12 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1214,7 +1214,7 @@ config SLUB_DEBUG + + config COMPAT_BRK + bool "Disable heap randomization" +- default y ++ default n + help + Randomizing heap placement makes heap exploits harder, but it + also breaks ancient binaries (including anything libc5 based). +diff --git a/init/do_mounts.c b/init/do_mounts.c +index db6e5ee..7677ff7 100644 +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page) + + static int __init do_mount_root(char *name, char *fs, int flags, void *data) + { +- int err = sys_mount(name, "/root", fs, flags, data); ++ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data); + if (err) + return err; + +- sys_chdir((const char __user __force *)"/root"); ++ sys_chdir((const char __force_user*)"/root"); + ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev; + printk(KERN_INFO + "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n", +@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...) 
+ va_start(args, fmt); + vsprintf(buf, fmt, args); + va_end(args); +- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); ++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); + if (fd >= 0) { + sys_ioctl(fd, FDEJECT, 0); + sys_close(fd); + } + printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); +- fd = sys_open("/dev/console", O_RDWR, 0); ++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0); + if (fd >= 0) { + sys_ioctl(fd, TCGETS, (long)&termios); + termios.c_lflag &= ~ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); +- sys_read(fd, &c, 1); ++ sys_read(fd, (char __user *)&c, 1); + termios.c_lflag |= ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); + sys_close(fd); +@@ -553,6 +553,6 @@ void __init prepare_namespace(void) + mount_root(); + out: + devtmpfs_mount("dev"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot((const char __user __force *)"."); ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((const char __force_user *)"."); + } +diff --git a/init/do_mounts.h b/init/do_mounts.h +index f5b978a..69dbfe8 100644 +--- a/init/do_mounts.h ++++ b/init/do_mounts.h +@@ -15,15 +15,15 @@ extern int root_mountflags; + + static inline int create_dev(char *name, dev_t dev) + { +- sys_unlink(name); +- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); ++ sys_unlink((char __force_user *)name); ++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev)); + } + + #if BITS_PER_LONG == 32 + static inline u32 bstat(char *name) + { + struct stat64 stat; +- if (sys_stat64(name, &stat) != 0) ++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +@@ -35,7 +35,7 @@ static inline u32 bstat(char *name) + static inline u32 bstat(char *name) + { + struct stat stat; +- if (sys_newstat(name, &stat) != 0) ++ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c +index 3098a38..253064e 100644 +--- a/init/do_mounts_initrd.c ++++ b/init/do_mounts_initrd.c +@@ -44,13 +44,13 @@ static void __init handle_initrd(void) + create_dev("/dev/root.old", Root_RAM0); + /* mount initrd on rootfs' /root */ + mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); +- sys_mkdir("/old", 0700); +- root_fd = sys_open("/", 0, 0); +- old_fd = sys_open("/old", 0, 0); ++ sys_mkdir((const char __force_user *)"/old", 0700); ++ root_fd = sys_open((const char __force_user *)"/", 0, 0); ++ old_fd = sys_open((const char __force_user *)"/old", 0, 0); + /* move initrd over / and chdir/chroot in initrd root */ +- sys_chdir("/root"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_chdir((const char __force_user *)"/root"); ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((const char __force_user *)"."); + + /* + * In case that a resume from disk is carried out by linuxrc or one of +@@ -67,15 +67,15 @@ static void __init handle_initrd(void) + + /* move initrd to rootfs' /old */ + sys_fchdir(old_fd); +- sys_mount("/", ".", NULL, MS_MOVE, NULL); ++ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL); + /* switch root and cwd back to / of rootfs */ + sys_fchdir(root_fd); +- sys_chroot("."); ++ sys_chroot((const char __force_user *)"."); + sys_close(old_fd); + 
sys_close(root_fd); + + if (new_decode_dev(real_root_dev) == Root_RAM0) { +- sys_chdir("/old"); ++ sys_chdir((const char __force_user *)"/old"); + return; + } + +@@ -83,17 +83,17 @@ static void __init handle_initrd(void) + mount_root(); + + printk(KERN_NOTICE "Trying to move old root to /initrd ... "); +- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); ++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL); + if (!error) + printk("okay\n"); + else { +- int fd = sys_open("/dev/root.old", O_RDWR, 0); ++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0); + if (error == -ENOENT) + printk("/initrd does not exist. Ignored.\n"); + else + printk("failed\n"); + printk(KERN_NOTICE "Unmounting old root\n"); +- sys_umount("/old", MNT_DETACH); ++ sys_umount((char __force_user *)"/old", MNT_DETACH); + printk(KERN_NOTICE "Trying to free ramdisk memory ... "); + if (fd < 0) { + error = fd; +@@ -116,11 +116,11 @@ int __init initrd_load(void) + * mounted in the normal path. + */ + if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + handle_initrd(); + return 1; + } + } +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + return 0; + } +diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c +index 32c4799..c27ee74 100644 +--- a/init/do_mounts_md.c ++++ b/init/do_mounts_md.c +@@ -170,7 +170,7 @@ static void __init md_setup_drive(void) + partitioned ? "_d" : "", minor, + md_setup_args[ent].device_names); + +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + if (fd < 0) { + printk(KERN_ERR "md: open failed - cannot start " + "array %s\n", name); +@@ -233,7 +233,7 @@ static void __init md_setup_drive(void) + * array without it + */ + sys_close(fd); +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + sys_ioctl(fd, BLKRRPART, 0); + } + sys_close(fd); +@@ -283,7 +283,7 @@ static void __init autodetect_raid(void) + + wait_for_device_probe(); + +- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0); ++ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0); + if (fd >= 0) { + sys_ioctl(fd, RAID_AUTORUN, raid_autopart); + sys_close(fd); +diff --git a/init/initramfs.c b/init/initramfs.c +index 2531811..040d4d4 100644 +--- a/init/initramfs.c ++++ b/init/initramfs.c +@@ -74,7 +74,7 @@ static void __init free_hash(void) + } + } + +-static long __init do_utime(char __user *filename, time_t mtime) ++static long __init do_utime(__force char __user *filename, time_t mtime) + { + struct timespec t[2]; + +@@ -109,7 +109,7 @@ static void __init dir_utime(void) + struct dir_entry *de, *tmp; + list_for_each_entry_safe(de, tmp, &dir_list, list) { + list_del(&de->list); +- do_utime(de->name, de->mtime); ++ do_utime((char __force_user *)de->name, de->mtime); + kfree(de->name); + kfree(de); + } +@@ -271,7 +271,7 @@ static int __init maybe_link(void) + if (nlink >= 2) { + char *old = find_link(major, minor, ino, mode, collected); + if (old) +- return (sys_link(old, collected) < 0) ? -1 : 1; ++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? 
-1 : 1; + } + return 0; + } +@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode) + { + struct stat st; + +- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { ++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) { + if (S_ISDIR(st.st_mode)) +- sys_rmdir(path); ++ sys_rmdir((char __force_user *)path); + else +- sys_unlink(path); ++ sys_unlink((char __force_user *)path); + } + } + +@@ -305,7 +305,7 @@ static int __init do_name(void) + int openflags = O_WRONLY|O_CREAT; + if (ml != 1) + openflags |= O_TRUNC; +- wfd = sys_open(collected, openflags, mode); ++ wfd = sys_open((char __force_user *)collected, openflags, mode); + + if (wfd >= 0) { + sys_fchown(wfd, uid, gid); +@@ -317,17 +317,17 @@ static int __init do_name(void) + } + } + } else if (S_ISDIR(mode)) { +- sys_mkdir(collected, mode); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); ++ sys_mkdir((char __force_user *)collected, mode); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); + dir_add(collected, mtime); + } else if (S_ISBLK(mode) || S_ISCHR(mode) || + S_ISFIFO(mode) || S_ISSOCK(mode)) { + if (maybe_link() == 0) { +- sys_mknod(collected, mode, rdev); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); +- do_utime(collected, mtime); ++ sys_mknod((char __force_user *)collected, mode, rdev); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); ++ do_utime((char __force_user *)collected, mtime); + } + } + return 0; +@@ -336,15 +336,15 @@ static int __init do_name(void) + static int __init do_copy(void) + { + if (count >= body_len) { +- sys_write(wfd, victim, body_len); ++ sys_write(wfd, (char __force_user *)victim, body_len); + sys_close(wfd); +- do_utime(vcollected, mtime); ++ do_utime((char __force_user *)vcollected, mtime); + kfree(vcollected); + eat(body_len); + state = SkipIt; + return 0; + } else { +- sys_write(wfd, victim, count); ++ sys_write(wfd, (char __force_user *)victim, count); + body_len -= count; + eat(count); + return 1; +@@ -355,9 +355,9 @@ static int __init do_symlink(void) + { + collected[N_ALIGN(name_len) + body_len] = '\0'; + clean_path(collected, 0); +- sys_symlink(collected + N_ALIGN(name_len), collected); +- sys_lchown(collected, uid, gid); +- do_utime(collected, mtime); ++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected); ++ sys_lchown((char __force_user *)collected, uid, gid); ++ do_utime((char __force_user *)collected, mtime); + state = SkipIt; + next_state = Reset; + return 0; +diff --git a/init/main.c b/init/main.c +index 217ed23..ec5406f 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { } + extern void tc_init(void); + #endif + ++extern void grsecurity_init(void); ++ + /* + * Debug helper: via this flag we know that we are in 'early bootup code' + * where only the boot processor is running with IRQ disabled. 
This means +@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str) + + __setup("reset_devices", set_reset_devices); + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern char pax_enter_kernel_user[]; ++extern char pax_exit_kernel_user[]; ++extern pgdval_t clone_pgd_mask; ++#endif ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF) ++static int __init setup_pax_nouderef(char *str) ++{ ++#ifdef CONFIG_X86_32 ++ unsigned int cpu; ++ struct desc_struct *gdt; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3; ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf; ++ } ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); ++#else ++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1); ++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1); ++ clone_pgd_mask = ~(pgdval_t)0UL; ++#endif ++ ++ return 0; ++} ++early_param("pax_nouderef", setup_pax_nouderef); ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++int pax_softmode; ++ ++static int __init setup_pax_softmode(char *str) ++{ ++ get_option(&str, &pax_softmode); ++ return 1; ++} ++__setup("pax_softmode=", setup_pax_softmode); ++#endif ++ + static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; + const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; + static const char *panic_later, *panic_param; +@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn) + { + int count = preempt_count(); + int ret; ++ const char *msg1 = "", *msg2 = ""; + + if (initcall_debug) + ret = do_one_initcall_debug(fn); +@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn) + sprintf(msgbuf, "error code %d ", ret); + + if (preempt_count() != count) { +- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); ++ msg1 = " preemption imbalance"; + preempt_count() = count; + } + if (irqs_disabled()) { +- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); ++ msg2 = " disabled interrupts"; + local_irq_enable(); + } +- if (msgbuf[0]) { +- printk("initcall %pF returned with %s\n", fn, msgbuf); ++ if (msgbuf[0] || *msg1 || *msg2) { ++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2); + } + + return ret; +@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused) + do_basic_setup(); + + /* Open the /dev/console on the rootfs, this should never fail */ +- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) ++ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0) + printk(KERN_WARNING "Warning: unable to open an initial console.\n"); + + (void) sys_dup(0); +@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused) + if (!ramdisk_execute_command) + ramdisk_execute_command = "/init"; + +- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { ++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) { + ramdisk_execute_command = NULL; + prepare_namespace(); + } + ++ grsecurity_init(); ++ + /* + * Ok, we have completed the initial bootup, and + * we're essentially up and running. 
Get rid of the +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index 5b4293d..f179875 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, + mq_bytes = (mq_msg_tblsz + + (info->attr.mq_maxmsg * info->attr.mq_msgsize)); + ++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1); + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) { +diff --git a/ipc/msg.c b/ipc/msg.c +index 7385de2..a8180e08 100644 +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) + return security_msg_queue_associate(msq, msgflg); + } + ++static struct ipc_ops msg_ops = { ++ .getnew = newque, ++ .associate = msg_security, ++ .more_checks = NULL ++}; ++ + SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) + { + struct ipc_namespace *ns; +- struct ipc_ops msg_ops; + struct ipc_params msg_params; + + ns = current->nsproxy->ipc_ns; + +- msg_ops.getnew = newque; +- msg_ops.associate = msg_security; +- msg_ops.more_checks = NULL; +- + msg_params.key = key; + msg_params.flg = msgflg; + +diff --git a/ipc/sem.c b/ipc/sem.c +index 5215a81..cfc0cac 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops sem_ops = { ++ .getnew = newary, ++ .associate = sem_security, ++ .more_checks = sem_more_checks ++}; ++ + SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + { + struct ipc_namespace *ns; +- struct ipc_ops sem_ops; + struct ipc_params sem_params; + + ns = current->nsproxy->ipc_ns; +@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + if (nsems < 0 || nsems > ns->sc_semmsl) + return -EINVAL; + +- sem_ops.getnew = newary; +- sem_ops.associate = sem_security; +- sem_ops.more_checks = sem_more_checks; +- + sem_params.key = key; + sem_params.flg = semflg; + sem_params.u.nsems = nsems; +diff --git a/ipc/shm.c b/ipc/shm.c +index b76be5b..859e750 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); + static int sysvipc_shm_proc_show(struct seq_file *s, void *it); + #endif + ++#ifdef CONFIG_GRKERNSEC ++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, ++ const int shmid); ++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime); ++#endif ++ + void shm_init_ns(struct ipc_namespace *ns) + { + ns->shm_ctlmax = SHMMAX; +@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + shp->shm_lprid = 0; + shp->shm_atim = shp->shm_dtim = 0; + shp->shm_ctim = get_seconds(); ++#ifdef CONFIG_GRKERNSEC ++ { ++ struct timespec timeval; ++ do_posix_clock_monotonic_gettime(&timeval); ++ ++ shp->shm_createtime = timeval.tv_sec; ++ } ++#endif + shp->shm_segsz = size; + shp->shm_nattch = 0; + shp->shm_file = file; +@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops shm_ops = { ++ .getnew = newseg, ++ .associate = shm_security, ++ .more_checks = shm_more_checks ++}; ++ + SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) + { + struct ipc_namespace *ns; +- struct ipc_ops shm_ops; + struct ipc_params shm_params; + + ns = current->nsproxy->ipc_ns; + +- 
shm_ops.getnew = newseg; +- shm_ops.associate = shm_security; +- shm_ops.more_checks = shm_more_checks; +- + shm_params.key = key; + shm_params.flg = shmflg; + shm_params.u.size = size; +@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) + f_mode = FMODE_READ | FMODE_WRITE; + } + if (shmflg & SHM_EXEC) { ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ goto out; ++#endif ++ + prot |= PROT_EXEC; + acc_mode |= S_IXUGO; + } +@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) + if (err) + goto out_unlock; + ++#ifdef CONFIG_GRKERNSEC ++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, ++ shp->shm_perm.cuid, shmid) || ++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { ++ err = -EACCES; ++ goto out_unlock; ++ } ++#endif ++ + path = shp->shm_file->f_path; + path_get(&path); + shp->shm_nattch++; ++#ifdef CONFIG_GRKERNSEC ++ shp->shm_lapid = current->pid; ++#endif + size = i_size_read(path.dentry->d_inode); + shm_unlock(shp); + +diff --git a/kernel/acct.c b/kernel/acct.c +index fa7eb3d..7faf116 100644 +--- a/kernel/acct.c ++++ b/kernel/acct.c +@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, + */ + flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; +- file->f_op->write(file, (char *)&ac, ++ file->f_op->write(file, (char __force_user *)&ac, + sizeof(acct_t), &file->f_pos); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; + set_fs(fs); +diff --git a/kernel/audit.c b/kernel/audit.c +index 09fae26..ed71d5b 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0; + 3) suppressed due to audit_rate_limit + 4) suppressed due to audit_backlog_limit + */ +-static atomic_t audit_lost = ATOMIC_INIT(0); ++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0); + + /* The netlink socket. 
*/ + static struct sock *audit_sock; +@@ -237,7 +237,7 @@ void audit_log_lost(const char *message) + unsigned long now; + int print; + +- atomic_inc(&audit_lost); ++ atomic_inc_unchecked(&audit_lost); + + print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); + +@@ -256,7 +256,7 @@ void audit_log_lost(const char *message) + printk(KERN_WARNING + "audit: audit_lost=%d audit_rate_limit=%d " + "audit_backlog_limit=%d\n", +- atomic_read(&audit_lost), ++ atomic_read_unchecked(&audit_lost), + audit_rate_limit, + audit_backlog_limit); + audit_panic(message); +@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + status_set.pid = audit_pid; + status_set.rate_limit = audit_rate_limit; + status_set.backlog_limit = audit_backlog_limit; +- status_set.lost = atomic_read(&audit_lost); ++ status_set.lost = atomic_read_unchecked(&audit_lost); + status_set.backlog = skb_queue_len(&audit_skb_queue); + audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, + &status_set, sizeof(status_set)); +@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, + avail = audit_expand(ab, + max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); + if (!avail) +- goto out; ++ goto out_va_end; + len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); + } +- va_end(args2); + if (len > 0) + skb_put(skb, len); ++out_va_end: ++ va_end(args2); + out: + return; + } +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index 47b7fc1..c003c33 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context, + struct audit_buffer **ab, + struct audit_aux_data_execve *axi) + { +- int i; +- size_t len, len_sent = 0; ++ int i, len; ++ size_t len_sent = 0; + const char __user *p; + char *buf; + +@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx, + } + + /* global counter which is incremented every time something logs in */ +-static atomic_t session_id = ATOMIC_INIT(0); ++static atomic_unchecked_t session_id = ATOMIC_INIT(0); + + /** + * audit_set_loginuid - set a task's audit_context loginuid +@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0); + */ + int audit_set_loginuid(struct task_struct *task, uid_t loginuid) + { +- unsigned int sessionid = atomic_inc_return(&session_id); ++ unsigned int sessionid = atomic_inc_return_unchecked(&session_id); + struct audit_context *context = task->audit_context; + + if (context && context->in_syscall) { +diff --git a/kernel/capability.c b/kernel/capability.c +index b463871..fa3ea1f 100644 +--- a/kernel/capability.c ++++ b/kernel/capability.c +@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) + * before modification is attempted and the application + * fails. 
+ */ ++ if (tocopy > ARRAY_SIZE(kdata)) ++ return -EFAULT; ++ + if (copy_to_user(dataptr, kdata, tocopy + * sizeof(struct __user_cap_data_struct))) { + return -EFAULT; +@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap) + BUG(); + } + +- if (security_capable(ns, current_cred(), cap) == 0) { ++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) { + current->flags |= PF_SUPERPRIV; + return true; + } +@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap) + } + EXPORT_SYMBOL(ns_capable); + ++bool ns_capable_nolog(struct user_namespace *ns, int cap) ++{ ++ if (unlikely(!cap_valid(cap))) { ++ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); ++ BUG(); ++ } ++ ++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) { ++ current->flags |= PF_SUPERPRIV; ++ return true; ++ } ++ return false; ++} ++EXPORT_SYMBOL(ns_capable_nolog); ++ ++bool capable_nolog(int cap) ++{ ++ return ns_capable_nolog(&init_user_ns, cap); ++} ++EXPORT_SYMBOL(capable_nolog); ++ + /** + * task_ns_capable - Determine whether current task has a superior + * capability targeted at a specific task's user namespace. +@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap) + } + EXPORT_SYMBOL(task_ns_capable); + ++bool task_ns_capable_nolog(struct task_struct *t, int cap) ++{ ++ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap); ++} ++EXPORT_SYMBOL(task_ns_capable_nolog); ++ + /** + * nsown_capable - Check superior capability to one's own user_ns + * @cap: The capability in question +diff --git a/kernel/compat.c b/kernel/compat.c +index f346ced..aa2b1f4 100644 +--- a/kernel/compat.c ++++ b/kernel/compat.c +@@ -13,6 +13,7 @@ + + #include <linux/linkage.h> + #include <linux/compat.h> ++#include <linux/module.h> + #include <linux/errno.h> + #include <linux/time.h> + #include <linux/signal.h> +@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) + mm_segment_t oldfs; + long ret; + +- restart->nanosleep.rmtp = (struct timespec __user *) &rmt; ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt; + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep_restart(restart); +@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep(&tu, +- rmtp ? (struct timespec __user *)&rmt : NULL, ++ rmtp ? (struct timespec __force_user *)&rmt : NULL, + HRTIMER_MODE_REL, CLOCK_MONOTONIC); + set_fs(oldfs); + +@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_sigpending((old_sigset_t __user *) &s); ++ ret = sys_sigpending((old_sigset_t __force_user *) &s); + set_fs(old_fs); + if (ret == 0) + ret = put_user(s, set); +@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_sigprocmask(how, +- set ? (old_sigset_t __user *) &s : NULL, +- oset ? (old_sigset_t __user *) &s : NULL); ++ set ? (old_sigset_t __force_user *) &s : NULL, ++ oset ? 
(old_sigset_t __force_user *) &s : NULL); + set_fs(old_fs); + if (ret == 0) + if (oset) +@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_old_getrlimit(resource, &r); ++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r); + set_fs(old_fs); + + if (!ret) { +@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_getrusage(who, (struct rusage __user *) &r); ++ ret = sys_getrusage(who, (struct rusage __force_user *) &r); + set_fs(old_fs); + + if (ret) +@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, + set_fs (KERNEL_DS); + ret = sys_wait4(pid, + (stat_addr ? +- (unsigned int __user *) &status : NULL), +- options, (struct rusage __user *) &r); ++ (unsigned int __force_user *) &status : NULL), ++ options, (struct rusage __force_user *) &r); + set_fs (old_fs); + + if (ret > 0) { +@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, + memset(&info, 0, sizeof(info)); + + set_fs(KERNEL_DS); +- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, +- uru ? (struct rusage __user *)&ru : NULL); ++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options, ++ uru ? (struct rusage __force_user *)&ru : NULL); + set_fs(old_fs); + + if ((ret < 0) || (info.si_signo == 0)) +@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_settime(timer_id, flags, +- (struct itimerspec __user *) &newts, +- (struct itimerspec __user *) &oldts); ++ (struct itimerspec __force_user *) &newts, ++ (struct itimerspec __force_user *) &oldts); + set_fs(oldfs); + if (!err && old && put_compat_itimerspec(old, &oldts)) + return -EFAULT; +@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_gettime(timer_id, +- (struct itimerspec __user *) &ts); ++ (struct itimerspec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_itimerspec(setting, &ts)) + return -EFAULT; +@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_settime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + return err; + } +@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_gettime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock, + + oldfs = get_fs(); + set_fs(KERNEL_DS); +- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc); ++ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc); + set_fs(oldfs); + + err = compat_put_timex(utp, &txc); +@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_getres(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && tp && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct 
restart_block *restart) + long err; + mm_segment_t oldfs; + struct timespec tu; +- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; ++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; + +- restart->nanosleep.rmtp = (struct timespec __user *) &tu; ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu; + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = clock_nanosleep_restart(restart); +@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_nanosleep(which_clock, flags, +- (struct timespec __user *) &in, +- (struct timespec __user *) &out); ++ (struct timespec __force_user *) &in, ++ (struct timespec __force_user *) &out); + set_fs(oldfs); + + if ((err == -ERESTART_RESTARTBLOCK) && rmtp && +diff --git a/kernel/configs.c b/kernel/configs.c +index 42e8fa0..9e7406b 100644 +--- a/kernel/configs.c ++++ b/kernel/configs.c +@@ -74,8 +74,19 @@ static int __init ikconfig_init(void) + struct proc_dir_entry *entry; + + /* create the current config file */ ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL, ++ &ikconfig_file_ops); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL, ++ &ikconfig_file_ops); ++#endif ++#else + entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, + &ikconfig_file_ops); ++#endif ++ + if (!entry) + return -ENOMEM; + +diff --git a/kernel/cred.c b/kernel/cred.c +index 5791612..a3c04dc 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk) + validate_creds(cred); + put_cred(cred); + } ++ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ cred = (struct cred *) tsk->delayed_cred; ++ if (cred) { ++ tsk->delayed_cred = NULL; ++ validate_creds(cred); ++ put_cred(cred); ++ } ++#endif + } + + /** +@@ -470,7 +479,7 @@ error_put: + * Always returns 0 thus allowing this function to be tail-called at the end + * of, say, sys_setgid(). 
+ */ +-int commit_creds(struct cred *new) ++static int __commit_creds(struct cred *new) + { + struct task_struct *task = current; + const struct cred *old = task->real_cred; +@@ -489,6 +498,8 @@ int commit_creds(struct cred *new) + + get_cred(new); /* we will require a ref for the subj creds too */ + ++ gr_set_role_label(task, new->uid, new->gid); ++ + /* dumpability changes */ + if (old->euid != new->euid || + old->egid != new->egid || +@@ -538,6 +549,92 @@ int commit_creds(struct cred *new) + put_cred(old); + return 0; + } ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int set_user(struct cred *new); ++ ++void gr_delayed_cred_worker(void) ++{ ++ const struct cred *new = current->delayed_cred; ++ struct cred *ncred; ++ ++ current->delayed_cred = NULL; ++ ++ if (current_uid() && new != NULL) { ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ return; ++ } else if (new == NULL) ++ return; ++ ++ ncred = prepare_creds(); ++ if (!ncred) ++ goto die; ++ // uids ++ ncred->uid = new->uid; ++ ncred->euid = new->euid; ++ ncred->suid = new->suid; ++ ncred->fsuid = new->fsuid; ++ // gids ++ ncred->gid = new->gid; ++ ncred->egid = new->egid; ++ ncred->sgid = new->sgid; ++ ncred->fsgid = new->fsgid; ++ // groups ++ if (set_groups(ncred, new->group_info) < 0) { ++ abort_creds(ncred); ++ goto die; ++ } ++ // caps ++ ncred->securebits = new->securebits; ++ ncred->cap_inheritable = new->cap_inheritable; ++ ncred->cap_permitted = new->cap_permitted; ++ ncred->cap_effective = new->cap_effective; ++ ncred->cap_bset = new->cap_bset; ++ ++ if (set_user(ncred)) { ++ abort_creds(ncred); ++ goto die; ++ } ++ ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ ++ __commit_creds(ncred); ++ return; ++die: ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++int commit_creds(struct cred *new) ++{ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ struct task_struct *t; ++ ++ /* we won't get called with tasklist_lock held for writing ++ and interrupts disabled as the cred struct in that case is ++ init_cred ++ */ ++ if (grsec_enable_setxid && !current_is_single_threaded() && ++ !current_uid() && new->uid) { ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ for (t = next_thread(current); t != current; ++ t = next_thread(t)) { ++ if (t->delayed_cred == NULL) { ++ t->delayed_cred = get_cred(new); ++ set_tsk_need_resched(t); ++ } ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ } ++#endif ++ return __commit_creds(new); ++} ++ + EXPORT_SYMBOL(commit_creds); + + /** +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index 0d7c087..01b8cef 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock); + */ + static atomic_t masters_in_kgdb; + static atomic_t slaves_in_kgdb; +-static atomic_t kgdb_break_tasklet_var; ++static atomic_unchecked_t kgdb_break_tasklet_var; + atomic_t kgdb_setting_breakpoint; + + struct task_struct *kgdb_usethread; +@@ -129,7 +129,7 @@ int kgdb_single_step; + static pid_t kgdb_sstep_pid; + + /* to keep track of the CPU which is doing the single stepping*/ +-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); ++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); + + /* + * If you are debugging a problem where roundup (the collection of +@@ -542,7 +542,7 @@ return_normal: + * kernel will only try for the value of sstep_tries before + * giving up and continuing on. 
+ */ +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 && + (kgdb_info[cpu].task && + kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { + atomic_set(&kgdb_active, -1); +@@ -636,8 +636,8 @@ cpu_master_loop: + } + + kgdb_restore: +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { +- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { ++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step); + if (kgdb_info[sstep_cpu].task) + kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; + else +@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void) + static void kgdb_tasklet_bpt(unsigned long ing) + { + kgdb_breakpoint(); +- atomic_set(&kgdb_break_tasklet_var, 0); ++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0); + } + + static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); + + void kgdb_schedule_breakpoint(void) + { +- if (atomic_read(&kgdb_break_tasklet_var) || ++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) || + atomic_read(&kgdb_active) != -1 || + atomic_read(&kgdb_setting_breakpoint)) + return; +- atomic_inc(&kgdb_break_tasklet_var); ++ atomic_inc_unchecked(&kgdb_break_tasklet_var); + tasklet_schedule(&kgdb_tasklet_breakpoint); + } + EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint); +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c +index 63786e7..0780cac 100644 +--- a/kernel/debug/kdb/kdb_main.c ++++ b/kernel/debug/kdb/kdb_main.c +@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv) + list_for_each_entry(mod, kdb_modules, list) { + + kdb_printf("%-20s%8u 0x%p ", mod->name, +- mod->core_size, (void *)mod); ++ mod->core_size_rx + mod->core_size_rw, (void *)mod); + #ifdef CONFIG_MODULE_UNLOAD + kdb_printf("%4d ", module_refcount(mod)); + #endif +@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv) + kdb_printf(" (Loading)"); + else + kdb_printf(" (Live)"); +- kdb_printf(" 0x%p", mod->module_core); ++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); + + #ifdef CONFIG_MODULE_UNLOAD + { +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 58690af..d903d75 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write, + return 0; + } + +-static atomic64_t perf_event_id; ++static atomic64_unchecked_t perf_event_id; + + static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, + enum event_type_t event_type); +@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info) + + static inline u64 perf_event_count(struct perf_event *event) + { +- return local64_read(&event->count) + atomic64_read(&event->child_count); ++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count); + } + + static u64 perf_event_read(struct perf_event *event) +@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) + mutex_lock(&event->child_mutex); + total += perf_event_read(event); + *enabled += event->total_time_enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + *running += event->total_time_running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + + list_for_each_entry(child, &event->child_list, child_list) { + total += 
perf_event_read(child); +@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event) + userpg->offset -= local64_read(&event->hw.prev_count); + + userpg->time_enabled = enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + + userpg->time_running = running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + + barrier(); + ++userpg->lock; +@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle, + values[n++] = perf_event_count(event); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); +@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) + * need to add enough zero bytes after the string to handle + * the 64bit alignment we do later. + */ +- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); ++ buf = kzalloc(PATH_MAX, GFP_KERNEL); + if (!buf) { + name = strncpy(tmp, "//enomem", sizeof(tmp)); + goto got_name; + } +- name = d_path(&file->f_path, buf, PATH_MAX); ++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); + if (IS_ERR(name)) { + name = strncpy(tmp, "//toolong", sizeof(tmp)); + goto got_name; +@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, + event->parent = parent_event; + + event->ns = get_pid_ns(current->nsproxy->pid_ns); +- event->id = atomic64_inc_return(&perf_event_id); ++ event->id = atomic64_inc_return_unchecked(&perf_event_id); + + event->state = PERF_EVENT_STATE_INACTIVE; + +@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event, + /* + * Add back the child's count to the parent's count: + */ +- atomic64_add(child_val, &parent_event->child_count); +- atomic64_add(child_event->total_time_enabled, ++ atomic64_add_unchecked(child_val, &parent_event->child_count); ++ atomic64_add_unchecked(child_event->total_time_enabled, + &parent_event->child_total_time_enabled); +- atomic64_add(child_event->total_time_running, ++ atomic64_add_unchecked(child_event->total_time_running, + &parent_event->child_total_time_running); + + /* +diff --git a/kernel/exit.c b/kernel/exit.c +index e6e01b9..0a21b0a 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -57,6 +57,10 @@ + #include <asm/pgtable.h> + #include <asm/mmu_context.h> + ++#ifdef CONFIG_GRKERNSEC ++extern rwlock_t grsec_exec_file_lock; ++#endif ++ + static void exit_mm(struct task_struct * tsk); + + static void __unhash_process(struct task_struct *p, bool group_dead) +@@ -168,6 +172,10 @@ void release_task(struct task_struct * p) + struct task_struct *leader; + int zap_leader; + repeat: ++#ifdef CONFIG_NET ++ gr_del_task_from_ip_table(p); ++#endif ++ + /* don't need to get the RCU readlock here - the process is dead and + * can't be modifying its own credentials. But shut RCU-lockdep up */ + rcu_read_lock(); +@@ -380,7 +388,7 @@ int allow_signal(int sig) + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. 
+ */ +- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; ++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + return 0; +@@ -416,6 +424,17 @@ void daemonize(const char *name, ...) + vsnprintf(current->comm, sizeof(current->comm), name, args); + va_end(args); + ++#ifdef CONFIG_GRKERNSEC ++ write_lock(&grsec_exec_file_lock); ++ if (current->exec_file) { ++ fput(current->exec_file); ++ current->exec_file = NULL; ++ } ++ write_unlock(&grsec_exec_file_lock); ++#endif ++ ++ gr_set_kernel_label(current); ++ + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them +@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code) + struct task_struct *tsk = current; + int group_dead; + ++ set_fs(USER_DS); ++ + profile_task_exit(tsk); + + WARN_ON(blk_needs_flush_plug(tsk)); +@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code) + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + */ +- set_fs(USER_DS); + + ptrace_event(PTRACE_EVENT_EXIT, code); + +@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code) + tsk->exit_code = code; + taskstats_exit(tsk, group_dead); + ++ gr_acl_handle_psacct(tsk, code); ++ gr_acl_handle_exit(); ++ + exit_mm(tsk); + + if (group_dead) +@@ -1068,7 +1091,7 @@ SYSCALL_DEFINE1(exit, int, error_code) + * Take down every thread in the group. This is called by fatal signals + * as well as by sys_exit_group (below). + */ +-NORET_TYPE void ++__noreturn void + do_group_exit(int exit_code) + { + struct signal_struct *sig = current->signal; +diff --git a/kernel/fork.c b/kernel/fork.c +index 0acf42c0..9e40e2e 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + *stackend = STACK_END_MAGIC; /* for overflow detection */ + + #ifdef CONFIG_CC_STACKPROTECTOR +- tsk->stack_canary = get_random_int(); ++ tsk->stack_canary = pax_get_random_long(); + #endif + + /* +@@ -305,13 +305,77 @@ out: + } + + #ifdef CONFIG_MMU ++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt) ++{ ++ struct vm_area_struct *tmp; ++ unsigned long charge; ++ struct mempolicy *pol; ++ struct file *file; ++ ++ charge = 0; ++ if (mpnt->vm_flags & VM_ACCOUNT) { ++ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; ++ if (security_vm_enough_memory(len)) ++ goto fail_nomem; ++ charge = len; ++ } ++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!tmp) ++ goto fail_nomem; ++ *tmp = *mpnt; ++ tmp->vm_mm = mm; ++ INIT_LIST_HEAD(&tmp->anon_vma_chain); ++ pol = mpol_dup(vma_policy(mpnt)); ++ if (IS_ERR(pol)) ++ goto fail_nomem_policy; ++ vma_set_policy(tmp, pol); ++ if (anon_vma_fork(tmp, mpnt)) ++ goto fail_nomem_anon_vma_fork; ++ tmp->vm_flags &= ~VM_LOCKED; ++ tmp->vm_next = tmp->vm_prev = NULL; ++ tmp->vm_mirror = NULL; ++ file = tmp->vm_file; ++ if (file) { ++ struct inode *inode = file->f_path.dentry->d_inode; ++ struct address_space *mapping = file->f_mapping; ++ ++ get_file(file); ++ if (tmp->vm_flags & VM_DENYWRITE) ++ atomic_dec(&inode->i_writecount); ++ mutex_lock(&mapping->i_mmap_mutex); ++ if (tmp->vm_flags & VM_SHARED) ++ mapping->i_mmap_writable++; ++ flush_dcache_mmap_lock(mapping); ++ /* insert tmp into the share list, just after mpnt */ ++ vma_prio_tree_add(tmp, mpnt); ++ flush_dcache_mmap_unlock(mapping); ++ 
mutex_unlock(&mapping->i_mmap_mutex); ++ } ++ ++ /* ++ * Clear hugetlb-related page reserves for children. This only ++ * affects MAP_PRIVATE mappings. Faults generated by the child ++ * are not guaranteed to succeed, even if read-only ++ */ ++ if (is_vm_hugetlb_page(tmp)) ++ reset_vma_resv_huge_pages(tmp); ++ ++ return tmp; ++ ++fail_nomem_anon_vma_fork: ++ mpol_put(pol); ++fail_nomem_policy: ++ kmem_cache_free(vm_area_cachep, tmp); ++fail_nomem: ++ vm_unacct_memory(charge); ++ return NULL; ++} ++ + static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + { + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct rb_node **rb_link, *rb_parent; + int retval; +- unsigned long charge; +- struct mempolicy *pol; + + down_write(&oldmm->mmap_sem); + flush_cache_dup_mm(oldmm); +@@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + mm->locked_vm = 0; + mm->mmap = NULL; + mm->mmap_cache = NULL; +- mm->free_area_cache = oldmm->mmap_base; +- mm->cached_hole_size = ~0UL; ++ mm->free_area_cache = oldmm->free_area_cache; ++ mm->cached_hole_size = oldmm->cached_hole_size; + mm->map_count = 0; + cpumask_clear(mm_cpumask(mm)); + mm->mm_rb = RB_ROOT; +@@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + + prev = NULL; + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { +- struct file *file; +- + if (mpnt->vm_flags & VM_DONTCOPY) { + long pages = vma_pages(mpnt); + mm->total_vm -= pages; +@@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + -pages); + continue; + } +- charge = 0; +- if (mpnt->vm_flags & VM_ACCOUNT) { +- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; +- if (security_vm_enough_memory(len)) +- goto fail_nomem; +- charge = len; ++ tmp = dup_vma(mm, mpnt); ++ if (!tmp) { ++ retval = -ENOMEM; ++ goto out; + } +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +- if (!tmp) +- goto fail_nomem; +- *tmp = *mpnt; +- INIT_LIST_HEAD(&tmp->anon_vma_chain); +- pol = mpol_dup(vma_policy(mpnt)); +- retval = PTR_ERR(pol); +- if (IS_ERR(pol)) +- goto fail_nomem_policy; +- vma_set_policy(tmp, pol); +- tmp->vm_mm = mm; +- if (anon_vma_fork(tmp, mpnt)) +- goto fail_nomem_anon_vma_fork; +- tmp->vm_flags &= ~VM_LOCKED; +- tmp->vm_next = tmp->vm_prev = NULL; +- file = tmp->vm_file; +- if (file) { +- struct inode *inode = file->f_path.dentry->d_inode; +- struct address_space *mapping = file->f_mapping; +- +- get_file(file); +- if (tmp->vm_flags & VM_DENYWRITE) +- atomic_dec(&inode->i_writecount); +- mutex_lock(&mapping->i_mmap_mutex); +- if (tmp->vm_flags & VM_SHARED) +- mapping->i_mmap_writable++; +- flush_dcache_mmap_lock(mapping); +- /* insert tmp into the share list, just after mpnt */ +- vma_prio_tree_add(tmp, mpnt); +- flush_dcache_mmap_unlock(mapping); +- mutex_unlock(&mapping->i_mmap_mutex); +- } +- +- /* +- * Clear hugetlb-related page reserves for children. This only +- * affects MAP_PRIVATE mappings. Faults generated by the child +- * are not guaranteed to succeed, even if read-only +- */ +- if (is_vm_hugetlb_page(tmp)) +- reset_vma_resv_huge_pages(tmp); + + /* + * Link in the new vma and copy the page table entries. 
+@@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + if (retval) + goto out; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { ++ struct vm_area_struct *mpnt_m; ++ ++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { ++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); ++ ++ if (!mpnt->vm_mirror) ++ continue; ++ ++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { ++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); ++ mpnt->vm_mirror = mpnt_m; ++ } else { ++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); ++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; ++ mpnt_m->vm_mirror->vm_mirror = mpnt_m; ++ mpnt->vm_mirror->vm_mirror = mpnt; ++ } ++ } ++ BUG_ON(mpnt_m); ++ } ++#endif ++ + /* a new mm has just been created */ + arch_dup_mmap(oldmm, mm); + retval = 0; +@@ -426,14 +471,6 @@ out: + flush_tlb_mm(oldmm); + up_write(&oldmm->mmap_sem); + return retval; +-fail_nomem_anon_vma_fork: +- mpol_put(pol); +-fail_nomem_policy: +- kmem_cache_free(vm_area_cachep, tmp); +-fail_nomem: +- retval = -ENOMEM; +- vm_unacct_memory(charge); +- goto out; + } + + static inline int mm_alloc_pgd(struct mm_struct *mm) +@@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task) + } + EXPORT_SYMBOL_GPL(get_task_mm); + ++struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) ++{ ++ struct mm_struct *mm; ++ int err; ++ ++ err = mutex_lock_killable(&task->signal->cred_guard_mutex); ++ if (err) ++ return ERR_PTR(err); ++ ++ mm = get_task_mm(task); ++ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) || ++ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) { ++ mmput(mm); ++ mm = ERR_PTR(-EACCES); ++ } ++ mutex_unlock(&task->signal->cred_guard_mutex); ++ ++ return mm; ++} ++ + /* Please note the differences between mmput and mm_release. + * mmput is called whenever we stop holding onto a mm_struct, + * error success whatever. +@@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) + spin_unlock(&fs->lock); + return -EAGAIN; + } +- fs->users++; ++ atomic_inc(&fs->users); + spin_unlock(&fs->lock); + return 0; + } + tsk->fs = copy_fs_struct(fs); + if (!tsk->fs) + return -ENOMEM; ++ gr_set_chroot_entries(tsk, &tsk->fs->root); + return 0; + } + +@@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); + #endif + retval = -EAGAIN; ++ ++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0); ++ + if (atomic_read(&p->real_cred->user->processes) >= + task_rlimit(p, RLIMIT_NPROC)) { + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && +@@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, + if (clone_flags & CLONE_THREAD) + p->tgid = current->tgid; + ++ gr_copy_label(p); ++ + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? 
+@@ -1421,6 +1484,8 @@ bad_fork_cleanup_count: + bad_fork_free: + free_task(p); + fork_out: ++ gr_log_forkfail(retval); ++ + return ERR_PTR(retval); + } + +@@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags, + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + ++ gr_handle_brute_check(); ++ + if (clone_flags & CLONE_VFORK) { + p->vfork_done = &vfork; + init_completion(&vfork); +@@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) + return 0; + + /* don't need lock here; in the worst case we'll do useless copy */ +- if (fs->users == 1) ++ if (atomic_read(&fs->users) == 1) + return 0; + + *new_fsp = copy_fs_struct(fs); +@@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + fs = current->fs; + spin_lock(&fs->lock); + current->fs = new_fs; +- if (--fs->users) ++ gr_set_chroot_entries(current, ¤t->fs->root); ++ if (atomic_dec_return(&fs->users)) + new_fs = NULL; + else + new_fs = fs; +diff --git a/kernel/futex.c b/kernel/futex.c +index 1614be2..37abc7e 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -54,6 +54,7 @@ + #include <linux/mount.h> + #include <linux/pagemap.h> + #include <linux/syscalls.h> ++#include <linux/ptrace.h> + #include <linux/signal.h> + #include <linux/export.h> + #include <linux/magic.h> +@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) + struct page *page, *page_head; + int err, ro = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE) ++ return -EFAULT; ++#endif ++ + /* + * The futex address must be "naturally" aligned. + */ +@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#endif + pcred = __task_cred(p); + /* If victim is in different user_ns, then uids are not + comparable, so we must have CAP_SYS_PTRACE */ +@@ -2724,6 +2734,7 @@ static int __init futex_init(void) + { + u32 curval; + int i; ++ mm_segment_t oldfs; + + /* + * This will fail and we want it. Some arch implementations do +@@ -2735,8 +2746,11 @@ static int __init futex_init(void) + * implementation, the non-functional ones will return + * -ENOSYS. 
+ */ ++ oldfs = get_fs(); ++ set_fs(USER_DS); + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) + futex_cmpxchg_enabled = 1; ++ set_fs(oldfs); + + for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { + plist_head_init(&futex_queues[i].chain); +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index 5f9e689..582d46d 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -10,6 +10,7 @@ + #include <linux/compat.h> + #include <linux/nsproxy.h> + #include <linux/futex.h> ++#include <linux/ptrace.h> + + #include <asm/uaccess.h> + +@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + { + struct compat_robust_list_head __user *head; + unsigned long ret; +- const struct cred *cred = current_cred(), *pcred; ++ const struct cred *cred = current_cred(); ++ const struct cred *pcred; + + if (!futex_cmpxchg_enabled) + return -ENOSYS; +@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#endif + pcred = __task_cred(p); + /* If victim is in different user_ns, then uids are not + comparable, so we must have CAP_SYS_PTRACE */ +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index 9b22d03..6295b62 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -102,11 +102,6 @@ void gcov_enable_events(void) + } + + #ifdef CONFIG_MODULES +-static inline int within(void *addr, void *start, unsigned long size) +-{ +- return ((addr >= start) && (addr < start + size)); +-} +- + /* Update list and generate events when modules are unloaded. */ + static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + prev = NULL; + /* Remove entries located in module from linked list. 
*/ + for (info = gcov_info_head; info; info = info->next) { +- if (within(info, mod->module_core, mod->core_size)) { ++ if (within_module_core_rw((unsigned long)info, mod)) { + if (prev) + prev->next = info->next; + else +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c +index ae34bf5..4e2f3d0 100644 +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void) + local_irq_restore(flags); + } + +-static void run_hrtimer_softirq(struct softirq_action *h) ++static void run_hrtimer_softirq(void) + { + hrtimer_peek_ahead_timers(); + } +diff --git a/kernel/jump_label.c b/kernel/jump_label.c +index 66ff710..05a5128 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) + + size = (((unsigned long)stop - (unsigned long)start) + / sizeof(struct jump_entry)); ++ pax_open_kernel(); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); ++ pax_close_kernel(); + } + + static void jump_label_update(struct jump_label_key *key, int enable); +@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod) + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct jump_entry *iter; + ++ pax_open_kernel(); + for (iter = iter_start; iter < iter_stop; iter++) { + if (within_module_init(iter->code, mod)) + iter->code = 0; + } ++ pax_close_kernel(); + } + + static int +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c +index 079f1d3..a407562 100644 +--- a/kernel/kallsyms.c ++++ b/kernel/kallsyms.c +@@ -11,6 +11,9 @@ + * Changed the compression method from stem compression to "table lookup" + * compression (see scripts/kallsyms.c for a more complete description) + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/module.h> + #include <linux/init.h> +@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak)); + + static inline int is_kernel_inittext(unsigned long addr) + { ++ if (system_state != SYSTEM_BOOTING) ++ return 0; ++ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#ifdef CONFIG_MODULES ++static inline int is_module_text(unsigned long addr) ++{ ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END) ++ return 1; ++ ++ addr = ktla_ktva(addr); ++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END; ++} ++#else ++static inline int is_module_text(unsigned long addr) ++{ ++ return 0; ++} ++#endif ++#endif ++ + static inline int is_kernel_text(unsigned long addr) + { + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || +@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr) + + static inline int is_kernel(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_kernel_text(addr) || is_kernel_inittext(addr)) ++ return 1; ++ ++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end) ++#else + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) ++#endif ++ + return 1; + return in_gate_area_no_mm(addr); + } + + static int is_ksym_addr(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_module_text(addr)) ++ return 0; ++#endif ++ + if (all_var) + return 
is_kernel(addr); + +@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) + + static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) + { +- iter->name[0] = '\0'; + iter->nameoff = get_symbol_offset(new_pos); + iter->pos = new_pos; + } +@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p) + { + struct kallsym_iter *iter = m->private; + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ if (current_uid()) ++ return 0; ++#endif ++ + /* Some debugging symbols have no name. Ignore them. */ + if (!iter->name[0]) + return 0; +@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file) + struct kallsym_iter *iter; + int ret; + +- iter = kmalloc(sizeof(*iter), GFP_KERNEL); ++ iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + reset_iter(iter, 0); +diff --git a/kernel/kexec.c b/kernel/kexec.c +index dc7bc08..4601964 100644 +--- a/kernel/kexec.c ++++ b/kernel/kexec.c +@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long flags) + { + struct compat_kexec_segment in; +- struct kexec_segment out, __user *ksegments; ++ struct kexec_segment out; ++ struct kexec_segment __user *ksegments; + unsigned long i, result; + + /* Don't allow clients that don't understand the native +diff --git a/kernel/kmod.c b/kernel/kmod.c +index a4bea97..7a1ae9a 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe"; + * If module auto-loading support is disabled then this function + * becomes a no-operation. + */ +-int __request_module(bool wait, const char *fmt, ...) ++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap) + { +- va_list args; + char module_name[MODULE_NAME_LEN]; + unsigned int max_modprobes; + int ret; +- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL }; ++ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL }; + static char *envp[] = { "HOME=/", + "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", +@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...) + #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ + static int kmod_loop_msg; + +- va_start(args, fmt); +- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); +- va_end(args); ++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap); + if (ret >= MODULE_NAME_LEN) + return -ENAMETOOLONG; + +@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...) + if (ret) + return ret; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!current_uid()) { ++ /* hack to workaround consolekit/udisks stupidity */ ++ read_lock(&tasklist_lock); ++ if (!strcmp(current->comm, "mount") && ++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) { ++ read_unlock(&tasklist_lock); ++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name); ++ return -EPERM; ++ } ++ read_unlock(&tasklist_lock); ++ } ++#endif ++ + /* If modprobe needs a service that is in a module, we get a recursive + * loop. Limit the number of running kmod threads to max_threads/2 or + * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method +@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...) + atomic_dec(&kmod_concurrent); + return ret; + } ++ ++int ___request_module(bool wait, char *module_param, const char *fmt, ...) 
++{ ++ va_list args; ++ int ret; ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ ++int __request_module(bool wait, const char *fmt, ...) ++{ ++ va_list args; ++ int ret; ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (current_uid()) { ++ char module_param[MODULE_NAME_LEN]; ++ ++ memset(module_param, 0, sizeof(module_param)); ++ ++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid()); ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++ } ++#endif ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, NULL, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ + EXPORT_SYMBOL(__request_module); + #endif /* CONFIG_MODULES */ + +@@ -222,7 +274,7 @@ static int wait_for_helper(void *data) + * + * Thus the __user pointer cast is valid here. + */ +- sys_wait4(pid, (int __user *)&ret, 0, NULL); ++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL); + + /* + * If ret is 0, either ____call_usermodehelper failed and the +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index bc90b87..43c7d8c 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) + * kernel image and loaded module images reside. This is required + * so x86_64 can correctly handle the %rip-relative fixups. + */ +- kip->insns = module_alloc(PAGE_SIZE); ++ kip->insns = module_alloc_exec(PAGE_SIZE); + if (!kip->insns) { + kfree(kip); + return NULL; +@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) + */ + if (!list_is_singular(&kip->list)) { + list_del(&kip->list); +- module_free(NULL, kip->insns); ++ module_free_exec(NULL, kip->insns); + kfree(kip); + } + return 1; +@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void) + { + int i, err = 0; + unsigned long offset = 0, size = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + const char *symbol_name; + void *addr; + struct kprobe_blackpoint *kb; +@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) + const char *sym = NULL; + unsigned int i = *(loff_t *) v; + unsigned long offset = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + + head = &kprobe_table[i]; + preempt_disable(); +diff --git a/kernel/lockdep.c b/kernel/lockdep.c +index b2e08c9..01d8049 100644 +--- a/kernel/lockdep.c ++++ b/kernel/lockdep.c +@@ -592,6 +592,10 @@ static int static_obj(void *obj) + end = (unsigned long) &_end, + addr = (unsigned long) obj; + ++#ifdef CONFIG_PAX_KERNEXEC ++ start = ktla_ktva(start); ++#endif ++ + /* + * static variable? 
+ */ +@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) + if (!static_obj(lock->key)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); ++ printk("lock:%pS key:%pS.\n", lock, lock->key); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); +@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, + if (!class) + return 0; + } +- atomic_inc((atomic_t *)&class->ops); ++ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops); + if (very_verbose(class)) { + printk("\nacquire class [%p] %s", class->key, class->name); + if (class->name_version > 1) +diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c +index 91c32a0..b2c71c5 100644 +--- a/kernel/lockdep_proc.c ++++ b/kernel/lockdep_proc.c +@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v) + + static void print_name(struct seq_file *m, struct lock_class *class) + { +- char str[128]; ++ char str[KSYM_NAME_LEN]; + const char *name = class->name; + + if (!name) { +diff --git a/kernel/module.c b/kernel/module.c +index 178333c..04e3408 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -58,6 +58,7 @@ + #include <linux/jump_label.h> + #include <linux/pfn.h> + #include <linux/bsearch.h> ++#include <linux/grsecurity.h> + + #define CREATE_TRACE_POINTS + #include <trace/events/module.h> +@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list); + + /* Bounds of module allocation, for speeding __module_address. + * Protected by module_mutex. */ +-static unsigned long module_addr_min = -1UL, module_addr_max = 0; ++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0; ++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0; + + int register_module_notifier(struct notifier_block * nb) + { +@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + return true; + + list_for_each_entry_rcu(mod, &modules, list) { +- struct symsearch arr[] = { ++ struct symsearch modarr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY, false }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, +@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + #endif + }; + +- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) ++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data)) + return true; + } + return false; +@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod) + static int percpu_modalloc(struct module *mod, + unsigned long size, unsigned long align) + { +- if (align > PAGE_SIZE) { ++ if (align-1 >= PAGE_SIZE) { + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", + mod->name, align, PAGE_SIZE); + align = PAGE_SIZE; +@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod, + */ + #ifdef CONFIG_SYSFS + +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + static inline bool sect_empty(const Elf_Shdr *sect) + { + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; +@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base, + + static void unset_module_core_ro_nx(struct module *mod) + { +- set_page_attributes(mod->module_core + mod->core_text_size, +- mod->module_core + mod->core_size, ++ set_page_attributes(mod->module_core_rw, ++ mod->module_core_rw + mod->core_size_rw, + set_memory_x); +- 
set_page_attributes(mod->module_core, +- mod->module_core + mod->core_ro_size, ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_rw); + } + + static void unset_module_init_ro_nx(struct module *mod) + { +- set_page_attributes(mod->module_init + mod->init_text_size, +- mod->module_init + mod->init_size, ++ set_page_attributes(mod->module_init_rw, ++ mod->module_init_rw + mod->init_size_rw, + set_memory_x); +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_ro_size, ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_rw); + } + +@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void) + + mutex_lock(&module_mutex); + list_for_each_entry_rcu(mod, &modules, list) { +- if ((mod->module_core) && (mod->core_text_size)) { +- set_page_attributes(mod->module_core, +- mod->module_core + mod->core_text_size, ++ if ((mod->module_core_rx) && (mod->core_size_rx)) { ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_rw); + } +- if ((mod->module_init) && (mod->init_text_size)) { +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_text_size, ++ if ((mod->module_init_rx) && (mod->init_size_rx)) { ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_rw); + } + } +@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void) + + mutex_lock(&module_mutex); + list_for_each_entry_rcu(mod, &modules, list) { +- if ((mod->module_core) && (mod->core_text_size)) { +- set_page_attributes(mod->module_core, +- mod->module_core + mod->core_text_size, ++ if ((mod->module_core_rx) && (mod->core_size_rx)) { ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_ro); + } +- if ((mod->module_init) && (mod->init_text_size)) { +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_text_size, ++ if ((mod->module_init_rx) && (mod->init_size_rx)) { ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_ro); + } + } +@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod) + + /* This may be NULL, but that's OK */ + unset_module_init_ro_nx(mod); +- module_free(mod, mod->module_init); ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); + kfree(mod->args); + percpu_modfree(mod); + + /* Free lock-classes: */ +- lockdep_free_key_range(mod->module_core, mod->core_size); ++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); ++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); + + /* Finally, free the core (containing the module structure) */ + unset_module_core_ro_nx(mod); +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_core_rw); + + #ifdef CONFIG_MPU + update_protections(current->mm); +@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + unsigned int i; + int ret = 0; + const struct kernel_symbol *ksym; ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ int is_fs_load = 0; ++ int register_filesystem_found = 0; ++ char *p; ++ ++ p = strstr(mod->args, "grsec_modharden_fs"); ++ if (p) { ++ char *endptr = p + strlen("grsec_modharden_fs"); ++ /* copy \0 as well */ ++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1); ++ is_fs_load = 1; ++ } ++#endif + + for (i = 1; i < symsec->sh_size / 
sizeof(Elf_Sym); i++) { + const char *name = info->strtab + sym[i].st_name; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ /* it's a real shame this will never get ripped and copied ++ upstream! ;( ++ */ ++ if (is_fs_load && !strcmp(name, "register_filesystem")) ++ register_filesystem_found = 1; ++#endif ++ + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* We compiled with -fno-common. These are not +@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + ksym = resolve_symbol_wait(mod, info, name); + /* Ok if resolved. */ + if (ksym && !IS_ERR(ksym)) { ++ pax_open_kernel(); + sym[i].st_value = ksym->value; ++ pax_close_kernel(); + break; + } + +@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + secbase = (unsigned long)mod_percpu(mod); + else + secbase = info->sechdrs[sym[i].st_shndx].sh_addr; ++ pax_open_kernel(); + sym[i].st_value += secbase; ++ pax_close_kernel(); + break; + } + } + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (is_fs_load && !register_filesystem_found) { ++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name); ++ ret = -EPERM; ++ } ++#endif ++ + return ret; + } + +@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info) + || s->sh_entsize != ~0UL + || strstarts(sname, ".init")) + continue; +- s->sh_entsize = get_offset(mod, &mod->core_size, s, i); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i); + DEBUGP("\t%s\n", name); + } +- switch (m) { +- case 0: /* executable */ +- mod->core_size = debug_align(mod->core_size); +- mod->core_text_size = mod->core_size; +- break; +- case 1: /* RO: text and ro-data */ +- mod->core_size = debug_align(mod->core_size); +- mod->core_ro_size = mod->core_size; +- break; +- case 3: /* whole core */ +- mod->core_size = debug_align(mod->core_size); +- break; +- } + } + + DEBUGP("Init section allocation order:\n"); +@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info) + || s->sh_entsize != ~0UL + || !strstarts(sname, ".init")) + continue; +- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) +- | INIT_OFFSET_MASK); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i); ++ s->sh_entsize |= INIT_OFFSET_MASK; + DEBUGP("\t%s\n", sname); + } +- switch (m) { +- case 0: /* executable */ +- mod->init_size = debug_align(mod->init_size); +- mod->init_text_size = mod->init_size; +- break; +- case 1: /* RO: text and ro-data */ +- mod->init_size = debug_align(mod->init_size); +- mod->init_ro_size = mod->init_size; +- break; +- case 3: /* whole init */ +- mod->init_size = debug_align(mod->init_size); +- break; +- } + } + } + +@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) + + /* Put symbol section at end of init part of module. 
*/ + symsect->sh_flags |= SHF_ALLOC; +- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, ++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect, + info->index.sym) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", info->secstrings + symsect->sh_name); + +@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info) + } + + /* Append room for core symbols at end of core part. */ +- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); +- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); ++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1); ++ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; +- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, ++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, + info->index.str) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", info->secstrings + strsect->sh_name); + + /* Append room for core symbols' strings at end of core part. */ +- info->stroffs = mod->core_size; ++ info->stroffs = mod->core_size_rx; + __set_bit(0, info->strmap); +- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size); ++ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size); + } + + static void add_kallsyms(struct module *mod, const struct load_info *info) +@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + ++ pax_open_kernel(); ++ + /* Set types up while we still have access to sections. */ + for (i = 0; i < mod->num_symtab; i++) + mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); + +- mod->core_symtab = dst = mod->module_core + info->symoffs; ++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs; + src = mod->symtab; + *dst = *src; + for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { +@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + } + mod->core_num_syms = ndst; + +- mod->core_strtab = s = mod->module_core + info->stroffs; ++ mod->core_strtab = s = mod->module_core_rx + info->stroffs; + for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i) + if (test_bit(i, info->strmap)) + *++s = mod->strtab[i]; ++ ++ pax_close_kernel(); + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size) + return size == 0 ? NULL : vmalloc_exec(size); + } + +-static void *module_alloc_update_bounds(unsigned long size) ++static void *module_alloc_update_bounds_rw(unsigned long size) + { + void *ret = module_alloc(size); + + if (ret) { + mutex_lock(&module_mutex); + /* Update module bounds. 
*/ +- if ((unsigned long)ret < module_addr_min) +- module_addr_min = (unsigned long)ret; +- if ((unsigned long)ret + size > module_addr_max) +- module_addr_max = (unsigned long)ret + size; ++ if ((unsigned long)ret < module_addr_min_rw) ++ module_addr_min_rw = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rw) ++ module_addr_max_rw = (unsigned long)ret + size; ++ mutex_unlock(&module_mutex); ++ } ++ return ret; ++} ++ ++static void *module_alloc_update_bounds_rx(unsigned long size) ++{ ++ void *ret = module_alloc_exec(size); ++ ++ if (ret) { ++ mutex_lock(&module_mutex); ++ /* Update module bounds. */ ++ if ((unsigned long)ret < module_addr_min_rx) ++ module_addr_min_rx = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rx) ++ module_addr_max_rx = (unsigned long)ret + size; + mutex_unlock(&module_mutex); + } + return ret; +@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info) + static int check_modinfo(struct module *mod, struct load_info *info) + { + const char *modmagic = get_modinfo(info, "vermagic"); ++ const char *license = get_modinfo(info, "license"); + int err; + ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ if (!license || !license_is_gpl_compatible(license)) ++ return -ENOEXEC; ++#endif ++ + /* This is allowed: modprobe --force will invalidate it. */ + if (!modmagic) { + err = try_to_force_load(mod, "bad vermagic"); +@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info) + } + + /* Set up license info based on the info section */ +- set_license(mod, get_modinfo(info, "license")); ++ set_license(mod, license); + + return 0; + } +@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info) + void *ptr; + + /* Do the allocs. */ +- ptr = module_alloc_update_bounds(mod->core_size); ++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a +@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info) + if (!ptr) + return -ENOMEM; + +- memset(ptr, 0, mod->core_size); +- mod->module_core = ptr; ++ memset(ptr, 0, mod->core_size_rw); ++ mod->module_core_rw = ptr; + +- ptr = module_alloc_update_bounds(mod->init_size); ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. 
+ */ +- kmemleak_ignore(ptr); +- if (!ptr && mod->init_size) { +- module_free(mod, mod->module_core); ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rw) { ++ module_free(mod, mod->module_core_rw); + return -ENOMEM; + } +- memset(ptr, 0, mod->init_size); +- mod->module_init = ptr; ++ memset(ptr, 0, mod->init_size_rw); ++ mod->module_init_rw = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr) { ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); ++ return -ENOMEM; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->core_size_rx); ++ pax_close_kernel(); ++ mod->module_core_rx = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rx) { ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); ++ return -ENOMEM; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->init_size_rx); ++ pax_close_kernel(); ++ mod->module_init_rx = ptr; + + /* Transfer each section which specifies SHF_ALLOC */ + DEBUGP("final section addresses:\n"); +@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info) + if (!(shdr->sh_flags & SHF_ALLOC)) + continue; + +- if (shdr->sh_entsize & INIT_OFFSET_MASK) +- dest = mod->module_init +- + (shdr->sh_entsize & ~INIT_OFFSET_MASK); +- else +- dest = mod->module_core + shdr->sh_entsize; ++ if (shdr->sh_entsize & INIT_OFFSET_MASK) { ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) ++ dest = mod->module_init_rw ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); ++ else ++ dest = mod->module_init_rx ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); ++ } else { ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) ++ dest = mod->module_core_rw + shdr->sh_entsize; ++ else ++ dest = mod->module_core_rx + shdr->sh_entsize; ++ } ++ ++ if (shdr->sh_type != SHT_NOBITS) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_64 ++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR)) ++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT); ++#endif ++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) { ++ pax_open_kernel(); ++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); ++ pax_close_kernel(); ++ } else ++#endif + +- if (shdr->sh_type != SHT_NOBITS) + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); ++ } + /* Update sh_addr to point to copy in image. */ +- shdr->sh_addr = (unsigned long)dest; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (shdr->sh_flags & SHF_EXECINSTR) ++ shdr->sh_addr = ktva_ktla((unsigned long)dest); ++ else ++#endif ++ ++ shdr->sh_addr = (unsigned long)dest; + DEBUGP("\t0x%lx %s\n", + shdr->sh_addr, info->secstrings + shdr->sh_name); + } +@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod) + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. 
+ */ +- if (mod->module_init) +- flush_icache_range((unsigned long)mod->module_init, +- (unsigned long)mod->module_init +- + mod->init_size); +- flush_icache_range((unsigned long)mod->module_core, +- (unsigned long)mod->module_core + mod->core_size); ++ if (mod->module_init_rx) ++ flush_icache_range((unsigned long)mod->module_init_rx, ++ (unsigned long)mod->module_init_rx ++ + mod->init_size_rx); ++ flush_icache_range((unsigned long)mod->module_core_rx, ++ (unsigned long)mod->module_core_rx + mod->core_size_rx); + + set_fs(old_fs); + } +@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info) + { + kfree(info->strmap); + percpu_modfree(mod); +- module_free(mod, mod->module_init); +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_init_rx); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); + } + + int __weak module_finalize(const Elf_Ehdr *hdr, +@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod, + if (err) + goto free_unload; + ++ /* Now copy in args */ ++ mod->args = strndup_user(uargs, ~0UL >> 1); ++ if (IS_ERR(mod->args)) { ++ err = PTR_ERR(mod->args); ++ goto free_unload; ++ } ++ + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, &info); + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ { ++ char *p, *p2; ++ ++ if (strstr(mod->args, "grsec_modharden_netdev")) { ++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name); ++ err = -EPERM; ++ goto free_modinfo; ++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) { ++ p += strlen("grsec_modharden_normal"); ++ p2 = strstr(p, "_"); ++ if (p2) { ++ *p2 = '\0'; ++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p); ++ *p2 = '_'; ++ } ++ err = -EPERM; ++ goto free_modinfo; ++ } ++ } ++#endif ++ + /* Fix up syms, so that st_value is a pointer to location. */ + err = simplify_symbols(mod, &info); + if (err < 0) +@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod, + + flush_module_icache(mod); + +- /* Now copy in args */ +- mod->args = strndup_user(uargs, ~0UL >> 1); +- if (IS_ERR(mod->args)) { +- err = PTR_ERR(mod->args); +- goto free_arch_cleanup; +- } +- + /* Mark state as coming so strong_try_module_get() ignores us. 
*/ + mod->state = MODULE_STATE_COMING; + +@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod, + unlock: + mutex_unlock(&module_mutex); + synchronize_sched(); +- kfree(mod->args); +- free_arch_cleanup: + module_arch_cleanup(mod); + free_modinfo: + free_modinfo(mod); ++ kfree(mod->args); + free_unload: + module_unload_free(mod); + free_module: +@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, + MODULE_STATE_COMING, mod); + + /* Set RO and NX regions for core */ +- set_section_ro_nx(mod->module_core, +- mod->core_text_size, +- mod->core_ro_size, +- mod->core_size); ++ set_section_ro_nx(mod->module_core_rx, ++ mod->core_size_rx, ++ mod->core_size_rx, ++ mod->core_size_rx); + + /* Set RO and NX regions for init */ +- set_section_ro_nx(mod->module_init, +- mod->init_text_size, +- mod->init_ro_size, +- mod->init_size); ++ set_section_ro_nx(mod->module_init_rx, ++ mod->init_size_rx, ++ mod->init_size_rx, ++ mod->init_size_rx); + + do_mod_ctors(mod); + /* Start the module */ +@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, + mod->strtab = mod->core_strtab; + #endif + unset_module_init_ro_nx(mod); +- module_free(mod, mod->module_init); +- mod->module_init = NULL; +- mod->init_size = 0; +- mod->init_ro_size = 0; +- mod->init_text_size = 0; ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); ++ mod->module_init_rw = NULL; ++ mod->module_init_rx = NULL; ++ mod->init_size_rw = 0; ++ mod->init_size_rx = 0; + mutex_unlock(&module_mutex); + + return 0; +@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod, + unsigned long nextval; + + /* At worse, next value is at end of module */ +- if (within_module_init(addr, mod)) +- nextval = (unsigned long)mod->module_init+mod->init_text_size; ++ if (within_module_init_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; ++ else if (within_module_init_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; ++ else if (within_module_core_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; ++ else if (within_module_core_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; + else +- nextval = (unsigned long)mod->module_core+mod->core_text_size; ++ return NULL; + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). */ +@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p) + char buf[8]; + + seq_printf(m, "%s %u", +- mod->name, mod->init_size + mod->core_size); ++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); + print_unload_info(m, mod); + + /* Informative for users. */ +@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p) + mod->state == MODULE_STATE_COMING ? "Loading": + "Live"); + /* Used by oprofile and other similar tools. 
*/ +- seq_printf(m, " 0x%pK", mod->module_core); ++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw); + + /* Taints info */ + if (mod->taints) +@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = { + + static int __init proc_modules_init(void) + { ++#ifndef CONFIG_GRKERNSEC_HIDESYM ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations); ++#else + proc_create("modules", 0, NULL, &proc_modules_operations); ++#endif ++#else ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#endif + return 0; + } + module_init(proc_modules_init); +@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr) + { + struct module *mod; + +- if (addr < module_addr_min || addr > module_addr_max) ++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) && ++ (addr < module_addr_min_rw || addr > module_addr_max_rw)) + return NULL; + + list_for_each_entry_rcu(mod, &modules, list) +- if (within_module_core(addr, mod) +- || within_module_init(addr, mod)) ++ if (within_module_init(addr, mod) || within_module_core(addr, mod)) + return mod; + return NULL; + } +@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr) + */ + struct module *__module_text_address(unsigned long addr) + { +- struct module *mod = __module_address(addr); ++ struct module *mod; ++ ++#ifdef CONFIG_X86_32 ++ addr = ktla_ktva(addr); ++#endif ++ ++ if (addr < module_addr_min_rx || addr > module_addr_max_rx) ++ return NULL; ++ ++ mod = __module_address(addr); ++ + if (mod) { + /* Make sure it's within the text section. */ +- if (!within(addr, mod->module_init, mod->init_text_size) +- && !within(addr, mod->module_core, mod->core_text_size)) ++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod)) + mod = NULL; + } + return mod; +diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c +index 7e3443f..b2a1e6b 100644 +--- a/kernel/mutex-debug.c ++++ b/kernel/mutex-debug.c +@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) + } + + void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); + + /* Mark the current thread as blocked on the lock: */ +- ti->task->blocked_on = waiter; ++ task->blocked_on = waiter; + } + + void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); +- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); +- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); +- ti->task->blocked_on = NULL; ++ DEBUG_LOCKS_WARN_ON(waiter->task != task); ++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); ++ task->blocked_on = NULL; + + list_del_init(&waiter->list); + waiter->task = NULL; +diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h +index 0799fd3..d06ae3b 100644 +--- a/kernel/mutex-debug.h ++++ b/kernel/mutex-debug.h +@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, + extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); + extern void debug_mutex_add_waiter(struct mutex *lock, + struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void mutex_remove_waiter(struct mutex 
*lock, struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void debug_mutex_unlock(struct mutex *lock); + extern void debug_mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +diff --git a/kernel/mutex.c b/kernel/mutex.c +index 89096dd..f91ebc5 100644 +--- a/kernel/mutex.c ++++ b/kernel/mutex.c +@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + spin_lock_mutex(&lock->wait_lock, flags); + + debug_mutex_lock_common(lock, &waiter); +- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); ++ debug_mutex_add_waiter(lock, &waiter, task); + + /* add waiting tasks to the end of the waitqueue (FIFO): */ + list_add_tail(&waiter.list, &lock->wait_list); +@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + * TASK_UNINTERRUPTIBLE case.) + */ + if (unlikely(signal_pending_state(state, task))) { +- mutex_remove_waiter(lock, &waiter, +- task_thread_info(task)); ++ mutex_remove_waiter(lock, &waiter, task); + mutex_release(&lock->dep_map, 1, ip); + spin_unlock_mutex(&lock->wait_lock, flags); + +@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, + done: + lock_acquired(&lock->dep_map, ip); + /* got the lock - rejoice! */ +- mutex_remove_waiter(lock, &waiter, current_thread_info()); ++ mutex_remove_waiter(lock, &waiter, task); + mutex_set_owner(lock); + + /* set it to 0 if there are no waiters left: */ +diff --git a/kernel/padata.c b/kernel/padata.c +index b452599..5d68f4e 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst, + padata->pd = pd; + padata->cb_cpu = cb_cpu; + +- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr)) +- atomic_set(&pd->seq_nr, -1); ++ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr)) ++ atomic_set_unchecked(&pd->seq_nr, -1); + +- padata->seq_nr = atomic_inc_return(&pd->seq_nr); ++ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr); + + target_cpu = padata_cpu_hash(padata); + queue = per_cpu_ptr(pd->pqueue, target_cpu); +@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + padata_init_pqueues(pd); + padata_init_squeues(pd); + setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); +- atomic_set(&pd->seq_nr, -1); ++ atomic_set_unchecked(&pd->seq_nr, -1); + atomic_set(&pd->reorder_objects, 0); + atomic_set(&pd->refcnt, 0); + pd->pinst = pinst; +diff --git a/kernel/panic.c b/kernel/panic.c +index 3458469..342c500 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...) 
+ va_end(args); + printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); + #ifdef CONFIG_DEBUG_BUGVERBOSE +- dump_stack(); ++ /* ++ * Avoid nested stack-dumping if a panic occurs during oops processing ++ */ ++ if (!oops_in_progress) ++ dump_stack(); + #endif + + /* +@@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, + const char *board; + + printk(KERN_WARNING "------------[ cut here ]------------\n"); +- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); ++ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller); + board = dmi_get_system_info(DMI_PRODUCT_NAME); + if (board) + printk(KERN_WARNING "Hardware name: %s\n", board); +@@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null); + */ + void __stack_chk_fail(void) + { +- panic("stack-protector: Kernel stack is corrupted in: %p\n", ++ dump_stack(); ++ panic("stack-protector: Kernel stack is corrupted in: %pA\n", + __builtin_return_address(0)); + } + EXPORT_SYMBOL(__stack_chk_fail); +diff --git a/kernel/pid.c b/kernel/pid.c +index fa5f722..0c93e57 100644 +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -33,6 +33,7 @@ + #include <linux/rculist.h> + #include <linux/bootmem.h> + #include <linux/hash.h> ++#include <linux/security.h> + #include <linux/pid_namespace.h> + #include <linux/init_task.h> + #include <linux/syscalls.h> +@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID; + + int pid_max = PID_MAX_DEFAULT; + +-#define RESERVED_PIDS 300 ++#define RESERVED_PIDS 500 + + int pid_max_min = RESERVED_PIDS + 1; + int pid_max_max = PID_MAX_LIMIT; +@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task); + */ + struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) + { ++ struct task_struct *task; ++ + rcu_lockdep_assert(rcu_read_lock_held(), + "find_task_by_pid_ns() needs rcu_read_lock()" + " protection"); +- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ if (gr_pid_is_chrooted(task)) ++ return NULL; ++ ++ return task; + } + + struct task_struct *find_task_by_vpid(pid_t vnr) +@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr) + return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns); + } + ++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr) ++{ ++ rcu_lockdep_assert(rcu_read_lock_held(), ++ "find_task_by_pid_ns() needs rcu_read_lock()" ++ " protection"); ++ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID); ++} ++ + struct pid *get_task_pid(struct task_struct *task, enum pid_type type) + { + struct pid *pid; +diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c +index e7cb76d..75eceb3 100644 +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -6,6 +6,7 @@ + #include <linux/posix-timers.h> + #include <linux/errno.h> + #include <linux/math64.h> ++#include <linux/security.h> + #include <asm/uaccess.h> + #include <linux/kernel_stat.h> + #include <trace/events/timer.h> +@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = { + + static __init int init_posix_cpu_timers(void) + { +- struct k_clock process = { ++ static struct k_clock process = { + .clock_getres = process_cpu_clock_getres, + .clock_get = process_cpu_clock_get, + .timer_create = process_cpu_timer_create, + .nsleep = process_cpu_nsleep, + .nsleep_restart = process_cpu_nsleep_restart, + }; +- struct k_clock thread = { ++ static struct k_clock thread = { + .clock_getres = thread_cpu_clock_getres, + .clock_get = thread_cpu_clock_get, + 
.timer_create = thread_cpu_timer_create, +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c +index 69185ae..cc2847a 100644 +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -43,6 +43,7 @@ + #include <linux/idr.h> + #include <linux/posix-clock.h> + #include <linux/posix-timers.h> ++#include <linux/grsecurity.h> + #include <linux/syscalls.h> + #include <linux/wait.h> + #include <linux/workqueue.h> +@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock); + * which we beg off on and pass to do_sys_settimeofday(). + */ + +-static struct k_clock posix_clocks[MAX_CLOCKS]; ++static struct k_clock *posix_clocks[MAX_CLOCKS]; + + /* + * These ones are defined below. +@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp) + */ + static __init int init_posix_timers(void) + { +- struct k_clock clock_realtime = { ++ static struct k_clock clock_realtime = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_clock_realtime_get, + .clock_set = posix_clock_realtime_set, +@@ -239,7 +240,7 @@ static __init int init_posix_timers(void) + .timer_get = common_timer_get, + .timer_del = common_timer_del, + }; +- struct k_clock clock_monotonic = { ++ static struct k_clock clock_monotonic = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_ktime_get_ts, + .nsleep = common_nsleep, +@@ -249,19 +250,19 @@ static __init int init_posix_timers(void) + .timer_get = common_timer_get, + .timer_del = common_timer_del, + }; +- struct k_clock clock_monotonic_raw = { ++ static struct k_clock clock_monotonic_raw = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_monotonic_raw, + }; +- struct k_clock clock_realtime_coarse = { ++ static struct k_clock clock_realtime_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_realtime_coarse, + }; +- struct k_clock clock_monotonic_coarse = { ++ static struct k_clock clock_monotonic_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_monotonic_coarse, + }; +- struct k_clock clock_boottime = { ++ static struct k_clock clock_boottime = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_boottime, + .nsleep = common_nsleep, +@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id, + return; + } + +- posix_clocks[clock_id] = *new_clock; ++ posix_clocks[clock_id] = new_clock; + } + EXPORT_SYMBOL_GPL(posix_timers_register_clock); + +@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id) + return (id & CLOCKFD_MASK) == CLOCKFD ? 
+ &clock_posix_dynamic : &clock_posix_cpu; + +- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres) ++ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres) + return NULL; +- return &posix_clocks[id]; ++ return posix_clocks[id]; + } + + static int common_timer_create(struct k_itimer *new_timer) +@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, + if (copy_from_user(&new_tp, tp, sizeof (*tp))) + return -EFAULT; + ++ /* only the CLOCK_REALTIME clock can be set, all other clocks ++ have their clock_set fptr set to a nosettime dummy function ++ CLOCK_REALTIME has a NULL clock_set fptr which causes it to ++ call common_clock_set, which calls do_sys_settimeofday, which ++ we hook ++ */ ++ + return kc->clock_set(which_clock, &new_tp); + } + +diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c +index d523593..68197a4 100644 +--- a/kernel/power/poweroff.c ++++ b/kernel/power/poweroff.c +@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { + .enable_mask = SYSRQ_ENABLE_BOOT, + }; + +-static int pm_sysrq_init(void) ++static int __init pm_sysrq_init(void) + { + register_sysrq_key('o', &sysrq_poweroff_op); + return 0; +diff --git a/kernel/power/process.c b/kernel/power/process.c +index 3d4b954..11af930 100644 +--- a/kernel/power/process.c ++++ b/kernel/power/process.c +@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only) + u64 elapsed_csecs64; + unsigned int elapsed_csecs; + bool wakeup = false; ++ bool timedout = false; + + do_gettimeofday(&start); + +@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only) + + while (true) { + todo = 0; ++ if (time_after(jiffies, end_time)) ++ timedout = true; + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (frozen(p) || !freezable(p)) +@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only) + * try_to_stop() after schedule() in ptrace/signal + * stop sees TIF_FREEZE. 
+ */ +- if (!task_is_stopped_or_traced(p) && +- !freezer_should_skip(p)) ++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) { + todo++; ++ if (timedout) { ++ printk(KERN_ERR "Task refusing to freeze:\n"); ++ sched_show_task(p); ++ } ++ } + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + +@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only) + todo += wq_busy; + } + +- if (!todo || time_after(jiffies, end_time)) ++ if (!todo || timedout) + break; + + if (pm_wakeup_pending()) { +diff --git a/kernel/printk.c b/kernel/printk.c +index 7982a0a..2095fdc 100644 +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file) + if (from_file && type != SYSLOG_ACTION_OPEN) + return 0; + ++#ifdef CONFIG_GRKERNSEC_DMESG ++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN)) ++ return -EPERM; ++#endif ++ + if (syslog_action_restricted(type)) { + if (capable(CAP_SYSLOG)) + return 0; +diff --git a/kernel/profile.c b/kernel/profile.c +index 76b8e77..a2930e8 100644 +--- a/kernel/profile.c ++++ b/kernel/profile.c +@@ -39,7 +39,7 @@ struct profile_hit { + /* Oprofile timer tick hook */ + static int (*timer_hook)(struct pt_regs *) __read_mostly; + +-static atomic_t *prof_buffer; ++static atomic_unchecked_t *prof_buffer; + static unsigned long prof_len, prof_shift; + + int prof_on __read_mostly; +@@ -281,7 +281,7 @@ static void profile_flip_buffers(void) + hits[i].pc = 0; + continue; + } +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].hits = hits[i].pc = 0; + } + } +@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) + * Add the current hit(s) and flush the write-queue out + * to the global buffer: + */ +- atomic_add(nr_hits, &prof_buffer[pc]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]); + for (i = 0; i < NR_PROFILE_HIT; ++i) { +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].pc = hits[i].hits = 0; + } + out: +@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) + { + unsigned long pc; + pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; +- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); + } + #endif /* !CONFIG_SMP */ + +@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) + return -EFAULT; + buf++; p++; count--; read++; + } +- pnt = (char *)prof_buffer + p - sizeof(atomic_t); ++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t); + if (copy_to_user(buf, (void *)pnt, count)) + return -EFAULT; + read += count; +@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, + } + #endif + profile_discard_flip_buffers(); +- memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); ++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t)); + return count; + } + +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 78ab24a..332c915 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state) + return ret; + } + +-int __ptrace_may_access(struct task_struct *task, unsigned int mode) ++static int __ptrace_may_access(struct task_struct *task, unsigned int mode, ++ unsigned int log) + { + 
const struct cred *cred = current_cred(), *tcred; + +@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) + cred->gid == tcred->sgid && + cred->gid == tcred->gid)) + goto ok; +- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)) ++ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) || ++ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))) + goto ok; + rcu_read_unlock(); + return -EPERM; +@@ -207,7 +209,9 @@ ok: + smp_rmb(); + if (task->mm) + dumpable = get_dumpable(task->mm); +- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE)) ++ if (!dumpable && ++ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) || ++ (log && !task_ns_capable(task, CAP_SYS_PTRACE)))) + return -EPERM; + + return security_ptrace_access_check(task, mode); +@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) + { + int err; + task_lock(task); +- err = __ptrace_may_access(task, mode); ++ err = __ptrace_may_access(task, mode, 0); ++ task_unlock(task); ++ return !err; ++} ++ ++bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode) ++{ ++ return __ptrace_may_access(task, mode, 0); ++} ++ ++bool ptrace_may_access_log(struct task_struct *task, unsigned int mode) ++{ ++ int err; ++ task_lock(task); ++ err = __ptrace_may_access(task, mode, 1); + task_unlock(task); + return !err; + } +@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request, + goto out; + + task_lock(task); +- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1); + task_unlock(task); + if (retval) + goto unlock_creds; +@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request, + task->ptrace = PT_PTRACED; + if (seize) + task->ptrace |= PT_SEIZED; +- if (task_ns_capable(task, CAP_SYS_PTRACE)) ++ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE)) + task->ptrace |= PT_PTRACE_CAP; + + __ptrace_link(task, current); +@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst + break; + return -EIO; + } +- if (copy_to_user(dst, buf, retval)) ++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval)) + return -EFAULT; + copied += retval; + src += retval; +@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request, + bool seized = child->ptrace & PT_SEIZED; + int ret = -EIO; + siginfo_t siginfo, *si; +- void __user *datavp = (void __user *) data; ++ void __user *datavp = (__force void __user *) data; + unsigned long __user *datalp = datavp; + unsigned long flags; + +@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { + ret = ptrace_attach(child, request, data); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. 
+ */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); + if (copied != sizeof(tmp)) + return -EIO; +- return put_user(tmp, (unsigned long __user *)data); ++ return put_user(tmp, (__force unsigned long __user *)data); + } + + int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, +@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { + ret = ptrace_attach(child, request, data); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. + */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c +index 636af6d..8af70ab 100644 +--- a/kernel/rcutiny.c ++++ b/kernel/rcutiny.c +@@ -46,7 +46,7 @@ + struct rcu_ctrlblk; + static void invoke_rcu_callbacks(void); + static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); +-static void rcu_process_callbacks(struct softirq_action *unused); ++static void rcu_process_callbacks(void); + static void __call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu), + struct rcu_ctrlblk *rcp); +@@ -186,7 +186,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) + RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count)); + } + +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + __rcu_process_callbacks(&rcu_sched_ctrlblk); + __rcu_process_callbacks(&rcu_bh_ctrlblk); +diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c +index 764825c..3aa6ac4 100644 +--- a/kernel/rcutorture.c ++++ b/kernel/rcutorture.c +@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = + { 0 }; + static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = + { 0 }; +-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; +-static atomic_t n_rcu_torture_alloc; +-static atomic_t n_rcu_torture_alloc_fail; +-static atomic_t n_rcu_torture_free; +-static atomic_t n_rcu_torture_mberror; +-static atomic_t n_rcu_torture_error; ++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; ++static atomic_unchecked_t n_rcu_torture_alloc; ++static atomic_unchecked_t n_rcu_torture_alloc_fail; ++static atomic_unchecked_t n_rcu_torture_free; ++static atomic_unchecked_t n_rcu_torture_mberror; ++static atomic_unchecked_t n_rcu_torture_error; + static long n_rcu_torture_boost_ktrerror; + static long n_rcu_torture_boost_rterror; + static long n_rcu_torture_boost_failure; +@@ -223,11 +223,11 @@ rcu_torture_alloc(void) + + spin_lock_bh(&rcu_torture_lock); + if (list_empty(&rcu_torture_freelist)) { +- atomic_inc(&n_rcu_torture_alloc_fail); ++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail); + spin_unlock_bh(&rcu_torture_lock); + return NULL; + } +- atomic_inc(&n_rcu_torture_alloc); ++ atomic_inc_unchecked(&n_rcu_torture_alloc); + p = rcu_torture_freelist.next; + list_del_init(p); + spin_unlock_bh(&rcu_torture_lock); +@@ -240,7 +240,7 @@ rcu_torture_alloc(void) + static void + rcu_torture_free(struct rcu_torture *p) + { +- atomic_inc(&n_rcu_torture_free); 
++ atomic_inc_unchecked(&n_rcu_torture_free); + spin_lock_bh(&rcu_torture_lock); + list_add_tail(&p->rtort_free, &rcu_torture_freelist); + spin_unlock_bh(&rcu_torture_lock); +@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; + rcu_torture_free(rp); +@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; + list_del(&rp->rtort_free); +@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg) + i = old_rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + old_rp->rtort_pipe_count++; + cur_ops->deferred_free(old_rp); + } +@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused) + return; + } + if (p->rtort_mbtest == 0) +- atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + spin_lock(&rand_lock); + cur_ops->read_delay(&rand); + n_rcu_torture_timers++; +@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg) + continue; + } + if (p->rtort_mbtest == 0) +- atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + cur_ops->read_delay(&rand); + preempt_disable(); + pipe_count = p->rtort_pipe_count; +@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page) + rcu_torture_current, + rcu_torture_current_version, + list_empty(&rcu_torture_freelist), +- atomic_read(&n_rcu_torture_alloc), +- atomic_read(&n_rcu_torture_alloc_fail), +- atomic_read(&n_rcu_torture_free), +- atomic_read(&n_rcu_torture_mberror), ++ atomic_read_unchecked(&n_rcu_torture_alloc), ++ atomic_read_unchecked(&n_rcu_torture_alloc_fail), ++ atomic_read_unchecked(&n_rcu_torture_free), ++ atomic_read_unchecked(&n_rcu_torture_mberror), + n_rcu_torture_boost_ktrerror, + n_rcu_torture_boost_rterror, + n_rcu_torture_boost_failure, + n_rcu_torture_boosts, + n_rcu_torture_timers); +- if (atomic_read(&n_rcu_torture_mberror) != 0 || ++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 || + n_rcu_torture_boost_ktrerror != 0 || + n_rcu_torture_boost_rterror != 0 || + n_rcu_torture_boost_failure != 0) +@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page) + cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); + if (i > 1) { + cnt += sprintf(&page[cnt], "!!! 
"); +- atomic_inc(&n_rcu_torture_error); ++ atomic_inc_unchecked(&n_rcu_torture_error); + WARN_ON_ONCE(1); + } + cnt += sprintf(&page[cnt], "Reader Pipe: "); +@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page) + cnt += sprintf(&page[cnt], "Free-Block Circulation: "); + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + cnt += sprintf(&page[cnt], " %d", +- atomic_read(&rcu_torture_wcount[i])); ++ atomic_read_unchecked(&rcu_torture_wcount[i])); + } + cnt += sprintf(&page[cnt], "\n"); + if (cur_ops->stats) +@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void) + + if (cur_ops->cleanup) + cur_ops->cleanup(); +- if (atomic_read(&n_rcu_torture_error)) ++ if (atomic_read_unchecked(&n_rcu_torture_error)) + rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); + else + rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); +@@ -1465,17 +1465,17 @@ rcu_torture_init(void) + + rcu_torture_current = NULL; + rcu_torture_current_version = 0; +- atomic_set(&n_rcu_torture_alloc, 0); +- atomic_set(&n_rcu_torture_alloc_fail, 0); +- atomic_set(&n_rcu_torture_free, 0); +- atomic_set(&n_rcu_torture_mberror, 0); +- atomic_set(&n_rcu_torture_error, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0); ++ atomic_set_unchecked(&n_rcu_torture_free, 0); ++ atomic_set_unchecked(&n_rcu_torture_mberror, 0); ++ atomic_set_unchecked(&n_rcu_torture_error, 0); + n_rcu_torture_boost_ktrerror = 0; + n_rcu_torture_boost_rterror = 0; + n_rcu_torture_boost_failure = 0; + n_rcu_torture_boosts = 0; + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) +- atomic_set(&rcu_torture_wcount[i], 0); ++ atomic_set_unchecked(&rcu_torture_wcount[i], 0); + for_each_possible_cpu(cpu) { + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + per_cpu(rcu_torture_count, cpu)[i] = 0; +diff --git a/kernel/rcutree.c b/kernel/rcutree.c +index 6b76d81..7afc1b3 100644 +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -367,9 +367,9 @@ void rcu_enter_nohz(void) + trace_rcu_dyntick("Start"); + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ + smp_mb__before_atomic_inc(); /* See above. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); + local_irq_restore(flags); + } + +@@ -391,10 +391,10 @@ void rcu_exit_nohz(void) + return; + } + smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ + smp_mb__after_atomic_inc(); /* See above. */ +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); + trace_rcu_dyntick("End"); + local_irq_restore(flags); + } +@@ -411,14 +411,14 @@ void rcu_nmi_enter(void) + struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); + + if (rdtp->dynticks_nmi_nesting == 0 && +- (atomic_read(&rdtp->dynticks) & 0x1)) ++ (atomic_read_unchecked(&rdtp->dynticks) & 0x1)) + return; + rdtp->dynticks_nmi_nesting++; + smp_mb__before_atomic_inc(); /* Force delay from prior write. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ + smp_mb__after_atomic_inc(); /* See above. 
*/ +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); + } + + /** +@@ -437,9 +437,9 @@ void rcu_nmi_exit(void) + return; + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ + smp_mb__before_atomic_inc(); /* See above. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + smp_mb__after_atomic_inc(); /* Force delay to next write. */ +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); + } + + /** +@@ -474,7 +474,7 @@ void rcu_irq_exit(void) + */ + static int dyntick_save_progress_counter(struct rcu_data *rdp) + { +- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); ++ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); + return 0; + } + +@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) + unsigned int curr; + unsigned int snap; + +- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); ++ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); + snap = (unsigned int)rdp->dynticks_snap; + + /* +@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) + /* + * Do RCU core processing for the current CPU. + */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + trace_rcu_utilization("Start RCU core"); + __rcu_process_callbacks(&rcu_sched_state, +diff --git a/kernel/rcutree.h b/kernel/rcutree.h +index 849ce9e..74bc9de 100644 +--- a/kernel/rcutree.h ++++ b/kernel/rcutree.h +@@ -86,7 +86,7 @@ + struct rcu_dynticks { + int dynticks_nesting; /* Track irq/process nesting level. */ + int dynticks_nmi_nesting; /* Track NMI nesting level. */ +- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */ ++ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */ + }; + + /* RCU's kthread states for tracing. */ +diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h +index 4b9b9f8..2326053 100644 +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h +@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void) + + /* Clean up and exit. */ + smp_mb(); /* ensure expedited GP seen before counter increment. */ +- ACCESS_ONCE(sync_rcu_preempt_exp_count)++; ++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++; + unlock_mb_ret: + mutex_unlock(&sync_rcu_preempt_exp_mutex); + mb_ret: +@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited); + + #else /* #ifndef CONFIG_SMP */ + +-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); +-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); ++static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0); ++static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0); + + static int synchronize_sched_expedited_cpu_stop(void *data) + { +@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void) + int firstsnap, s, snap, trycount = 0; + + /* Note that atomic_inc_return() implies full memory barrier. */ +- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started); ++ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started); + get_online_cpus(); + + /* +@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void) + } + + /* Check to see if someone else did our work for us. 
*/ +- s = atomic_read(&sync_sched_expedited_done); ++ s = atomic_read_unchecked(&sync_sched_expedited_done); + if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) { + smp_mb(); /* ensure test happens before caller kfree */ + return; +@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void) + * grace period works for us. + */ + get_online_cpus(); +- snap = atomic_read(&sync_sched_expedited_started) - 1; ++ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1; + smp_mb(); /* ensure read is before try_stop_cpus(). */ + } + +@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void) + * than we did beat us to the punch. + */ + do { +- s = atomic_read(&sync_sched_expedited_done); ++ s = atomic_read_unchecked(&sync_sched_expedited_done); + if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) { + smp_mb(); /* ensure test happens before caller kfree */ + break; + } +- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s); ++ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s); + + put_online_cpus(); + } +@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu) + for_each_online_cpu(thatcpu) { + if (thatcpu == cpu) + continue; +- snap = atomic_add_return(0, &per_cpu(rcu_dynticks, ++ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks, + thatcpu).dynticks); + smp_mb(); /* Order sampling of snap with end of grace period. */ + if ((snap & 0x1) != 0) { +diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c +index 9feffa4..54058df 100644 +--- a/kernel/rcutree_trace.c ++++ b/kernel/rcutree_trace.c +@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) + rdp->qs_pending); + #ifdef CONFIG_NO_HZ + seq_printf(m, " dt=%d/%d/%d df=%lu", +- atomic_read(&rdp->dynticks->dynticks), ++ atomic_read_unchecked(&rdp->dynticks->dynticks), + rdp->dynticks->dynticks_nesting, + rdp->dynticks->dynticks_nmi_nesting, + rdp->dynticks_fqs); +@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) + rdp->qs_pending); + #ifdef CONFIG_NO_HZ + seq_printf(m, ",%d,%d,%d,%lu", +- atomic_read(&rdp->dynticks->dynticks), ++ atomic_read_unchecked(&rdp->dynticks->dynticks), + rdp->dynticks->dynticks_nesting, + rdp->dynticks->dynticks_nmi_nesting, + rdp->dynticks_fqs); +diff --git a/kernel/resource.c b/kernel/resource.c +index 7640b3a..5879283 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = { + + static int __init ioresources_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations); ++#endif ++#else + proc_create("ioports", 0, NULL, &proc_ioports_operations); + proc_create("iomem", 0, NULL, &proc_iomem_operations); ++#endif + return 0; + } + __initcall(ioresources_init); +diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c +index 3d9f31c..7fefc9e 100644 +--- a/kernel/rtmutex-tester.c ++++ b/kernel/rtmutex-tester.c +@@ -20,7 +20,7 @@ + #define MAX_RT_TEST_MUTEXES 8 + + static spinlock_t rttest_lock; +-static atomic_t rttest_event; ++static atomic_unchecked_t rttest_event; + + struct test_thread_data { + int opcode; +@@ -61,7 +61,7 @@ static int 
handle_op(struct test_thread_data *td, int lockwakeup) + + case RTTEST_LOCKCONT: + td->mutexes[td->opdata] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return 0; + + case RTTEST_RESET: +@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return 0; + + case RTTEST_RESETEVENT: +- atomic_set(&rttest_event, 0); ++ atomic_set_unchecked(&rttest_event, 0); + return 0; + + default: +@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + rt_mutex_lock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 4; + return 0; + +@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + ret = rt_mutex_lock_interruptible(&mutexes[id], 0); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = ret ? 0 : 4; + return ret ? -EINTR : 0; + +@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) + return ret; + +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + rt_mutex_unlock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 0; + return 0; + +@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + break; + + td->mutexes[dat] = 2; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + default: +@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 3; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + case RTTEST_LOCKNOWAIT: +@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return; + + default: +diff --git a/kernel/sched.c b/kernel/sched.c +index d6b149c..896cbb8 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq) + BUG(); /* the idle class will always have a runnable task */ + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++static inline void gr_cred_schedule(void) ++{ ++ if (unlikely(current->delayed_cred)) ++ gr_delayed_cred_worker(); ++} ++#else ++static inline void gr_cred_schedule(void) ++{ ++} ++#endif ++ + /* + * __schedule() is the main scheduler function. 
+ */ +@@ -4408,6 +4421,8 @@ need_resched: + + schedule_debug(prev); + ++ gr_cred_schedule(); ++ + if (sched_feat(HRTICK)) + hrtick_clear(rq); + +@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice) + /* convert nice value [19,-20] to rlimit style value [1,40] */ + int nice_rlim = 20 - nice; + ++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1); ++ + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || + capable(CAP_SYS_NICE)); + } +@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment) + if (nice > 19) + nice = 19; + +- if (increment < 0 && !can_nice(current, nice)) ++ if (increment < 0 && (!can_nice(current, nice) || ++ gr_handle_chroot_nice())) + return -EPERM; + + retval = security_task_setnice(current, nice); +@@ -5288,6 +5306,7 @@ recheck: + unsigned long rlim_rtprio = + task_rlimit(p, RLIMIT_RTPRIO); + ++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1); + /* can't set/change the rt policy */ + if (policy != p->policy && !rlim_rtprio) + return -EPERM; +diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c +index 429242f..d7cca82 100644 +--- a/kernel/sched_autogroup.c ++++ b/kernel/sched_autogroup.c +@@ -7,7 +7,7 @@ + + unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; + static struct autogroup autogroup_default; +-static atomic_t autogroup_seq_nr; ++static atomic_unchecked_t autogroup_seq_nr; + + static void __init autogroup_init(struct task_struct *init_task) + { +@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void) + + kref_init(&ag->kref); + init_rwsem(&ag->lock); +- ag->id = atomic_inc_return(&autogroup_seq_nr); ++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr); + ag->tg = tg; + #ifdef CONFIG_RT_GROUP_SCHED + /* +diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c +index 8a39fa3..34f3dbc 100644 +--- a/kernel/sched_fair.c ++++ b/kernel/sched_fair.c +@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } + * run_rebalance_domains is triggered when needed from the scheduler tick. + * Also triggered for nohz idle balancing (with nohz_balancing_kick set). + */ +-static void run_rebalance_domains(struct softirq_action *h) ++static void run_rebalance_domains(void) + { + int this_cpu = smp_processor_id(); + struct rq *this_rq = cpu_rq(this_cpu); +diff --git a/kernel/signal.c b/kernel/signal.c +index 2065515..aed2987 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep; + + int print_fatal_signals __read_mostly; + +-static void __user *sig_handler(struct task_struct *t, int sig) ++static __sighandler_t sig_handler(struct task_struct *t, int sig) + { + return t->sighand->action[sig - 1].sa.sa_handler; + } + +-static int sig_handler_ignored(void __user *handler, int sig) ++static int sig_handler_ignored(__sighandler_t handler, int sig) + { + /* Is it explicitly or implicitly ignored? 
*/ + return handler == SIG_IGN || +@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig) + static int sig_task_ignored(struct task_struct *t, int sig, + int from_ancestor_ns) + { +- void __user *handler; ++ __sighandler_t handler; + + handler = sig_handler(t, sig); + +@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi + atomic_inc(&user->sigpending); + rcu_read_unlock(); + ++ if (!override_rlimit) ++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1); ++ + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default) + + int unhandled_signal(struct task_struct *tsk, int sig) + { +- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; ++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) +@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info, + } + } + ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL || ++ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid) ++ && gr_handle_signal(t, sig)) ++ return -EPERM; ++ + return security_task_kill(t, info, sig, 0); + } + +@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + return send_signal(sig, info, p, 1); + } + +-static int ++int + specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + return send_signal(sig, info, t, 0); +@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; ++ int is_unhandled = 0; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; +@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL) ++ is_unhandled = 1; + ret = specific_send_sig_info(sig, info, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + ++ /* only deal with unhandled signals, java etc trigger SIGSEGV during ++ normal operation */ ++ if (is_unhandled) { ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t); ++ gr_handle_crash(t, sig); ++ } ++ + return ret; + } + +@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + ret = check_kill_permission(sig, info, p); + rcu_read_unlock(); + +- if (!ret && sig) ++ if (!ret && sig) { + ret = do_send_sig_info(sig, info, p, true); ++ if (!ret) ++ gr_log_signal(sig, !is_si_special(info) ? 
info->si_addr : NULL, p); ++ } + + return ret; + } +@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) + int error = -ESRCH; + + rcu_read_lock(); +- p = find_task_by_vpid(pid); ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL && ++ sig == (SIGRTMIN+1) && tgid == info->si_pid) ++ p = find_task_by_vpid_unrestricted(pid); ++ else ++#endif ++ p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { + error = check_kill_permission(sig, info, p); + /* +diff --git a/kernel/smp.c b/kernel/smp.c +index db197d6..17aef0b 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait) + } + EXPORT_SYMBOL(smp_call_function); + +-void ipi_call_lock(void) ++void ipi_call_lock(void) __acquires(call_function.lock) + { + raw_spin_lock(&call_function.lock); + } + +-void ipi_call_unlock(void) ++void ipi_call_unlock(void) __releases(call_function.lock) + { + raw_spin_unlock(&call_function.lock); + } + +-void ipi_call_lock_irq(void) ++void ipi_call_lock_irq(void) __acquires(call_function.lock) + { + raw_spin_lock_irq(&call_function.lock); + } + +-void ipi_call_unlock_irq(void) ++void ipi_call_unlock_irq(void) __releases(call_function.lock) + { + raw_spin_unlock_irq(&call_function.lock); + } +diff --git a/kernel/softirq.c b/kernel/softirq.c +index 2c71d91..1021f81 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp + + DEFINE_PER_CPU(struct task_struct *, ksoftirqd); + +-char *softirq_to_name[NR_SOFTIRQS] = { ++const char * const softirq_to_name[NR_SOFTIRQS] = { + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; +@@ -235,7 +235,7 @@ restart: + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); +- h->action(h); ++ h->action(); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + printk(KERN_ERR "huh, entered softirq %u %s %p" +@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void open_softirq(int nr, void (*action)(struct softirq_action *)) ++void open_softirq(int nr, void (*action)(void)) + { +- softirq_vec[nr].action = action; ++ pax_open_kernel(); ++ *(void **)&softirq_vec[nr].action = action; ++ pax_close_kernel(); + } + + /* +@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) + + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++static void tasklet_action(void) + { + struct tasklet_struct *list; + +@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a) + } + } + +-static void tasklet_hi_action(struct softirq_action *a) ++static void tasklet_hi_action(void) + { + struct tasklet_struct *list; + +diff --git a/kernel/sys.c b/kernel/sys.c +index 481611f..0754d86 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) + error = -EACCES; + goto out; + } ++ ++ if (gr_handle_chroot_setpriority(p, niceval)) { ++ error = -EACCES; ++ goto out; ++ } ++ + no_nice = security_task_setnice(p, niceval); + if (no_nice) { + error = no_nice; +@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) + goto 
error; + } + ++ if (gr_check_group_change(new->gid, new->egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1 || + (egid != (gid_t) -1 && egid != old->gid)) + new->sgid = new->egid; +@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) + old = current_cred(); + + retval = -EPERM; ++ ++ if (gr_check_group_change(gid, gid, gid)) ++ goto error; ++ + if (nsown_capable(CAP_SETGID)) + new->gid = new->egid = new->sgid = new->fsgid = gid; + else if (gid == old->gid || gid == old->sgid) +@@ -618,7 +631,7 @@ error: + /* + * change the user struct in a credentials set to match the new UID + */ +-static int set_user(struct cred *new) ++int set_user(struct cred *new) + { + struct user_struct *new_user; + +@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) + goto error; + } + ++ if (gr_check_user_change(new->uid, new->euid, -1)) ++ goto error; ++ + if (new->uid != old->uid) { + retval = set_user(new); + if (retval < 0) +@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) + old = current_cred(); + + retval = -EPERM; ++ ++ if (gr_check_crash_uid(uid)) ++ goto error; ++ if (gr_check_user_change(uid, uid, uid)) ++ goto error; ++ + if (nsown_capable(CAP_SETUID)) { + new->suid = new->uid = uid; + if (uid != old->uid) { +@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) + goto error; + } + ++ if (gr_check_user_change(ruid, euid, -1)) ++ goto error; ++ + if (ruid != (uid_t) -1) { + new->uid = ruid; + if (ruid != old->uid) { +@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) + goto error; + } + ++ if (gr_check_group_change(rgid, egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1) + new->gid = rgid; + if (egid != (gid_t) -1) +@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + old = current_cred(); + old_fsuid = old->fsuid; + ++ if (gr_check_user_change(-1, -1, uid)) ++ goto error; ++ + if (uid == old->uid || uid == old->euid || + uid == old->suid || uid == old->fsuid || + nsown_capable(CAP_SETUID)) { +@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + } + } + ++error: + abort_creds(new); + return old_fsuid; + +@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) + if (gid == old->gid || gid == old->egid || + gid == old->sgid || gid == old->fsgid || + nsown_capable(CAP_SETGID)) { ++ if (gr_check_group_change(-1, -1, gid)) ++ goto error; ++ + if (gid != old_fsgid) { + new->fsgid = gid; + goto change_okay; + } + } + ++error: + abort_creds(new); + return old_fsgid; + +@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len) + } + v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; + snprintf(buf, len, "2.6.%u%s", v, rest); +- ret = copy_to_user(release, buf, len); ++ if (len > sizeof(buf)) ++ ret = -EFAULT; ++ else ++ ret = copy_to_user(release, buf, len); + } + return ret; + } +@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) + return -EFAULT; + + down_read(&uts_sem); +- error = __copy_to_user(&name->sysname, &utsname()->sysname, ++ error = __copy_to_user(name->sysname, &utsname()->sysname, + __OLD_UTS_LEN); + error |= __put_user(0, name->sysname + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->nodename, &utsname()->nodename, ++ error |= __copy_to_user(name->nodename, &utsname()->nodename, + __OLD_UTS_LEN); + error |= __put_user(0, name->nodename + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->release, &utsname()->release, ++ error |= __copy_to_user(name->release, &utsname()->release, + __OLD_UTS_LEN); + error |= 
__put_user(0, name->release + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->version, &utsname()->version, ++ error |= __copy_to_user(name->version, &utsname()->version, + __OLD_UTS_LEN); + error |= __put_user(0, name->version + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->machine, &utsname()->machine, ++ error |= __copy_to_user(name->machine, &utsname()->machine, + __OLD_UTS_LEN); + error |= __put_user(0, name->machine + __OLD_UTS_LEN); + up_read(&uts_sem); +@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, + error = get_dumpable(me->mm); + break; + case PR_SET_DUMPABLE: +- if (arg2 < 0 || arg2 > 1) { ++ if (arg2 > 1) { + error = -EINVAL; + break; + } +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index ae27196..7506d69 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -86,6 +86,13 @@ + + + #if defined(CONFIG_SYSCTL) ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op); ++extern int gr_handle_sysctl_mod(const char *dirname, const char *name, ++ const int op); ++extern int gr_handle_chroot_sysctl(const int op); + + /* External variables not in a header file. */ + extern int sysctl_overcommit_memory; +@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, + } + + #endif ++extern struct ctl_table grsecurity_table[]; + + static struct ctl_table root_table[]; + static struct ctl_table_root sysctl_table_root; +@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[]; + int sysctl_legacy_va_layout; + #endif + ++#ifdef CONFIG_PAX_SOFTMODE ++static ctl_table pax_table[] = { ++ { ++ .procname = "softmode", ++ .data = &pax_softmode, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ ++ { } ++}; ++#endif ++ + /* The default sysctl tables: */ + + static struct ctl_table root_table[] = { +@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000; + #endif + + static struct ctl_table kern_table[] = { ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) ++ { ++ .procname = "grsecurity", ++ .mode = 0500, ++ .child = grsecurity_table, ++ }, ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ { ++ .procname = "pax", ++ .mode = 0500, ++ .child = pax_table, ++ }, ++#endif ++ + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = { + .data = &modprobe_path, + .maxlen = KMOD_PATH_LEN, + .mode = 0644, +- .proc_handler = proc_dostring, ++ .proc_handler = proc_dostring_modpriv, + }, + { + .procname = "modules_disabled", +@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = { + .extra1 = &zero, + .extra2 = &one, + }, ++#endif + { + .procname = "kptr_restrict", + .data = &kptr_restrict, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dmesg_restrict, ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ .extra1 = &two, ++#else + .extra1 = &zero, ++#endif + .extra2 = &two, + }, +-#endif + { + .procname = "ngroups_max", + .data = &ngroups_max, +@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = { + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, ++ { ++ .procname = "heap_stack_gap", ++ .data = &sysctl_heap_stack_gap, ++ .maxlen = sizeof(sysctl_heap_stack_gap), ++ .mode = 0644, ++ .proc_handler = proc_doulongvec_minmax, ++ }, + #else + { + .procname = "nr_trim_pages", +@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op) + int sysctl_perm(struct 
ctl_table_root *root, struct ctl_table *table, int op) + { + int mode; ++ int error; ++ ++ if (table->parent != NULL && table->parent->procname != NULL && ++ table->procname != NULL && ++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op)) ++ return -EACCES; ++ if (gr_handle_chroot_sysctl(op)) ++ return -EACCES; ++ error = gr_handle_sysctl(table, op); ++ if (error) ++ return error; + + if (root->permissions) + mode = root->permissions(root, current->nsproxy, table); +@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write, + buffer, lenp, ppos); + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ if (write && !capable(CAP_SYS_MODULE)) ++ return -EPERM; ++ ++ return _proc_do_string(table->data, table->maxlen, write, ++ buffer, lenp, ppos); ++} ++ + static size_t proc_skip_spaces(char **buf) + { + size_t ret; +@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, + len = strlen(tmp); + if (len > *size) + len = *size; ++ if (len > sizeof(tmp)) ++ len = sizeof(tmp); + if (copy_to_user(*buf, tmp, len)) + return -EFAULT; + *size -= len; +@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int + *i = val; + } else { + val = convdiv * (*i) / convmul; +- if (!first) ++ if (!first) { + err = proc_put_char(&buffer, &left, '\t'); ++ if (err) ++ break; ++ } + err = proc_put_long(&buffer, &left, val, false); + if (err) + break; +@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write, + return -ENOSYS; + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ return -ENOSYS; ++} ++ + int proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax); + EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); + EXPORT_SYMBOL(proc_dointvec_ms_jiffies); + EXPORT_SYMBOL(proc_dostring); ++EXPORT_SYMBOL(proc_dostring_modpriv); + EXPORT_SYMBOL(proc_doulongvec_minmax); + EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax); + EXPORT_SYMBOL(register_sysctl_table); +diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c +index a650694..aaeeb20 100644 +--- a/kernel/sysctl_binary.c ++++ b/kernel/sysctl_binary.c +@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file, + int i; + + set_fs(KERNEL_DS); +- result = vfs_read(file, buffer, BUFSZ - 1, &pos); ++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos); + set_fs(old_fs); + if (result < 0) + goto out_kfree; +@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file, + } + + set_fs(KERNEL_DS); +- result = vfs_write(file, buffer, str - buffer, &pos); ++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos); + set_fs(old_fs); + if (result < 0) + goto out_kfree; +@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file, + int i; + + set_fs(KERNEL_DS); +- result = vfs_read(file, buffer, BUFSZ - 1, &pos); ++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos); + set_fs(old_fs); + if (result < 0) + goto out_kfree; +@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file, + } + + set_fs(KERNEL_DS); +- result = vfs_write(file, buffer, str - buffer, &pos); ++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos); + set_fs(old_fs); + if (result < 0) + goto out_kfree; 
+@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file, + int i; + + set_fs(KERNEL_DS); +- result = vfs_read(file, buf, sizeof(buf) - 1, &pos); ++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos); + set_fs(old_fs); + if (result < 0) + goto out; +@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file, + __le16 dnaddr; + + set_fs(KERNEL_DS); +- result = vfs_read(file, buf, sizeof(buf) - 1, &pos); ++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos); + set_fs(old_fs); + if (result < 0) + goto out; +@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file, + le16_to_cpu(dnaddr) & 0x3ff); + + set_fs(KERNEL_DS); +- result = vfs_write(file, buf, len, &pos); ++ result = vfs_write(file, (const char __force_user *)buf, len, &pos); + set_fs(old_fs); + if (result < 0) + goto out; +diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c +index 362da65..ab8ef8c 100644 +--- a/kernel/sysctl_check.c ++++ b/kernel/sysctl_check.c +@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) + set_fail(&fail, table, "Directory with extra2"); + } else { + if ((table->proc_handler == proc_dostring) || ++ (table->proc_handler == proc_dostring_modpriv) || + (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_dointvec_minmax) || + (table->proc_handler == proc_dointvec_jiffies) || +diff --git a/kernel/taskstats.c b/kernel/taskstats.c +index e660464..c8b9e67 100644 +--- a/kernel/taskstats.c ++++ b/kernel/taskstats.c +@@ -27,9 +27,12 @@ + #include <linux/cgroup.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/grsecurity.h> + #include <net/genetlink.h> + #include <linux/atomic.h> + ++extern int gr_is_taskstats_denied(int pid); ++ + /* + * Maximum length of a cpumask that can be specified in + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute +@@ -556,6 +559,9 @@ err: + + static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) + { ++ if (gr_is_taskstats_denied(current->pid)) ++ return -EACCES; ++ + if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]) + return cmd_attr_register_cpumask(info); + else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]) +diff --git a/kernel/time.c b/kernel/time.c +index 73e416d..cfc6f69 100644 +--- a/kernel/time.c ++++ b/kernel/time.c +@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) + return error; + + if (tz) { ++ /* we log in do_settimeofday called below, so don't log twice ++ */ ++ if (!tv) ++ gr_log_timechange(); ++ + /* SMP safe, global irq locking makes it work. */ + sys_tz = *tz; + update_vsyscall_tz(); +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index 8a46f5d..bbe6f9c 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void) + struct platform_device *pdev; + int error = 0; + int i; +- struct k_clock alarm_clock = { ++ static struct k_clock alarm_clock = { + .clock_getres = alarm_clock_getres, + .clock_get = alarm_clock_get, + .timer_create = alarm_timer_create, +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c +index fd4a7b1..fae5c2a 100644 +--- a/kernel/time/tick-broadcast.c ++++ b/kernel/time/tick-broadcast.c +@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) + * then clear the broadcast bit. 
+ */ + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { +- int cpu = smp_processor_id(); ++ cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); + tick_broadcast_clear_oneshot(cpu); +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 2378413..be455fd 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -14,6 +14,7 @@ + #include <linux/init.h> + #include <linux/mm.h> + #include <linux/sched.h> ++#include <linux/grsecurity.h> + #include <linux/syscore_ops.h> + #include <linux/clocksource.h> + #include <linux/jiffies.h> +@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv) + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + ++ gr_log_timechange(); ++ + write_seqlock_irqsave(&xtime_lock, flags); + + timekeeping_forward_now(); +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index 3258455..f35227d 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); + + static void print_name_offset(struct seq_file *m, void *sym) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name((unsigned long)sym, symname) < 0) + SEQ_printf(m, "<%pK>", sym); + else + SEQ_printf(m, "%s", symname); ++#endif + } + + static void +@@ -112,7 +116,11 @@ next_one: + static void + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, " .base: %p\n", NULL); ++#else + SEQ_printf(m, " .base: %pK\n", base); ++#endif + SEQ_printf(m, " .index: %d\n", + base->index); + SEQ_printf(m, " .resolution: %Lu nsecs\n", +@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops); ++#else + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c +index 0b537f2..9e71eca 100644 +--- a/kernel/time/timer_stats.c ++++ b/kernel/time/timer_stats.c +@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop; + static unsigned long nr_entries; + static struct entry entries[MAX_ENTRIES]; + +-static atomic_t overflow_count; ++static atomic_unchecked_t overflow_count; + + /* + * The entries are in a hash-table, for fast lookup: +@@ -140,7 +140,7 @@ static void reset_entries(void) + nr_entries = 0; + memset(entries, 0, sizeof(entries)); + memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); +- atomic_set(&overflow_count, 0); ++ atomic_set_unchecked(&overflow_count, 0); + } + + static struct entry *alloc_entry(void) +@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + if (likely(entry)) + entry->count++; + else +- atomic_inc(&overflow_count); ++ atomic_inc_unchecked(&overflow_count); + + out_unlock: + raw_spin_unlock_irqrestore(lock, flags); +@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + + static void print_name_offset(struct seq_file *m, unsigned long addr) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name(addr, symname) < 0) + seq_printf(m, "<%p>", (void *)addr); + else + seq_printf(m, "%s", symname); ++#endif + } + + static int tstats_show(struct seq_file *m, 
void *v) +@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v) + + seq_puts(m, "Timer Stats Version: v0.2\n"); + seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); +- if (atomic_read(&overflow_count)) ++ if (atomic_read_unchecked(&overflow_count)) + seq_printf(m, "Overflow: %d entries\n", +- atomic_read(&overflow_count)); ++ atomic_read_unchecked(&overflow_count)); + + for (i = 0; i < nr_entries; i++) { + entry = entries + i; +@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops); ++#else + pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git a/kernel/timer.c b/kernel/timer.c +index 9c3c62b..441690e 100644 +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick) + /* + * This function runs timers and the timer-tq in bottom half context. + */ +-static void run_timer_softirq(struct softirq_action *h) ++static void run_timer_softirq(void) + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index 16fc34a..efd8bb8 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, + struct blk_trace *bt = filp->private_data; + char buf[16]; + +- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); ++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped)); + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + } +@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, + return 1; + + bt = buf->chan->private_data; +- atomic_inc(&bt->dropped); ++ atomic_inc_unchecked(&bt->dropped); + return 0; + } + +@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + + bt->dir = dir; + bt->dev = dev; +- atomic_set(&bt->dropped, 0); ++ atomic_set_unchecked(&bt->dropped, 0); + + ret = -EIO; + bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 25b4f4d..6f4772d 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) + if (unlikely(ftrace_disabled)) + return 0; + ++ ret = ftrace_arch_code_modify_prepare(); ++ FTRACE_WARN_ON(ret); ++ if (ret) ++ return 0; ++ + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); ++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process()); + if (ret) { + ftrace_bug(ret, ip); +- return 0; + } +- return 1; ++ return ret ? 
0 : 1; + } + + /* +@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp) + + int + register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, +- void *data) ++ void *data) + { + struct ftrace_func_probe *entry; + struct ftrace_page *pg; +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index f2bd275..adaf3a2 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = { + }; + #endif + +-static struct dentry *d_tracer; +- + struct dentry *tracing_init_dentry(void) + { ++ static struct dentry *d_tracer; + static int once; + + if (d_tracer) +@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void) + return d_tracer; + } + +-static struct dentry *d_percpu; +- + struct dentry *tracing_dentry_percpu(void) + { ++ static struct dentry *d_percpu; + static int once; + struct dentry *d_tracer; + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index c212a7f..7b02394 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list); + struct ftrace_module_file_ops { + struct list_head list; + struct module *mod; +- struct file_operations id; +- struct file_operations enable; +- struct file_operations format; +- struct file_operations filter; + }; + + static struct ftrace_module_file_ops * +@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod) + + file_ops->mod = mod; + +- file_ops->id = ftrace_event_id_fops; +- file_ops->id.owner = mod; +- +- file_ops->enable = ftrace_enable_fops; +- file_ops->enable.owner = mod; +- +- file_ops->filter = ftrace_event_filter_fops; +- file_ops->filter.owner = mod; +- +- file_ops->format = ftrace_event_format_fops; +- file_ops->format.owner = mod; ++ pax_open_kernel(); ++ *(void **)&mod->trace_id.owner = mod; ++ *(void **)&mod->trace_enable.owner = mod; ++ *(void **)&mod->trace_filter.owner = mod; ++ *(void **)&mod->trace_format.owner = mod; ++ pax_close_kernel(); + + list_add(&file_ops->list, &ftrace_module_file_list); + +@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod) + + for_each_event(call, start, end) { + __trace_add_event_call(*call, mod, +- &file_ops->id, &file_ops->enable, +- &file_ops->filter, &file_ops->format); ++ &mod->trace_id, &mod->trace_enable, ++ &mod->trace_filter, &mod->trace_format); + } + } + +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 00d527c..7c5b1a3 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + long ret; + int maxlen = get_rloc_len(*(u32 *)dest); + u8 *dst = get_rloc_data(dest); +- u8 *src = addr; ++ const u8 __user *src = (const u8 __force_user *)addr; + mm_segment_t old_fs = get_fs(); + if (!maxlen) + return; +@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + pagefault_disable(); + do + ret = __copy_from_user_inatomic(dst++, src++, 1); +- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); ++ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen); + dst[-1] = '\0'; + pagefault_enable(); + set_fs(old_fs); +@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + ((u8 *)get_rloc_data(dest))[0] = '\0'; + *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); + } else +- *(u32 *)dest = 
make_data_rloc(src - (u8 *)addr, ++ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr, + get_rloc_offs(*(u32 *)dest)); + } + /* Return the length of string -- including null terminal byte */ +@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, + set_fs(KERNEL_DS); + pagefault_disable(); + do { +- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); ++ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1); + len++; + } while (c && ret == 0 && len < MAX_STRING_SIZE); + pagefault_enable(); +diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c +index fd3c8aa..5f324a6 100644 +--- a/kernel/trace/trace_mmiotrace.c ++++ b/kernel/trace/trace_mmiotrace.c +@@ -24,7 +24,7 @@ struct header_iter { + static struct trace_array *mmio_trace_array; + static bool overrun_detected; + static unsigned long prev_overruns; +-static atomic_t dropped_count; ++static atomic_unchecked_t dropped_count; + + static void mmio_reset_data(struct trace_array *tr) + { +@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter) + + static unsigned long count_overruns(struct trace_iterator *iter) + { +- unsigned long cnt = atomic_xchg(&dropped_count, 0); ++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0); + unsigned long over = ring_buffer_overruns(iter->tr->buffer); + + if (over > prev_overruns) +@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index 5199930..26c73a0 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path) + + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); + if (!IS_ERR(p)) { +- p = mangle_path(s->buffer + s->len, p, "\n"); ++ p = mangle_path(s->buffer + s->len, p, "\n\"); + if (p) { + s->len = p - s->buffer; + return 1; +diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c +index 77575b3..6e623d1 100644 +--- a/kernel/trace/trace_stack.c ++++ b/kernel/trace/trace_stack.c +@@ -50,7 +50,7 @@ static inline void check_stack(void) + return; + + /* we do not handle interrupt stacks yet */ +- if (!object_is_on_stack(&this_size)) ++ if (!object_starts_on_stack(&this_size)) + return; + + local_irq_save(flags); +diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c +index 209b379..7f76423 100644 +--- a/kernel/trace/trace_workqueue.c ++++ b/kernel/trace/trace_workqueue.c +@@ -22,7 +22,7 @@ struct cpu_workqueue_stats { + int cpu; + pid_t pid; + /* Can be inserted from interrupt or user context, need to be atomic */ +- atomic_t inserted; ++ atomic_unchecked_t inserted; + /* + * Don't need to be atomic, works are serialized in a single workqueue thread + * on a single CPU. 
+@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore, + spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); + list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { + if (node->pid == wq_thread->pid) { +- atomic_inc(&node->inserted); ++ atomic_inc_unchecked(&node->inserted); + goto found; + } + } +@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p) + tsk = get_pid_task(pid, PIDTYPE_PID); + if (tsk) { + seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, +- atomic_read(&cws->inserted), cws->executed, ++ atomic_read_unchecked(&cws->inserted), cws->executed, + tsk->comm); + put_task_struct(tsk); + } +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 82928f5..92da771 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -1103,6 +1103,7 @@ config LATENCYTOP + depends on DEBUG_KERNEL + depends on STACKTRACE_SUPPORT + depends on PROC_FS ++ depends on !GRKERNSEC_HIDESYM + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND + select KALLSYMS + select KALLSYMS_ALL +diff --git a/lib/bitmap.c b/lib/bitmap.c +index 0d4a127..33a06c7 100644 +--- a/lib/bitmap.c ++++ b/lib/bitmap.c +@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, + { + int c, old_c, totaldigits, ndigits, nchunks, nbits; + u32 chunk; +- const char __user __force *ubuf = (const char __user __force *)buf; ++ const char __user *ubuf = (const char __force_user *)buf; + + bitmap_zero(maskp, nmaskbits); + +@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf, + { + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; +- return __bitmap_parse((const char __force *)ubuf, ++ return __bitmap_parse((const char __force_kernel *)ubuf, + ulen, 1, maskp, nmaskbits); + + } +@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, + { + unsigned a, b; + int c, old_c, totaldigits; +- const char __user __force *ubuf = (const char __user __force *)buf; ++ const char __user *ubuf = (const char __force_user *)buf; + int exp_digit, in_range; + + totaldigits = c = 0; +@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf, + { + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; +- return __bitmap_parselist((const char __force *)ubuf, ++ return __bitmap_parselist((const char __force_kernel *)ubuf, + ulen, 1, maskp, nmaskbits); + } + EXPORT_SYMBOL(bitmap_parselist_user); +diff --git a/lib/bug.c b/lib/bug.c +index 1955209..cbbb2ad 100644 +--- a/lib/bug.c ++++ b/lib/bug.c +@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) + return BUG_TRAP_TYPE_NONE; + + bug = find_bug(bugaddr); ++ if (!bug) ++ return BUG_TRAP_TYPE_NONE; + + file = NULL; + line = 0; +diff --git a/lib/debugobjects.c b/lib/debugobjects.c +index a78b7c6..2c73084 100644 +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack) + if (limit > 4) + return; + +- is_on_stack = object_is_on_stack(addr); ++ is_on_stack = object_starts_on_stack(addr); + if (is_on_stack == onstack) + return; + +diff --git a/lib/devres.c b/lib/devres.c +index 7c0e953..f642b5c 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache); + void devm_iounmap(struct device *dev, void __iomem *addr) + { + WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, +- (void *)addr)); ++ (void __force *)addr)); + iounmap(addr); + } + EXPORT_SYMBOL(devm_iounmap); +@@ -141,7 +141,7 @@ 
void devm_ioport_unmap(struct device *dev, void __iomem *addr) + { + ioport_unmap(addr); + WARN_ON(devres_destroy(dev, devm_ioport_map_release, +- devm_ioport_map_match, (void *)addr)); ++ devm_ioport_map_match, (void __force *)addr)); + } + EXPORT_SYMBOL(devm_ioport_unmap); + +diff --git a/lib/dma-debug.c b/lib/dma-debug.c +index fea790a..ebb0e82 100644 +--- a/lib/dma-debug.c ++++ b/lib/dma-debug.c +@@ -925,7 +925,7 @@ out: + + static void check_for_stack(struct device *dev, void *addr) + { +- if (object_is_on_stack(addr)) ++ if (object_starts_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); + } +diff --git a/lib/extable.c b/lib/extable.c +index 4cac81e..63e9b8f 100644 +--- a/lib/extable.c ++++ b/lib/extable.c +@@ -13,6 +13,7 @@ + #include <linux/init.h> + #include <linux/sort.h> + #include <asm/uaccess.h> ++#include <asm/pgtable.h> + + #ifndef ARCH_HAS_SORT_EXTABLE + /* +@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b) + void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish) + { ++ pax_open_kernel(); + sort(start, finish - start, sizeof(struct exception_table_entry), + cmp_ex, NULL); ++ pax_close_kernel(); + } + + #ifdef CONFIG_MODULES +diff --git a/lib/inflate.c b/lib/inflate.c +index 013a761..c28f3fc 100644 +--- a/lib/inflate.c ++++ b/lib/inflate.c +@@ -269,7 +269,7 @@ static void free(void *where) + malloc_ptr = free_mem_ptr; + } + #else +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + #endif + +diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c +index bd2bea9..6b3c95e 100644 +--- a/lib/is_single_threaded.c ++++ b/lib/is_single_threaded.c +@@ -22,6 +22,9 @@ bool current_is_single_threaded(void) + struct task_struct *p, *t; + bool ret; + ++ if (!mm) ++ return true; ++ + if (atomic_read(&task->signal->live) != 1) + return false; + +diff --git a/lib/kref.c b/lib/kref.c +index 3efb882..8492f4c 100644 +--- a/lib/kref.c ++++ b/lib/kref.c +@@ -52,7 +52,7 @@ void kref_get(struct kref *kref) + */ + int kref_put(struct kref *kref, void (*release)(struct kref *kref)) + { +- WARN_ON(release == NULL); ++ BUG_ON(release == NULL); + WARN_ON(release == (void (*)(struct kref *))kfree); + + if (atomic_dec_and_test(&kref->refcount)) { +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index d9df745..e73c2fe 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -80,7 +80,7 @@ struct radix_tree_preload { + int nr; + struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; + }; +-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + + static inline void *ptr_to_indirect(void *ptr) + { +diff --git a/lib/vsprintf.c b/lib/vsprintf.c +index 993599e..f1dbc14 100644 +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -16,6 +16,9 @@ + * - scnprintf and vscnprintf + */ + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <stdarg.h> + #include <linux/module.h> + #include <linux/types.h> +@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr, + char sym[KSYM_SYMBOL_LEN]; + if (ext == 'B') + sprint_backtrace(sym, value); +- else if (ext != 'f' && ext != 's') ++ else if (ext != 'f' && ext != 's' && ext != 'a') + sprint_symbol(sym, value); + else + kallsyms_lookup(value, NULL, NULL, NULL, sym); +@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr, 
+ return string(buf, end, uuid, spec); + } + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++int kptr_restrict __read_mostly = 2; ++#else + int kptr_restrict __read_mostly; ++#endif + + /* + * Show a '%p' thing. A kernel extension is that the '%p' is followed +@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly; + * - 'S' For symbolic direct pointers with offset + * - 's' For symbolic direct pointers without offset + * - 'B' For backtraced symbolic direct pointers with offset ++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM ++ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM + * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] + * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] + * - 'M' For a 6-byte MAC address, it prints the address in the +@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + { + if (!ptr && *fmt != 'K') { + /* +- * Print (null) with the same width as a pointer so it makes ++ * Print (nil) with the same width as a pointer so it makes + * tabular output look nice. + */ + if (spec.field_width == -1) + spec.field_width = 2 * sizeof(void *); +- return string(buf, end, "(null)", spec); ++ return string(buf, end, "(nil)", spec); + } + + switch (*fmt) { +@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + /* Fallthrough */ + case 'S': + case 's': ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ break; ++#else ++ return symbol_string(buf, end, ptr, spec, *fmt); ++#endif ++ case 'A': ++ case 'a': + case 'B': + return symbol_string(buf, end, ptr, spec, *fmt); + case 'R': +@@ -878,9 +894,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + case 'U': + return uuid_string(buf, end, ptr, spec, fmt); + case 'V': +- return buf + vsnprintf(buf, end > buf ? end - buf : 0, +- ((struct va_format *)ptr)->fmt, +- *(((struct va_format *)ptr)->va)); ++ { ++ va_list va; ++ ++ va_copy(va, *((struct va_format *)ptr)->va); ++ buf += vsnprintf(buf, end > buf ? end - buf : 0, ++ ((struct va_format *)ptr)->fmt, va); ++ va_end(va); ++ return buf; ++ } + case 'K': + /* + * %pK cannot be used in IRQ context because its test +@@ -1608,11 +1630,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + typeof(type) value; \ + if (sizeof(type) == 8) { \ + args = PTR_ALIGN(args, sizeof(u32)); \ +- *(u32 *)&value = *(u32 *)args; \ +- *((u32 *)&value + 1) = *(u32 *)(args + 4); \ ++ *(u32 *)&value = *(const u32 *)args; \ ++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \ + } else { \ + args = PTR_ALIGN(args, sizeof(type)); \ +- value = *(typeof(type) *)args; \ ++ value = *(const typeof(type) *)args; \ + } \ + args += sizeof(type); \ + value; \ +@@ -1675,7 +1697,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + case FORMAT_TYPE_STR: { + const char *str_arg = args; + args += strlen(str_arg) + 1; +- str = string(str, end, (char *)str_arg, spec); ++ str = string(str, end, str_arg, spec); + break; + } + +diff --git a/localversion-grsec b/localversion-grsec +new file mode 100644 +index 0000000..7cd6065 +--- /dev/null ++++ b/localversion-grsec +@@ -0,0 +1 @@ ++-grsec +diff --git a/mm/Kconfig b/mm/Kconfig +index 011b110..b492af2 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -241,10 +241,10 @@ config KSM + root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). 
+ + config DEFAULT_MMAP_MIN_ADDR +- int "Low address space to protect from user allocation" ++ int "Low address space to protect from user allocation" + depends on MMU +- default 4096 +- help ++ default 65536 ++ help + This is the portion of low virtual memory which should be protected + from userspace allocation. Keeping a user from writing to low pages + can help reduce the impact of kernel NULL pointer bugs. +diff --git a/mm/filemap.c b/mm/filemap.c +index 03c5b0e..a01e793 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) +- return -ENOEXEC; ++ return -ENODEV; + file_accessed(file); + vma->vm_ops = &generic_file_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; +@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i + *pos = i_size_read(inode); + + if (limit != RLIM_INFINITY) { ++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0); + if (*pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; +diff --git a/mm/fremap.c b/mm/fremap.c +index 9ed4fd4..c42648d 100644 +--- a/mm/fremap.c ++++ b/mm/fremap.c +@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + retry: + vma = find_vma(mm, start); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) ++ goto out; ++#endif ++ + /* + * Make sure the vma is shared, that it supports prefaulting, + * and that the remapped range is valid and fully within +diff --git a/mm/highmem.c b/mm/highmem.c +index 57d82c6..e9e0552 100644 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void) + * So no dangers, even with speculative execution. + */ + page = pte_page(pkmap_page_table[i]); ++ pax_open_kernel(); + pte_clear(&init_mm, (unsigned long)page_address(page), + &pkmap_page_table[i]); +- ++ pax_close_kernel(); + set_page_address(page, NULL); + need_flush = 1; + } +@@ -186,9 +187,11 @@ start: + } + } + vaddr = PKMAP_ADDR(last_pkmap_nr); ++ ++ pax_open_kernel(); + set_pte_at(&init_mm, vaddr, + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); +- ++ pax_close_kernel(); + pkmap_count[last_pkmap_nr] = 1; + set_page_address(page, (void *)vaddr); + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 8f005e9..1cb1036 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -704,7 +704,7 @@ out: + * run pte_offset_map on the pmd, if an huge pmd could + * materialize from under us from a different thread. 
+ */ +- if (unlikely(__pte_alloc(mm, vma, pmd, address))) ++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address))) + return VM_FAULT_OOM; + /* if an huge pmd materialized from under us just retry later */ + if (unlikely(pmd_trans_huge(*pmd))) +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 2316840..b418671 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, + return 1; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *vma_m; ++ unsigned long address_m; ++ pte_t *ptep_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); ++ get_page(page_m); ++ hugepage_add_anon_rmap(page_m, vma_m, address_m); ++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); ++} ++#endif ++ + /* + * Hugetlb_cow() should be called with page lock of the original hugepage held. + */ +@@ -2450,6 +2471,11 @@ retry_avoidcopy: + make_huge_pte(vma, new_page, 1)); + page_remove_rmap(old_page); + hugepage_add_new_anon_rmap(new_page, vma, address); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, new_page); ++#endif ++ + /* Make the old page be freed below */ + new_page = old_page; + mmu_notifier_invalidate_range_end(mm, +@@ -2601,6 +2627,10 @@ retry: + && (vma->vm_flags & VM_SHARED))); + set_huge_pte_at(mm, address, ptep, new_pte); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, page); ++#endif ++ + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { + /* Optimization, do the COW without a second fault */ + ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); +@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + static DEFINE_MUTEX(hugetlb_instantiation_mutex); + struct hstate *h = hstate_vma(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + ptep = huge_pte_offset(mm, address); + if (ptep) { + entry = huge_ptep_get(ptep); +@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + VM_FAULT_SET_HINDEX(h - hstates); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ h = hstate_vma(vma); ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h))) ++ return VM_FAULT_OOM; ++ address_m &= HPAGE_MASK; ++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL); ++ } ++#endif ++ + ptep = huge_pte_alloc(mm, address, huge_page_size(h)); + if (!ptep) + return VM_FAULT_OOM; +diff --git a/mm/internal.h b/mm/internal.h +index 2189af4..f2ca332 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page); + * in mm/page_alloc.c + */ + extern void __free_pages_bootmem(struct page *page, unsigned int order); ++extern void free_compound_page(struct page *page); + extern void prep_compound_page(struct page *page, unsigned long order); + #ifdef CONFIG_MEMORY_FAILURE + extern bool is_free_buddy_page(struct page *page); +diff --git 
a/mm/kmemleak.c b/mm/kmemleak.c +index f3b2a00..61da94d 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq, + + for (i = 0; i < object->trace_len; i++) { + void *ptr = (void *)object->trace[i]; +- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); ++ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr); + } + } + +diff --git a/mm/maccess.c b/mm/maccess.c +index d53adf9..03a24bf 100644 +--- a/mm/maccess.c ++++ b/mm/maccess.c +@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) + set_fs(KERNEL_DS); + pagefault_disable(); + ret = __copy_from_user_inatomic(dst, +- (__force const void __user *)src, size); ++ (const void __force_user *)src, size); + pagefault_enable(); + set_fs(old_fs); + +@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) + + set_fs(KERNEL_DS); + pagefault_disable(); +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); + pagefault_enable(); + set_fs(old_fs); + +diff --git a/mm/madvise.c b/mm/madvise.c +index 74bf193..feb6fd3 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma, + pgoff_t pgoff; + unsigned long new_flags = vma->vm_flags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; +@@ -110,6 +114,13 @@ success: + /* + * vm_flags is protected by the mmap_sem held in write mode. + */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); ++#endif ++ + vma->vm_flags = new_flags; + + out: +@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma, + struct vm_area_struct ** prev, + unsigned long start, unsigned long end) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + *prev = vma; + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) + return -EINVAL; +@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma, + zap_page_range(vma, start, end - start, &details); + } else + zap_page_range(vma, start, end - start, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) { ++ struct zap_details details = { ++ .nonlinear_vma = vma_m, ++ .last_index = ULONG_MAX, ++ }; ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details); ++ } else ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL); ++ } ++#endif ++ + return 0; + } + +@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) + if (end < start) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ goto out; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ goto out; ++ + error = 0; + if (end == start) + goto out; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 06d3479..0778eef 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0; + + int sysctl_memory_failure_recovery __read_mostly = 1; + +-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); ++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); + 
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) + +@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, + si.si_signo = SIGBUS; + si.si_errno = 0; + si.si_code = BUS_MCEERR_AO; +- si.si_addr = (void *)addr; ++ si.si_addr = (void __user *)addr; + #ifdef __ARCH_SI_TRAPNO + si.si_trapno = trapno; + #endif +@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) + } + + nr_pages = 1 << compound_trans_order(hpage); +- atomic_long_add(nr_pages, &mce_bad_pages); ++ atomic_long_add_unchecked(nr_pages, &mce_bad_pages); + + /* + * We need/can do nothing about count=0 pages. +@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) + if (!PageHWPoison(hpage) + || (hwpoison_filter(p) && TestClearPageHWPoison(p)) + || (p != hpage && TestSetPageHWPoison(hpage))) { +- atomic_long_sub(nr_pages, &mce_bad_pages); ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); + return 0; + } + set_page_hwpoison_huge_page(hpage); +@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) + } + if (hwpoison_filter(p)) { + if (TestClearPageHWPoison(p)) +- atomic_long_sub(nr_pages, &mce_bad_pages); ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); + unlock_page(hpage); + put_page(hpage); + return 0; +@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn) + return 0; + } + if (TestClearPageHWPoison(p)) +- atomic_long_sub(nr_pages, &mce_bad_pages); ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); + pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); + return 0; + } +@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn) + */ + if (TestClearPageHWPoison(page)) { + pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); +- atomic_long_sub(nr_pages, &mce_bad_pages); ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages); + freeit = 1; + if (PageHuge(page)) + clear_page_hwpoison_huge_page(page); +@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags) + } + done: + if (!PageHWPoison(hpage)) +- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); ++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages); + set_page_hwpoison_huge_page(hpage); + dequeue_hwpoisoned_huge_page(hpage); + /* keep elevated page count for bad page */ +@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags) + return ret; + + done: +- atomic_long_add(1, &mce_bad_pages); ++ atomic_long_add_unchecked(1, &mce_bad_pages); + SetPageHWPoison(page); + /* keep elevated page count for bad page */ + return ret; +diff --git a/mm/memory.c b/mm/memory.c +index 829d437..3d3926a 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + return; + + pmd = pmd_offset(pud, start); ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD) + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); ++#endif ++ + } + + static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, +@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + if (end - 1 > ceiling - 1) + return; + ++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD) + pud = pud_offset(pgd, start); + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); ++#endif ++ + } + + /* +@@ -1566,12 +1573,6 @@ no_page_table: + return page; + } + +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long 
addr) +-{ +- return stack_guard_page_start(vma, addr) || +- stack_guard_page_end(vma, addr+PAGE_SIZE); +-} +- + /** + * __get_user_pages() - pin user pages in memory + * @tsk: task_struct of target task +@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + i = 0; + +- do { ++ while (nr_pages) { + struct vm_area_struct *vma; + +- vma = find_extend_vma(mm, start); ++ vma = find_vma(mm, start); + if (!vma && in_gate_area(mm, start)) { + unsigned long pg = start & PAGE_MASK; + pgd_t *pgd; +@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + goto next_page; + } + +- if (!vma || ++ if (!vma || start < vma->vm_start || + (vma->vm_flags & (VM_IO | VM_PFNMAP)) || + !(vm_flags & vma->vm_flags)) + return i ? : -EFAULT; +@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + int ret; + unsigned int fault_flags = 0; + +- /* For mlock, just skip the stack guard page. */ +- if (foll_flags & FOLL_MLOCK) { +- if (stack_guard_page(vma, start)) +- goto next_page; +- } + if (foll_flags & FOLL_WRITE) + fault_flags |= FAULT_FLAG_WRITE; + if (nonblocking) +@@ -1800,7 +1796,7 @@ next_page: + start += PAGE_SIZE; + nr_pages--; + } while (nr_pages && start < vma->vm_end); +- } while (nr_pages); ++ } + return i; + } + EXPORT_SYMBOL(__get_user_pages); +@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, + page_add_file_rmap(page); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_file_pte(vma, addr, page, ptl); ++#endif ++ + retval = 0; + pte_unmap_unlock(pte, ptl); + return retval; +@@ -2041,10 +2041,22 @@ out: + int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, + struct page *page) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!page_count(page)) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags |= VM_INSERTPAGE; ++#endif ++ + vma->vm_flags |= VM_INSERTPAGE; + return insert_page(vma, addr, page, vma->vm_page_prot); + } +@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) + { + BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); ++ BUG_ON(vma->vm_mirror); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; +@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo + copy_user_highpage(dst, src, va, vma); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ spinlock_t *ptl; ++ pte_t *pte, entry; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ entry = *pte; ++ if (!pte_present(entry)) { ++ if (!pte_none(entry)) { ++ BUG_ON(pte_file(entry)); ++ free_swap_and_cache(pte_to_swp_entry(entry)); ++ pte_clear_not_present_full(mm, address, pte, 0); ++ } ++ } else { ++ struct page *page; ++ ++ flush_cache_page(vma, address, pte_pfn(entry)); ++ entry = ptep_clear_flush(vma, address, pte); ++ BUG_ON(pte_dirty(entry)); ++ page = vm_normal_page(vma, address, entry); ++ if (page) { ++ update_hiwater_rss(mm); ++ if (PageAnon(page)) ++ dec_mm_counter_fast(mm, MM_ANONPAGES); ++ else ++ dec_mm_counter_fast(mm, MM_FILEPAGES); ++ 
page_remove_rmap(page); ++ page_cache_release(page); ++ } ++ } ++ pte_unmap_unlock(pte, ptl); ++} ++ ++/* PaX: if vma is mirrored, synchronize the mirror's PTE ++ * ++ * the ptl of the lower mapped page is held on entry and is not released on exit ++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) ++ */ ++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || !PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(!PageLocked(page_m)); ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_anon_rmap(page_m, vma_m, address_m); ++ inc_mm_counter_fast(mm, MM_ANONPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++ unlock_page(page_m); ++} ++ ++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_file_rmap(page_m); ++ inc_mm_counter_fast(mm, MM_FILEPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++} ++ ++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++} ++ ++static void 
pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl) ++{ ++ struct page *page_m; ++ pte_t entry; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) ++ goto out; ++ ++ entry = *pte; ++ page_m = vm_normal_page(vma, address, entry); ++ if (!page_m) ++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); ++ else if (PageAnon(page_m)) { ++ if (pax_find_mirror_vma(vma)) { ++ pte_unmap_unlock(pte, ptl); ++ lock_page(page_m); ++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); ++ if (pte_same(entry, *pte)) ++ pax_mirror_anon_pte(vma, address, page_m, ptl); ++ else ++ unlock_page(page_m); ++ } ++ } else ++ pax_mirror_file_pte(vma, address, page_m, ptl); ++ ++out: ++ pte_unmap_unlock(pte, ptl); ++} ++#endif ++ + /* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address +@@ -2656,6 +2849,12 @@ gotten: + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(new_page)); ++#endif ++ + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter_fast(mm, MM_FILEPAGES); +@@ -2707,6 +2906,10 @@ gotten: + page_remove_rmap(old_page); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, new_page, ptl); ++#endif ++ + /* Free the old page.. */ + new_page = old_page; + ret |= VM_FAULT_WRITE; +@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + swap_free(entry); + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma)) ++#endif ++ + unlock_page(page); + if (swapcache) { + /* +@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + out: +@@ -3028,40 +3241,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_{down|up}wards()", +- * except we must first make sure that 'address{-|+}PAGE_SIZE' +- * doesn't hit another vma. +- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- expand_downwards(vma, address - PAGE_SIZE); +- } +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { +- struct vm_area_struct *next = vma->vm_next; +- +- /* As VM_GROWSDOWN but s/below/above/ */ +- if (next && next->vm_start == address + PAGE_SIZE) +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; +- +- expand_upwards(vma, address + PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. 
+@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags) + { +- struct page *page; ++ struct page *page = NULL; + spinlock_t *ptl; + pte_t entry; + +- pte_unmap(page_table); +- +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; +- +- /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + if (!pte_none(*page_table)) + goto release; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + inc_mm_counter_fast(mm, MM_ANONPAGES); + page_add_new_anon_rmap(page, vma, address); + setpte: +@@ -3116,6 +3296,12 @@ setpte: + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (page) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + return 0; +@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + */ + /* Only go through if we didn't race with anybody else... */ + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon && pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + flush_icache_page(vma, page); + entry = mk_pte(page, vma->vm_page_prot); + if (flags & FAULT_FLAG_WRITE) +@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + + /* no need to invalidate: a not-present page won't be cached */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++ else ++ pax_mirror_file_pte(vma, address, page, ptl); ++#endif ++ + } else { + if (cow_page) + mem_cgroup_uncharge_page(cow_page); +@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm, + if (flags & FAULT_FLAG_WRITE) + flush_tlb_fix_spurious_fault(vma, address); + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_pte(vma, address, pte, pmd, ptl); ++ return 0; ++#endif ++ + unlock: + pte_unmap_unlock(pte, ptl); + return 0; +@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd; + pte_t *pte; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + __set_current_state(TASK_RUNNING); + + count_vm_event(PGFAULT); +@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, flags); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ pgd_t *pgd_m; ++ pud_t *pud_m; ++ pmd_t *pmd_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ } else ++ address_m = address + 
SEGMEXEC_TASK_SIZE; ++ ++ pgd_m = pgd_offset(mm, address_m); ++ pud_m = pud_alloc(mm, pgd_m, address_m); ++ if (!pud_m) ++ return VM_FAULT_OOM; ++ pmd_m = pmd_alloc(mm, pud_m, address_m); ++ if (!pmd_m) ++ return VM_FAULT_OOM; ++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m)) ++ return VM_FAULT_OOM; ++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m); ++ } ++#endif ++ + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) +@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + * run pte_offset_map on the pmd, if an huge pmd could + * materialize from under us from a different thread. + */ +- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address)) ++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address))) + return VM_FAULT_OOM; + /* if an huge pmd materialized from under us just retry later */ + if (unlikely(pmd_trans_huge(*pmd))) +@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + /* + * Make sure the vDSO gets into every core dump. + * Dumping its contents makes post-mortem fully interpretable later +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index c3fdbcb..2e8ef90 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, + unsigned long vmstart; + unsigned long vmend; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + vma = find_vma_prev(mm, start, &prev); + if (!vma || vma->vm_start > start) + return -EFAULT; +@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, + err = policy_vma(vma, new_pol); + if (err) + goto out; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ err = policy_vma(vma_m, new_pol); ++ if (err) ++ goto out; ++ } ++#endif ++ + } + + out: +@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len, + + if (end < start) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (end == start) + return 0; + +@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + if (!mm) + goto out; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. 
The right exists if the process has administrative +@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +diff --git a/mm/migrate.c b/mm/migrate.c +index 177aca4..ab3a744 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + if (!mm) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. The right exists if the process has administrative +@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +diff --git a/mm/mlock.c b/mm/mlock.c +index 4f4f53b..9511904 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -13,6 +13,7 @@ + #include <linux/pagemap.h> + #include <linux/mempolicy.h> + #include <linux/syscalls.h> ++#include <linux/security.h> + #include <linux/sched.h> + #include <linux/export.h> + #include <linux/rmap.h> +@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on) + return -EINVAL; + if (end == start) + return 0; ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + vma = find_vma_prev(current->mm, start, &prev); + if (!vma || vma->vm_start > start) + return -ENOMEM; +@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on) + for (nstart = start ; ; ) { + vm_flags_t newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ + /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ + + newflags = vma->vm_flags | VM_LOCKED; +@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) + lock_limit >>= PAGE_SHIFT; + + /* check against resource limits */ ++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); + if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) + error = do_mlock(start, len, 1); + up_write(&current->mm->mmap_sem); +@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) + static int do_mlockall(int flags) + { + struct vm_area_struct * vma, * prev = NULL; +- unsigned int def_flags = 0; + + if (flags & MCL_FUTURE) +- def_flags = VM_LOCKED; +- current->mm->def_flags = def_flags; ++ current->mm->def_flags |= VM_LOCKED; ++ else ++ current->mm->def_flags &= ~VM_LOCKED; + if (flags == MCL_FUTURE) + goto out; + + for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { + vm_flags_t newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ ++ BUG_ON(vma->vm_end > TASK_SIZE); + newflags = vma->vm_flags | VM_LOCKED; + if (!(flags & MCL_CURRENT)) + newflags &= ~VM_LOCKED; +@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) + lock_limit >>= PAGE_SHIFT; + + ret = -ENOMEM; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || + capable(CAP_IPC_LOCK)) + ret = do_mlockall(flags); +diff --git a/mm/mmap.c b/mm/mmap.c +index eae90af..c930262 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -46,6 +46,16 @@ + #define arch_rebalance_pgtables(addr, len) (addr) + #endif + ++static inline void verify_mm_writelocked(struct mm_struct *mm) ++{ ++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX) ++ if (unlikely(down_read_trylock(&mm->mmap_sem))) { ++ up_read(&mm->mmap_sem); ++ BUG(); ++ } ++#endif ++} ++ + static void unmap_region(struct mm_struct *mm, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long start, unsigned long end); +@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm, + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + */ +-pgprot_t protection_map[16] = { ++pgprot_t protection_map[16] __read_only = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 + }; + +-pgprot_t vm_get_page_prot(unsigned long vm_flags) ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags) + { +- return __pgprot(pgprot_val(protection_map[vm_flags & ++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | + pgprot_val(arch_vm_get_page_prot(vm_flags))); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if (!(__supported_pte_mask & _PAGE_NX) && ++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && ++ (vm_flags & (VM_READ | VM_WRITE))) ++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); ++#endif ++ ++ return prot; + } + EXPORT_SYMBOL(vm_get_page_prot); + + int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */ + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; + /* + * Make sure vm_committed_as in one cacheline and not cacheline shared with + * other variables. It can be updated by several CPUs frequently.
+@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) + struct vm_area_struct *next = vma->vm_next; + + might_sleep(); ++ BUG_ON(vma->vm_mirror); + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); + if (vma->vm_file) { +@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + * not page aligned -Ram Gupta + */ + rlim = rlimit(RLIMIT_DATA); ++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1); + if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + + (mm->end_data - mm->start_data) > rlim) + goto out; +@@ -689,6 +711,12 @@ static int + can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { + if (vma->vm_pgoff == vm_pgoff) +@@ -708,6 +736,12 @@ static int + can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { + pgoff_t vm_pglen; +@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, + unsigned long end, unsigned long vm_flags, +- struct anon_vma *anon_vma, struct file *file, ++ struct anon_vma *anon_vma, struct file *file, + pgoff_t pgoff, struct mempolicy *policy) + { + pgoff_t pglen = (end - addr) >> PAGE_SHIFT; + struct vm_area_struct *area, *next; + int err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; ++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; ++ ++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); ++#endif ++ + /* + * We later require that vma->vm_flags == vm_flags, + * so this tests vma->vm_flags & VM_SPECIAL, too. +@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + if (next && next->vm_end == end) /* cases 6, 7, 8 */ + next = next->vm_next; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev) ++ prev_m = pax_find_mirror_vma(prev); ++ if (area) ++ area_m = pax_find_mirror_vma(area); ++ if (next) ++ next_m = pax_find_mirror_vma(next); ++#endif ++ + /* + * Can it merge with the predecessor? 
+ */ +@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + /* cases 1, 6 */ + err = vma_adjust(prev, prev->vm_start, + next->vm_end, prev->vm_pgoff, NULL); +- } else /* cases 2, 5, 7 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ next_m->vm_end, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 2, 5, 7 */ + err = vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ end_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } + if (err) + return NULL; + khugepaged_enter_vma_merge(prev); +@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + mpol_equal(policy, vma_policy(next)) && + can_vma_merge_before(next, vm_flags, + anon_vma, file, pgoff+pglen)) { +- if (prev && addr < prev->vm_end) /* case 4 */ ++ if (prev && addr < prev->vm_end) { /* case 4 */ + err = vma_adjust(prev, prev->vm_start, + addr, prev->vm_pgoff, NULL); +- else /* cases 3, 8 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ addr_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 3, 8 */ + err = vma_adjust(area, addr, next->vm_end, + next->vm_pgoff - pglen, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && area_m) ++ err = vma_adjust(area_m, addr_m, next_m->vm_end, ++ next_m->vm_pgoff - pglen, NULL); ++#endif ++ ++ } + if (err) + return NULL; + khugepaged_enter_vma_merge(area); +@@ -921,14 +1001,11 @@ none: + void vm_stat_account(struct mm_struct *mm, unsigned long flags, + struct file *file, long pages) + { +- const unsigned long stack_flags +- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); +- + if (file) { + mm->shared_vm += pages; + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + mm->exec_vm += pages; +- } else if (flags & stack_flags) ++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) + mm->stack_vm += pages; + if (flags & (VM_RESERVED|VM_IO)) + mm->reserved_vm += pages; +@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + * (the exception is when the underlying filesystem is noexec + * mounted, in which case we dont add PROT_EXEC.) + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + prot |= PROT_EXEC; + +@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. + */ +- addr = get_unmapped_area(file, addr, len, pgoff, flags); ++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? 
MAP_EXECUTABLE : 0)); + if (addr & ~PAGE_MASK) + return addr; + +@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++#ifndef CONFIG_PAX_MPROTECT_COMPAT ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) { ++ gr_log_rwxmmap(file); ++ ++#ifdef CONFIG_PAX_EMUPLT ++ vm_flags &= ~VM_EXEC; ++#else ++ return -EPERM; ++#endif ++ ++ } ++ ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++#else ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) ++ vm_flags &= ~VM_PAGEEXEC; ++#endif ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + return -EAGAIN; + } +@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + if (error) + return error; + ++ if (!gr_acl_handle_mmap(file, prot)) ++ return -EACCES; ++ + return mmap_region(file, addr, len, flags, vm_flags, pgoff); + } + EXPORT_SYMBOL(do_mmap_pgoff); +@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma) + vm_flags_t vm_flags = vma->vm_flags; + + /* If it was private or non-writable, the write bit is already clear */ +- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) ++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) + return 0; + + /* The backer wishes to know when pages are first written to? */ +@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long charged = 0; + struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + /* Clear old maps */ + error = -ENOMEM; +-munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limit. 
*/ +@@ -1258,6 +1379,16 @@ munmap_back: + goto unacct_error; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto free_vma; ++ } ++ } ++#endif ++ + vma->vm_mm = mm; + vma->vm_start = addr; + vma->vm_end = addr + len; +@@ -1266,8 +1397,9 @@ munmap_back: + vma->vm_pgoff = pgoff; + INIT_LIST_HEAD(&vma->anon_vma_chain); + ++ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */ ++ + if (file) { +- error = -EINVAL; + if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) + goto free_vma; + if (vm_flags & VM_DENYWRITE) { +@@ -1281,6 +1413,19 @@ munmap_back: + error = file->f_op->mmap(file, vma); + if (error) + goto unmap_and_free_vma; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m && (vm_flags & VM_EXECUTABLE)) ++ added_exe_file_vma(mm); ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { ++ vma->vm_flags |= VM_PAGEEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ } ++#endif ++ + if (vm_flags & VM_EXECUTABLE) + added_exe_file_vma(mm); + +@@ -1293,6 +1438,8 @@ munmap_back: + pgoff = vma->vm_pgoff; + vm_flags = vma->vm_flags; + } else if (vm_flags & VM_SHARED) { ++ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP))) ++ goto free_vma; + error = shmem_zero_setup(vma); + if (error) + goto free_vma; +@@ -1316,6 +1463,11 @@ munmap_back: + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + /* Once vma denies write, undo our temporary denial count */ + if (correct_wcount) + atomic_inc(&inode->i_writecount); +@@ -1324,6 +1476,7 @@ out: + + mm->total_vm += len >> PAGE_SHIFT; + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); ++ track_exec_limit(mm, addr, addr + len, vm_flags); + if (vm_flags & VM_LOCKED) { + if (!mlock_vma_pages_range(vma, addr, addr + len)) + mm->locked_vm += (len >> PAGE_SHIFT); +@@ -1341,6 +1494,12 @@ unmap_and_free_vma: + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ kmem_cache_free(vm_area_cachep, vma_m); ++#endif ++ + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -1348,6 +1507,44 @@ unacct_error: + return error; + } + ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len) ++{ ++ if (!vma) { ++#ifdef CONFIG_STACK_GROWSUP ++ if (addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ else ++ vma = find_vma(current->mm, 0); ++ if (vma && (vma->vm_flags & VM_GROWSUP)) ++ return false; ++#endif ++ return true; ++ } ++ ++ if (addr + len > vma->vm_start) ++ return false; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++#ifdef CONFIG_STACK_GROWSUP ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) ++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++#endif ++ ++ return true; ++} ++ ++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len) ++{ ++ if (vma->vm_start < len) ++ return -ENOMEM; ++ if (!(vma->vm_flags & VM_GROWSDOWN)) ++ return vma->vm_start - len; ++ if (sysctl_heap_stack_gap <= vma->vm_start - len) ++ return vma->vm_start - len - sysctl_heap_stack_gap; ++ return -ENOMEM; ++} ++ + /* Get an address 
range which is currently unmapped. + * For shmat() with addr=0. + * +@@ -1374,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -1396,34 +1598,40 @@ full_search: + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- addr = TASK_UNMAPPED_BASE; +- start_addr = addr; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- /* +- * Remember the place where we stopped the search: +- */ +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; + } + #endif + + void arch_unmap_area(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the lowest possible address? 
+ */ +- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { ++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) { + mm->free_area_cache = addr; + mm->cached_hole_size = ~0UL; + } +@@ -1441,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; +- unsigned long addr = addr0; ++ unsigned long base = mm->mmap_base, addr = addr0; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -1450,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ +@@ -1471,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -1488,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + +@@ -1497,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ +- addr = vma->vm_start-len; +- } while (len < vma->vm_start); ++ addr = skip_heap_stack_gap(vma, len); ++ } while (!IS_ERR_VALUE(addr)); + + bottomup: + /* +@@ -1507,13 +1720,21 @@ bottomup: + * can happen with large stack limits and large mmap() + * allocations. + */ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; +- mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ +- mm->free_area_cache = mm->mmap_base; ++ mm->mmap_base = base; ++ mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +@@ -1522,6 +1743,12 @@ bottomup: + + void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the highest possible address? 
+ */ +@@ -1529,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + mm->free_area_cache = addr; + + /* dont allow allocations above current base */ +- if (mm->free_area_cache > mm->mmap_base) ++ if (mm->free_area_cache > mm->mmap_base) { + mm->free_area_cache = mm->mmap_base; ++ mm->cached_hole_size = ~0UL; ++ } + } + + unsigned long +@@ -1603,40 +1832,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + + EXPORT_SYMBOL(find_vma); + +-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ ++/* ++ * Same as find_vma, but also return a pointer to the previous VMA in *pprev. ++ */ + struct vm_area_struct * + find_vma_prev(struct mm_struct *mm, unsigned long addr, + struct vm_area_struct **pprev) + { +- struct vm_area_struct *vma = NULL, *prev = NULL; +- struct rb_node *rb_node; +- if (!mm) +- goto out; +- +- /* Guard against addr being lower than the first VMA */ +- vma = mm->mmap; +- +- /* Go through the RB tree quickly. */ +- rb_node = mm->mm_rb.rb_node; +- +- while (rb_node) { +- struct vm_area_struct *vma_tmp; +- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); +- +- if (addr < vma_tmp->vm_end) { +- rb_node = rb_node->rb_left; +- } else { +- prev = vma_tmp; +- if (!prev->vm_next || (addr < prev->vm_next->vm_end)) +- break; ++ struct vm_area_struct *vma; ++ ++ vma = find_vma(mm, addr); ++ if (vma) { ++ *pprev = vma->vm_prev; ++ } else { ++ struct rb_node *rb_node = mm->mm_rb.rb_node; ++ *pprev = NULL; ++ while (rb_node) { ++ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb); + rb_node = rb_node->rb_right; + } + } ++ return vma; ++} ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *vma_m; + +-out: +- *pprev = prev; +- return prev ? prev->vm_next : vma; ++ BUG_ON(!vma || vma->vm_start >= vma->vm_end); ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { ++ BUG_ON(vma->vm_mirror); ++ return NULL; ++ } ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); ++ vma_m = vma->vm_mirror; ++ BUG_ON(!vma_m || vma_m->vm_mirror != vma); ++ BUG_ON(vma->vm_file != vma_m->vm_file); ++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); ++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff); ++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root); ++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED)); ++ return vma_m; + } ++#endif + + /* + * Verify that the stack growth is acceptable and +@@ -1654,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + return -ENOMEM; + + /* Stack limit test */ ++ gr_learn_resource(current, RLIMIT_STACK, size, 1); + if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + +@@ -1664,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + locked = mm->locked_vm + grow; + limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); + limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > limit && !capable(CAP_IPC_LOCK)) + return -ENOMEM; + } +@@ -1694,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + * PA-RISC uses this for its stack; IA64 for its Register Backing Store. + * vma is the last one with address > vma->vm_end. Have to extend vma. 
+ */ ++#ifndef CONFIG_IA64 ++static ++#endif + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { + int error; ++ bool locknext; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + ++ /* Also guard against wrapping around to address 0. */ ++ if (address < PAGE_ALIGN(address+1)) ++ address = PAGE_ALIGN(address+1); ++ else ++ return -ENOMEM; ++ + /* + * We must make sure the anon_vma is allocated + * so that the anon_vma locking is not a noop. + */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; ++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); ++ if (locknext && anon_vma_prepare(vma->vm_next)) ++ return -ENOMEM; + vma_lock_anon_vma(vma); ++ if (locknext) ++ vma_lock_anon_vma(vma->vm_next); + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the +- * anon_vma lock to serialize against concurrent expand_stacks. +- * Also guard against wrapping around to address 0. ++ * anon_vma locks to serialize against concurrent expand_stacks ++ * and expand_upwards. + */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else { +- vma_unlock_anon_vma(vma); +- return -ENOMEM; +- } + error = 0; + + /* Somebody else might have raced and expanded it already */ +- if (address > vma->vm_end) { ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { + unsigned long size, grow; + + size = address - vma->vm_start; +@@ -1739,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + } + } + } ++ if (locknext) ++ vma_unlock_anon_vma(vma->vm_next); + vma_unlock_anon_vma(vma); + khugepaged_enter_vma_merge(vma); + return error; +@@ -1752,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { + int error; ++ bool lockprev = false; ++ struct vm_area_struct *prev; + + /* + * We must make sure the anon_vma is allocated +@@ -1765,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma, + if (error) + return error; + ++ prev = vma->vm_prev; ++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) ++ lockprev = prev && (prev->vm_flags & VM_GROWSUP); ++#endif ++ if (lockprev && anon_vma_prepare(prev)) ++ return -ENOMEM; ++ if (lockprev) ++ vma_lock_anon_vma(prev); ++ + vma_lock_anon_vma(vma); + + /* +@@ -1774,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma, + */ + + /* Somebody else might have raced and expanded it already */ +- if (address < vma->vm_start) { ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { + unsigned long size, grow; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + +@@ -1786,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma, + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; ++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ vma_m->vm_start -= grow << PAGE_SHIFT; ++ vma_m->vm_pgoff -= grow; ++ } ++#endif ++ + perf_event_mmap(vma); + } + } + } + 
vma_unlock_anon_vma(vma); ++ if (lockprev) ++ vma_unlock_anon_vma(prev); + khugepaged_enter_vma_merge(vma); + return error; + } +@@ -1860,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) + do { + long nrpages = vma_pages(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) { ++ vma = remove_vma(vma); ++ continue; ++ } ++#endif ++ + mm->total_vm -= nrpages; + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); + vma = remove_vma(vma); +@@ -1905,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, + insertion_point = (prev ? &prev->vm_next : &mm->mmap); + vma->vm_prev = NULL; + do { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mirror) { ++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); ++ vma->vm_mirror->vm_mirror = NULL; ++ vma->vm_mirror->vm_flags &= ~VM_EXEC; ++ vma->vm_mirror = NULL; ++ } ++#endif ++ + rb_erase(&vma->vm_rb, &mm->mm_rb); + mm->map_count--; + tail_vma = vma; +@@ -1933,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + struct vm_area_struct *new; + int err = -ENOMEM; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m, *new_m = NULL; ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (is_vm_hugetlb_page(vma) && (addr & + ~(huge_page_mask(hstate_vma(vma))))) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new) + goto out_err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!new_m) { ++ kmem_cache_free(vm_area_cachep, new); ++ goto out_err; ++ } ++ } ++#endif ++ + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + +@@ -1953,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ *new_m = *vma_m; ++ INIT_LIST_HEAD(&new_m->anon_vma_chain); ++ new_m->vm_mirror = new; ++ new->vm_mirror = new_m; ++ ++ if (new_below) ++ new_m->vm_end = addr_m; ++ else { ++ new_m->vm_start = addr_m; ++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); ++ } ++ } ++#endif ++ + pol = mpol_dup(vma_policy(vma)); + if (IS_ERR(pol)) { + err = PTR_ERR(pol); +@@ -1978,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && vma_m) { ++ if (anon_vma_clone(new_m, vma_m)) ++ goto out_free_mpol; ++ ++ mpol_get(pol); ++ vma_set_policy(new_m, pol); ++ ++ if (new_m->vm_file) { ++ get_file(new_m->vm_file); ++ if (vma_m->vm_flags & VM_EXECUTABLE) ++ added_exe_file_vma(mm); ++ } ++ ++ if (new_m->vm_ops && new_m->vm_ops->open) ++ new_m->vm_ops->open(new_m); ++ ++ if (new_below) ++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + ++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); ++ else ++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); ++ ++ if (err) { ++ if (new_m->vm_ops && new_m->vm_ops->close) ++ new_m->vm_ops->close(new_m); ++ if (new_m->vm_file) { ++ if (vma_m->vm_flags & VM_EXECUTABLE) ++ removed_exe_file_vma(mm); ++ fput(new_m->vm_file); ++ } ++ mpol_put(pol); ++ } ++ } ++#endif ++ + /* Success. 
*/ + if (!err) + return 0; +@@ -1990,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + removed_exe_file_vma(mm); + fput(new->vm_file); + } +- unlink_anon_vmas(new); + out_free_mpol: + mpol_put(pol); + out_free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (new_m) { ++ unlink_anon_vmas(new_m); ++ kmem_cache_free(vm_area_cachep, new_m); ++ } ++#endif ++ ++ unlink_anon_vmas(new); + kmem_cache_free(vm_area_cachep, new); + out_err: + return err; +@@ -2006,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, int new_below) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); ++ if (mm->map_count >= sysctl_max_map_count-1) ++ return -ENOMEM; ++ } else ++#endif ++ + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + +@@ -2017,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + * work. This now handles partial unmappings. + * Jeremy Fitzhardinge jeremy@goop.org + */ ++#ifdef CONFIG_PAX_SEGMEXEC + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + { ++ int ret = __do_munmap(mm, start, len); ++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) ++ return ret; ++ ++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); ++} ++ ++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#else ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#endif ++{ + unsigned long end; + struct vm_area_struct *vma, *prev, *last; + ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) + return -EINVAL; + +@@ -2096,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + /* Fix up all other VM information */ + remove_vma_list(mm, vma); + ++ track_exec_limit(mm, start, end, 0UL); ++ + return 0; + } + +@@ -2108,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) + + profile_munmap(addr); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) ++ return -EINVAL; ++#endif ++ + down_write(&mm->mmap_sem); + ret = do_munmap(mm, addr, len); + up_write(&mm->mmap_sem); + return ret; + } + +-static inline void verify_mm_writelocked(struct mm_struct *mm) +-{ +-#ifdef CONFIG_DEBUG_VM +- if (unlikely(down_read_trylock(&mm->mmap_sem))) { +- WARN_ON(1); +- up_read(&mm->mmap_sem); +- } +-#endif +-} +- + /* + * this is really a simplified "do_mmap". it only handles + * anonymous maps. 
eventually we may be able to do some +@@ -2137,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + struct rb_node ** rb_link, * rb_parent; + pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; ++ unsigned long charged; + + len = PAGE_ALIGN(len); + if (!len) +@@ -2148,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (error & ~PAGE_MASK) + return error; + ++ charged = len >> PAGE_SHIFT; ++ + /* + * mlock MCL_FUTURE? + */ + if (mm->def_flags & VM_LOCKED) { + unsigned long locked, lock_limit; +- locked = len >> PAGE_SHIFT; ++ locked = charged; + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; +@@ -2174,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + /* + * Clear old maps. this also does some error checking for us + */ +- munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limits *after* clearing old maps... */ +- if (!may_expand_vm(mm, len >> PAGE_SHIFT)) ++ if (!may_expand_vm(mm, charged)) + return -ENOMEM; + + if (mm->map_count > sysctl_max_map_count) + return -ENOMEM; + +- if (security_vm_enough_memory(len >> PAGE_SHIFT)) ++ if (security_vm_enough_memory(charged)) + return -ENOMEM; + + /* Can we just expand an old private anonymous mapping? */ +@@ -2203,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + */ + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma) { +- vm_unacct_memory(len >> PAGE_SHIFT); ++ vm_unacct_memory(charged); + return -ENOMEM; + } + +@@ -2217,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) + vma_link(mm, vma, prev, rb_link, rb_parent); + out: + perf_event_mmap(vma); +- mm->total_vm += len >> PAGE_SHIFT; ++ mm->total_vm += charged; + if (flags & VM_LOCKED) { + if (!mlock_vma_pages_range(vma, addr, addr + len)) +- mm->locked_vm += (len >> PAGE_SHIFT); ++ mm->locked_vm += charged; + } ++ track_exec_limit(mm, addr, addr + len, flags); + return addr; + } + +@@ -2268,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm) + * Walk the list again, actually closing and freeing it, + * with preemption enabled, without holding any MM locks. 
+ */ +- while (vma) ++ while (vma) { ++ vma->vm_mirror = NULL; + vma = remove_vma(vma); ++ } + + BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + } +@@ -2283,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) + struct vm_area_struct * __vma, * prev; + struct rb_node ** rb_link, * rb_parent; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1)) ++ return -EPERM; ++ + /* + * The vm_pgoff of a purely anonymous vma should be irrelevant + * until its first write fault, when page's anon_vma and index +@@ -2305,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) ++ return -ENOMEM; ++ } ++#endif ++ + vma_link(mm, vma, prev, rb_link, rb_parent); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + return 0; + } + +@@ -2323,6 +2769,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + struct rb_node **rb_link, *rb_parent; + struct mempolicy *pol; + ++ BUG_ON(vma->vm_mirror); ++ + /* + * If anonymous vma has not yet been faulted, update new pgoff + * to match new location, to increase its chance of merging. +@@ -2373,6 +2821,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + return NULL; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *prev_m; ++ struct rb_node **rb_link_m, *rb_parent_m; ++ struct mempolicy *pol_m; ++ ++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)); ++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror); ++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m))); ++ *vma_m = *vma; ++ INIT_LIST_HEAD(&vma_m->anon_vma_chain); ++ if (anon_vma_clone(vma_m, vma)) ++ return -ENOMEM; ++ pol_m = vma_policy(vma_m); ++ mpol_get(pol_m); ++ vma_set_policy(vma_m, pol_m); ++ vma_m->vm_start += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_end += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED); ++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags); ++ if (vma_m->vm_file) ++ get_file(vma_m->vm_file); ++ if (vma_m->vm_ops && vma_m->vm_ops->open) ++ vma_m->vm_ops->open(vma_m); ++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m); ++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m); ++ vma_m->vm_mirror = vma; ++ vma->vm_mirror = vma_m; ++ return 0; ++} ++#endif ++ + /* + * Return true if the calling process may expand its vm space by the passed + * number of pages +@@ -2383,7 +2864,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) + unsigned long lim; + + lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; +- ++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1); + if (cur + npages > lim) + return 0; + return 1; +@@ -2454,6 +2935,22 @@ int install_special_mapping(struct mm_struct *mm, + vma->vm_start = addr; + vma->vm_end = addr + len; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++#ifndef CONFIG_PAX_MPROTECT_COMPAT ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) ++ return -EPERM; ++ if (!(vm_flags & 
VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++#else ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 5a688a2..27e031c 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -23,10 +23,16 @@ + #include <linux/mmu_notifier.h> + #include <linux/migrate.h> + #include <linux/perf_event.h> ++ ++#ifdef CONFIG_PAX_MPROTECT ++#include <linux/elf.h> ++#endif ++ + #include <asm/uaccess.h> + #include <asm/pgtable.h> + #include <asm/cacheflush.h> + #include <asm/tlbflush.h> ++#include <asm/mmu_context.h> + + #ifndef pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma, + flush_tlb_range(vma, start, end); + } + ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++/* called while holding the mmap semaphor for writing except stack expansion */ ++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) ++{ ++ unsigned long oldlimit, newlimit = 0UL; ++ ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX)) ++ return; ++ ++ spin_lock(&mm->page_table_lock); ++ oldlimit = mm->context.user_cs_limit; ++ if ((prot & VM_EXEC) && oldlimit < end) ++ /* USER_CS limit moved up */ ++ newlimit = end; ++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end) ++ /* USER_CS limit moved down */ ++ newlimit = start; ++ ++ if (newlimit) { ++ mm->context.user_cs_limit = newlimit; ++ ++#ifdef CONFIG_SMP ++ wmb(); ++ cpus_clear(mm->context.cpu_user_cs_mask); ++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask); ++#endif ++ ++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id()); ++ } ++ spin_unlock(&mm->page_table_lock); ++ if (newlimit == end) { ++ struct vm_area_struct *vma = find_vma(mm, oldlimit); ++ ++ for (; vma && vma->vm_start < end; vma = vma->vm_next) ++ if (is_vm_hugetlb_page(vma)) ++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot); ++ else ++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma)); ++ } ++} ++#endif ++ + int + mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + unsigned long start, unsigned long end, unsigned long newflags) +@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + int error; + int dirty_accountable = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++ unsigned long start_m, end_m; ++ ++ start_m = start + SEGMEXEC_TASK_SIZE; ++ end_m = end + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (newflags == oldflags) { + *pprev = vma; + return 0; + } + ++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) { ++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next; ++ ++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end) ++ return -ENOMEM; ++ ++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end) ++ return -ENOMEM; ++ } ++ + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we +@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + } + } + ++#ifdef 
CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) { ++ if (start != vma->vm_start) { ++ error = split_vma(mm, vma, start, 1); ++ if (error) ++ goto fail; ++ BUG_ON(!*pprev || (*pprev)->vm_next == vma); ++ *pprev = (*pprev)->vm_next; ++ } ++ ++ if (end != vma->vm_end) { ++ error = split_vma(mm, vma, end, 0); ++ if (error) ++ goto fail; ++ } ++ ++ if (pax_find_mirror_vma(vma)) { ++ error = __do_munmap(mm, start_m, end_m - start_m); ++ if (error) ++ goto fail; ++ } else { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto fail; ++ } ++ vma->vm_flags = newflags; ++ error = pax_mirror_vma(vma_m, vma); ++ if (error) { ++ vma->vm_flags = oldflags; ++ goto fail; ++ } ++ } ++ } ++#endif ++ + /* + * First try to merge with previous and/or next vma. + */ +@@ -204,9 +306,21 @@ success: + * vm_flags and vm_page_prot are protected by the mmap_sem + * held in write mode. + */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ)) ++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ; ++#endif ++ + vma->vm_flags = newflags; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->binfmt && mm->binfmt->handle_mprotect) ++ mm->binfmt->handle_mprotect(vma, newflags); ++#endif ++ + vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, +- vm_get_page_prot(newflags)); ++ vm_get_page_prot(vma->vm_flags)); + + if (vma_wants_writenotify(vma)) { + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); +@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + end = start + len; + if (end <= start) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (!arch_validate_prot(prot)) + return -EINVAL; + +@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + /* + * Does the application expect PROT_READ to imply PROT_EXEC: + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + vm_flags = calc_vm_prot_bits(prot); +@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + if (start > vma->vm_start) + prev = vma; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect) ++ current->mm->binfmt->handle_mprotect(vma, vm_flags); ++#endif ++ + for (nstart = start ; ; ) { + unsigned long newflags; + +@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + + /* newflags >> 4 shift VM_MAY% in place of VM_% */ + if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { ++ if (prot & (PROT_WRITE | PROT_EXEC)) ++ gr_log_rwxmprotect(vma->vm_file); ++ ++ error = -EACCES; ++ goto out; ++ } ++ ++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { + error = -EACCES; + goto out; + } +@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); + if (error) + goto out; ++ ++ track_exec_limit(current->mm, nstart, tmp, vm_flags); ++ + nstart = tmp; + + if (nstart < prev->vm_end) +diff --git a/mm/mremap.c b/mm/mremap.c +index d6959cb..18a402a 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -106,6 +106,12 @@ static void move_ptes(struct 
vm_area_struct *vma, pmd_t *old_pmd, + continue; + pte = ptep_get_and_clear(mm, old_addr, old_pte); + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) ++ pte = pte_exprotect(pte); ++#endif ++ + set_pte_at(mm, new_addr, new_pte, pte); + } + +@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, + if (is_vm_hugetlb_page(vma)) + goto Einval; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ goto Einval; ++#endif ++ + /* We can't remap across vm area boundaries */ + if (old_len > vma->vm_end - addr) + goto Efault; +@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr, + unsigned long ret = -EINVAL; + unsigned long charged = 0; + unsigned long map_flags; ++ unsigned long pax_task_size = TASK_SIZE; + + if (new_addr & ~PAGE_MASK) + goto out; + +- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len) + goto out; + + /* Check if the location we're moving into overlaps the + * old location at all, and fail if it does. + */ +- if ((new_addr <= addr) && (new_addr+new_len) > addr) +- goto out; +- +- if ((addr <= new_addr) && (addr+old_len) > new_addr) ++ if (addr + old_len > new_addr && new_addr + new_len > addr) + goto out; + + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); +@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr, + struct vm_area_struct *vma; + unsigned long ret = -EINVAL; + unsigned long charged = 0; ++ unsigned long pax_task_size = TASK_SIZE; + + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) + goto out; +@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr, + if (!new_len) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > pax_task_size || addr > pax_task_size-new_len || ++ old_len > pax_task_size || addr > pax_task_size-old_len) ++ goto out; ++ + if (flags & MREMAP_FIXED) { + if (flags & MREMAP_MAYMOVE) + ret = mremap_to(addr, old_len, new_addr, new_len); +@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr, + addr + new_len); + } + ret = addr; ++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); + goto out; + } + } +@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr, + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); + if (ret) + goto out; ++ ++ map_flags = vma->vm_flags; + ret = move_vma(vma, addr, old_len, new_len, new_addr); ++ if (!(ret & ~PAGE_MASK)) { ++ track_exec_limit(current->mm, addr, addr + old_len, 0UL); ++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); ++ } + } + out: + if (ret & ~PAGE_MASK) +diff --git a/mm/nobootmem.c b/mm/nobootmem.c +index 7fa41b4..6087460 100644 +--- a/mm/nobootmem.c ++++ b/mm/nobootmem.c +@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) + unsigned long __init free_all_memory_core_early(int nodeid) + { + int i; +- u64 start, end; ++ u64 start, end, startrange, endrange; + unsigned long count = 0; +- struct range *range = NULL; ++ struct range *range = NULL, rangerange = { 0, 0 }; + int nr_range; + + nr_range = 
get_free_all_memory_range(&range, nodeid); ++ startrange = __pa(range) >> PAGE_SHIFT; ++ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT; + + for (i = 0; i < nr_range; i++) { + start = range[i].start; + end = range[i].end; ++ if (start <= endrange && startrange < end) { ++ BUG_ON(rangerange.start | rangerange.end); ++ rangerange = range[i]; ++ continue; ++ } + count += end - start; + __free_pages_memory(start, end); + } ++ start = rangerange.start; ++ end = rangerange.end; ++ count += end - start; ++ __free_pages_memory(start, end); + + return count; + } +diff --git a/mm/nommu.c b/mm/nommu.c +index f59e170..34e2a2b 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; +-int heap_stack_gap = 0; + + atomic_long_t mmap_pages_allocated; + +@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + EXPORT_SYMBOL(find_vma); + + /* +- * find a VMA +- * - we don't extend stack VMAs under NOMMU conditions +- */ +-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +-{ +- return find_vma(mm, addr); +-} +- +-/* + * expand a stack to a given address + * - not supported under NOMMU conditions + */ +@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + + /* most fields are the same, copy all, and then fixup */ + *new = *vma; ++ INIT_LIST_HEAD(&new->anon_vma_chain); + *region = *vma->vm_region; + new->vm_region = region; + +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 485be89..c059ad3 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -341,7 +341,7 @@ out: + * This usage means that zero-order pages may not be compound. 
+ */ + +-static void free_compound_page(struct page *page) ++void free_compound_page(struct page *page) + { + __free_pages_ok(page, compound_order(page)); + } +@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order) + int i; + int bad = 0; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ unsigned long index = 1UL << order; ++#endif ++ + trace_mm_page_free_direct(page, order); + kmemcheck_free_shadow(page, order); + +@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order) + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ for (; index; --index) ++ sanitize_highpage(page + index - 1); ++#endif ++ + arch_free_page(page, order); + kernel_map_pages(page, 1 << order, 0); + +@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + ++#ifndef CONFIG_PAX_MEMORY_SANITIZE + if (gfp_flags & __GFP_ZERO) + prep_zero_page(page, order, gfp_flags); ++#endif + + if (order && (gfp_flags & __GFP_COMP)) + prep_compound_page(page, order); +@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn) + unsigned long pfn; + + for (pfn = start_pfn; pfn < end_pfn; pfn++) { ++#ifdef CONFIG_X86_32 ++ /* boot failures in VMware 8 on 32bit vanilla since ++ this change */ ++ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn))) ++#else + if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn))) ++#endif + return 1; + } + return 0; +diff --git a/mm/percpu.c b/mm/percpu.c +index 716eb4a..8d10419 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly; + static unsigned int pcpu_high_unit_cpu __read_mostly; + + /* the address of the first chunk which starts with the kernel static area */ +-void *pcpu_base_addr __read_mostly; ++void *pcpu_base_addr __read_only; + EXPORT_SYMBOL_GPL(pcpu_base_addr); + + static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ +diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c +index e920aa3..137702a 100644 +--- a/mm/process_vm_access.c ++++ b/mm/process_vm_access.c +@@ -13,6 +13,7 @@ + #include <linux/uio.h> + #include <linux/sched.h> + #include <linux/highmem.h> ++#include <linux/security.h> + #include <linux/ptrace.h> + #include <linux/slab.h> + #include <linux/syscalls.h> +@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + size_t iov_l_curr_offset = 0; + ssize_t iov_len; + ++ return -ENOSYS; // PaX: until properly audited ++ + /* + * Work out how many pages of struct pages we're going to need + * when eventually calling get_user_pages + */ + for (i = 0; i < riovcnt; i++) { + iov_len = rvec[i].iov_len; +- if (iov_len > 0) { +- nr_pages_iov = ((unsigned long)rvec[i].iov_base +- + iov_len) +- / PAGE_SIZE - (unsigned long)rvec[i].iov_base +- / PAGE_SIZE + 1; +- nr_pages = max(nr_pages, nr_pages_iov); +- } ++ if (iov_len <= 0) ++ continue; ++ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE - ++ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1; ++ nr_pages = max(nr_pages, nr_pages_iov); + } + + if (nr_pages == 0) +@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + goto free_proc_pages; + } + +- task_lock(task); +- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) { +- task_unlock(task); ++ if (gr_handle_ptrace(task, vm_write ? 
PTRACE_POKETEXT : PTRACE_ATTACH)) { + rc = -EPERM; + goto put_task_struct; + } +- mm = task->mm; + +- if (!mm || (task->flags & PF_KTHREAD)) { +- task_unlock(task); +- rc = -EINVAL; ++ mm = mm_access(task, PTRACE_MODE_ATTACH); ++ if (!mm || IS_ERR(mm)) { ++ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; ++ /* ++ * Explicitly map EACCES to EPERM as EPERM is a more a ++ * appropriate error code for process_vw_readv/writev ++ */ ++ if (rc == -EACCES) ++ rc = -EPERM; + goto put_task_struct; + } + +- atomic_inc(&mm->mm_users); +- task_unlock(task); +- + for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) { + rc = process_vm_rw_single_vec( + (unsigned long)rvec[i].iov_base, rvec[i].iov_len, +diff --git a/mm/rmap.c b/mm/rmap.c +index a4fd368..e0ffec7 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma) + struct anon_vma *anon_vma = vma->anon_vma; + struct anon_vma_chain *avc; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct anon_vma_chain *avc_m = NULL; ++#endif ++ + might_sleep(); + if (unlikely(!anon_vma)) { + struct mm_struct *mm = vma->vm_mm; +@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma) + if (!avc) + goto out_enomem; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ avc_m = anon_vma_chain_alloc(GFP_KERNEL); ++ if (!avc_m) ++ goto out_enomem_free_avc; ++#endif ++ + anon_vma = find_mergeable_anon_vma(vma); + allocated = NULL; + if (!anon_vma) { +@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma) + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma); ++ ++ if (vma_m) { ++ BUG_ON(vma_m->anon_vma); ++ vma_m->anon_vma = anon_vma; ++ avc_m->anon_vma = anon_vma; ++ avc_m->vma = vma; ++ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain); ++ list_add(&avc_m->same_anon_vma, &anon_vma->head); ++ avc_m = NULL; ++ } ++#endif ++ + vma->anon_vma = anon_vma; + avc->anon_vma = anon_vma; + avc->vma = vma; +@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma) + + if (unlikely(allocated)) + put_anon_vma(allocated); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely(avc_m)) ++ anon_vma_chain_free(avc_m); ++#endif ++ + if (unlikely(avc)) + anon_vma_chain_free(avc); + } + return 0; + + out_enomem_free_avc: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (avc_m) ++ anon_vma_chain_free(avc_m); ++#endif ++ + anon_vma_chain_free(avc); + out_enomem: + return -ENOMEM; +@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, + * Attach the anon_vmas from src to dst. + * Returns 0 on success, -ENOMEM on failure. + */ +-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) ++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src) + { + struct anon_vma_chain *avc, *pavc; + struct anon_vma *root = NULL; +@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) + * the corresponding VMA in the parent process is attached to. + * Returns 0 on success, non-zero on failure. 
+ */ +-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) ++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma) + { + struct anon_vma_chain *avc; + struct anon_vma *anon_vma; +diff --git a/mm/shmem.c b/mm/shmem.c +index 6c253f7..367e20a 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -31,7 +31,7 @@ + #include <linux/export.h> + #include <linux/swap.h> + +-static struct vfsmount *shm_mnt; ++struct vfsmount *shm_mnt; + + #ifdef CONFIG_SHMEM + /* +@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt; + #define BOGO_DIRENT_SIZE 20 + + /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ +-#define SHORT_SYMLINK_LEN 128 ++#define SHORT_SYMLINK_LEN 64 + + struct shmem_xattr { + struct list_head list; /* anchored by shmem_inode_info->xattr_list */ +@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) + int err = -ENOMEM; + + /* Round up to L1_CACHE_BYTES to resist false sharing */ +- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), +- L1_CACHE_BYTES), GFP_KERNEL); ++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + +diff --git a/mm/slab.c b/mm/slab.c +index 83311c9a..fcf8f86 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -151,7 +151,7 @@ + + /* Legal flag mask for kmem_cache_create(). */ + #if DEBUG +-# define CREATE_MASK (SLAB_RED_ZONE | \ ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \ + SLAB_POISON | SLAB_HWCACHE_ALIGN | \ + SLAB_CACHE_DMA | \ + SLAB_STORE_USER | \ +@@ -159,7 +159,7 @@ + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) + #else +-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \ + SLAB_CACHE_DMA | \ + SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ +@@ -288,7 +288,7 @@ struct kmem_list3 { + * Need this for bootstrapping a per node allocator. 
+ */ + #define NUM_INIT_LISTS (3 * MAX_NUMNODES) +-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; ++static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS]; + #define CACHE_CACHE 0 + #define SIZE_AC MAX_NUMNODES + #define SIZE_L3 (2 * MAX_NUMNODES) +@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent) + if ((x)->max_freeable < i) \ + (x)->max_freeable = i; \ + } while (0) +-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) +-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) +-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) +-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) ++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit) ++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss) ++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit) ++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss) + #else + #define STATS_INC_ACTIVE(x) do { } while (0) + #define STATS_DEC_ACTIVE(x) do { } while (0) +@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ + static inline unsigned int obj_to_index(const struct kmem_cache *cache, +- const struct slab *slab, void *obj) ++ const struct slab *slab, const void *obj) + { + u32 offset = (obj - slab->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +@@ -564,7 +564,7 @@ struct cache_names { + static struct cache_names __initdata cache_names[] = { + #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, + #include <linux/kmalloc_sizes.h> +- {NULL,} ++ {NULL} + #undef CACHE + }; + +@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void) + sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, + sizes[INDEX_AC].cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + + if (INDEX_AC != INDEX_L3) { +@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void) + kmem_cache_create(names[INDEX_L3].name, + sizes[INDEX_L3].cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + } + +@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void) + sizes->cs_cachep = kmem_cache_create(names->name, + sizes->cs_size, + ARCH_KMALLOC_MINALIGN, +- ARCH_KMALLOC_FLAGS|SLAB_PANIC, ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY, + NULL); + } + #ifdef CONFIG_ZONE_DMA +@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p) + } + /* cpu stats */ + { +- unsigned long allochit = atomic_read(&cachep->allochit); +- unsigned long allocmiss = atomic_read(&cachep->allocmiss); +- unsigned long freehit = atomic_read(&cachep->freehit); +- unsigned long freemiss = atomic_read(&cachep->freemiss); ++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit); ++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss); ++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit); ++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss); + + seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", + allochit, allocmiss, freehit, freemiss); +@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void) + { + proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations); + #ifdef CONFIG_DEBUG_SLAB_LEAK +- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); ++ proc_create("slab_allocators", S_IRUSR, NULL, 
&proc_slabstats_operations); + #endif + return 0; + } + module_init(slab_proc_init); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct page *page; ++ struct kmem_cache *cachep = NULL; ++ struct slab *slabp; ++ unsigned int objnr; ++ unsigned long offset; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = virt_to_head_page(ptr); ++ ++ type = "<process stack>"; ++ if (!PageSlab(page)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ cachep = page_get_cache(page); ++ type = cachep->name; ++ if (!(cachep->flags & SLAB_USERCOPY)) ++ goto report; ++ ++ slabp = page_get_slab(page); ++ objnr = obj_to_index(cachep, slabp, ptr); ++ BUG_ON(objnr >= cachep->num); ++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep); ++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset) ++ return; ++ ++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /** + * ksize - get the actual amount of memory allocated for a given object + * @objp: Pointer to the object +diff --git a/mm/slob.c b/mm/slob.c +index 8105be4..e045f96 100644 +--- a/mm/slob.c ++++ b/mm/slob.c +@@ -29,7 +29,7 @@ + * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls + * alloc_pages() directly, allocating compound pages so the page order + * does not have to be separately tracked, and also stores the exact +- * allocation size in page->private so that it can be used to accurately ++ * allocation size in slob_page->size so that it can be used to accurately + * provide ksize(). These objects are detected in kfree() because slob_page() + * is false for them. + * +@@ -58,6 +58,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/sched.h> + #include <linux/slab.h> + #include <linux/mm.h> + #include <linux/swap.h> /* struct reclaim_state */ +@@ -102,7 +103,8 @@ struct slob_page { + unsigned long flags; /* mandatory */ + atomic_t _count; /* mandatory */ + slobidx_t units; /* free units left in page */ +- unsigned long pad[2]; ++ unsigned long pad[1]; ++ unsigned long size; /* size when >=PAGE_SIZE */ + slob_t *free; /* first free slob_t in page */ + struct list_head list; /* linked list of free pages */ + }; +@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large); + */ + static inline int is_slob_page(struct slob_page *sp) + { +- return PageSlab((struct page *)sp); ++ return PageSlab((struct page *)sp) && !sp->size; + } + + static inline void set_slob_page(struct slob_page *sp) +@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp) + + static inline struct slob_page *slob_page(const void *addr) + { +- return (struct slob_page *)virt_to_page(addr); ++ return (struct slob_page *)virt_to_head_page(addr); + } + + /* +@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) + /* + * Return the size of a slob block. + */ +-static slobidx_t slob_units(slob_t *s) ++static slobidx_t slob_units(const slob_t *s) + { + if (s->units > 0) + return s->units; +@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s) + /* + * Return the next free slob block pointer after this one. 
+ */ +-static slob_t *slob_next(slob_t *s) ++static slob_t *slob_next(const slob_t *s) + { + slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); + slobidx_t next; +@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s) + /* + * Returns true if s is the last free block in its page. + */ +-static int slob_last(slob_t *s) ++static int slob_last(const slob_t *s) + { + return !((unsigned long)slob_next(s) & ~PAGE_MASK); + } +@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) + if (!page) + return NULL; + ++ set_slob_page(page); + return page_address(page); + } + +@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) + if (!b) + return NULL; + sp = slob_page(b); +- set_slob_page(sp); + + spin_lock_irqsave(&slob_lock, flags); + sp->units = SLOB_UNITS(PAGE_SIZE); + sp->free = b; ++ sp->size = 0; + INIT_LIST_HEAD(&sp->list); + set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); + set_slob_page_free(sp, slob_list); +@@ -476,10 +479,9 @@ out: + * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. + */ + +-void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align) + { +- unsigned int *m; +- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ slob_t *m; + void *ret; + + gfp &= gfp_allowed_mask; +@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) + + if (!m) + return NULL; +- *m = size; ++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT); ++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT); ++ m[0].units = size; ++ m[1].units = align; + ret = (void *)m + align; + + trace_kmalloc_node(_RET_IP_, ret, +@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) + gfp |= __GFP_COMP; + ret = slob_new_pages(gfp, order, node); + if (ret) { +- struct page *page; +- page = virt_to_page(ret); +- page->private = size; ++ struct slob_page *sp; ++ sp = slob_page(ret); ++ sp->size = size; + } + + trace_kmalloc_node(_RET_IP_, ret, + size, PAGE_SIZE << order, gfp, node); + } + +- kmemleak_alloc(ret, size, 1, gfp); ++ return ret; ++} ++ ++void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++{ ++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ void *ret = __kmalloc_node_align(size, gfp, node, align); ++ ++ if (!ZERO_OR_NULL_PTR(ret)) ++ kmemleak_alloc(ret, size, 1, gfp); + return ret; + } + EXPORT_SYMBOL(__kmalloc_node); +@@ -533,13 +547,92 @@ void kfree(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- slob_free(m, *m + align); +- } else ++ slob_t *m = (slob_t *)(block - align); ++ slob_free(m, m[0].units + align); ++ } else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + put_page(&sp->page); ++ } + } + EXPORT_SYMBOL(kfree); + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct slob_page *sp; ++ const slob_t *free; ++ const void *base; ++ unsigned long flags; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ type = "<process stack>"; ++ sp = slob_page(ptr); ++ if (!PageSlab((struct page *)sp)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ type = "<slob>"; ++ if (sp->size) { ++ base = 
page_address(&sp->page); ++ if (base <= ptr && n <= sp->size - (ptr - base)) ++ return; ++ goto report; ++ } ++ ++ /* some tricky double walking to find the chunk */ ++ spin_lock_irqsave(&slob_lock, flags); ++ base = (void *)((unsigned long)ptr & PAGE_MASK); ++ free = sp->free; ++ ++ while (!slob_last(free) && (void *)free <= ptr) { ++ base = free + slob_units(free); ++ free = slob_next(free); ++ } ++ ++ while (base < (void *)free) { ++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units; ++ int size = SLOB_UNIT * SLOB_UNITS(m + align); ++ int offset; ++ ++ if (ptr < base + align) ++ break; ++ ++ offset = ptr - base - align; ++ if (offset >= m) { ++ base += size; ++ continue; ++ } ++ ++ if (n > m - offset) ++ break; ++ ++ spin_unlock_irqrestore(&slob_lock, flags); ++ return; ++ } ++ ++ spin_unlock_irqrestore(&slob_lock, flags); ++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /* can't use ksize for kmem_cache_alloc memory, only kmalloc */ + size_t ksize(const void *block) + { +@@ -552,10 +645,10 @@ size_t ksize(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- return SLOB_UNITS(*m) * SLOB_UNIT; ++ slob_t *m = (slob_t *)(block - align); ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT; + } else +- return sp->page.private; ++ return sp->size; + } + EXPORT_SYMBOL(ksize); + +@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + { + struct kmem_cache *c; + ++#ifdef CONFIG_PAX_USERCOPY ++ c = __kmalloc_node_align(sizeof(struct kmem_cache), ++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN); ++#else + c = slob_alloc(sizeof(struct kmem_cache), + GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); ++#endif + + if (c) { + c->name = name; +@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) + + lockdep_trace_alloc(flags); + ++#ifdef CONFIG_PAX_USERCOPY ++ b = __kmalloc_node_align(c->size, flags, node, c->align); ++#else + if (c->size < PAGE_SIZE) { + b = slob_alloc(c->size, flags, c->align, node); + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + SLOB_UNITS(c->size) * SLOB_UNIT, + flags, node); + } else { ++ struct slob_page *sp; ++ + b = slob_new_pages(flags, get_order(c->size), node); ++ sp = slob_page(b); ++ sp->size = c->size; + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + PAGE_SIZE << get_order(c->size), + flags, node); + } ++#endif + + if (c->ctor) + c->ctor(b); +@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); + + static void __kmem_cache_free(void *b, int size) + { +- if (size < PAGE_SIZE) ++ struct slob_page *sp = slob_page(b); ++ ++ if (is_slob_page(sp)) + slob_free(b, size); +- else ++ else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + slob_free_pages(b, get_order(size)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head) + + void kmem_cache_free(struct kmem_cache *c, void *b) + { ++ int size = c->size; ++ ++#ifdef CONFIG_PAX_USERCOPY ++ if (size + c->align < PAGE_SIZE) { ++ size += c->align; ++ b -= c->align; ++ } ++#endif ++ + kmemleak_free_recursive(b, c->flags); + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { + struct slob_rcu *slob_rcu; +- slob_rcu = b + (c->size - sizeof(struct slob_rcu)); +- slob_rcu->size = c->size; ++ slob_rcu = b + (size - sizeof(struct slob_rcu)); ++ 
slob_rcu->size = size; + call_rcu(&slob_rcu->head, kmem_rcu_free); + } else { +- __kmem_cache_free(b, c->size); ++ __kmem_cache_free(b, size); + } + ++#ifdef CONFIG_PAX_USERCOPY ++ trace_kfree(_RET_IP_, b); ++#else + trace_kmem_cache_free(_RET_IP_, b); ++#endif ++ + } + EXPORT_SYMBOL(kmem_cache_free); + +diff --git a/mm/slub.c b/mm/slub.c +index 1a919f0..1739c9b 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -208,7 +208,7 @@ struct track { + + enum track_item { TRACK_ALLOC, TRACK_FREE }; + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *); + static int sysfs_slab_alias(struct kmem_cache *, const char *); + static void sysfs_slab_remove(struct kmem_cache *); +@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t) + if (!t->addr) + return; + +- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", ++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n", + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); + #ifdef CONFIG_STACKTRACE + { +@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x) + + page = virt_to_head_page(x); + ++ BUG_ON(!PageSlab(page)); ++ + slab_free(s, page, x, _RET_IP_); + + trace_kmem_cache_free(_RET_IP_, x); +@@ -2592,7 +2594,7 @@ static int slub_min_objects; + * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) + */ +-static int slub_nomerge; ++static int slub_nomerge = 1; + + /* + * Calculate the order of allocation given an slab object size. +@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s, + else + s->cpu_partial = 30; + +- s->refcount = 1; ++ atomic_set(&s->refcount, 1); + #ifdef CONFIG_NUMA + s->remote_node_defrag_ratio = 1000; + #endif +@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s) + void kmem_cache_destroy(struct kmem_cache *s) + { + down_write(&slub_lock); +- s->refcount--; +- if (!s->refcount) { ++ if (atomic_dec_and_test(&s->refcount)) { + list_del(&s->list); + up_write(&slub_lock); + if (kmem_cache_close(s)) { +@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) + EXPORT_SYMBOL(__kmalloc_node); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct page *page; ++ struct kmem_cache *s = NULL; ++ unsigned long offset; ++ const char *type; ++ ++ if (!n) ++ return; ++ ++ type = "<null>"; ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = virt_to_head_page(ptr); ++ ++ type = "<process stack>"; ++ if (!PageSlab(page)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ s = page->slab; ++ type = s->name; ++ if (!(s->flags & SLAB_USERCOPY)) ++ goto report; ++ ++ offset = (ptr - page_address(page)) % s->size; ++ if (offset <= s->objsize && n <= s->objsize - offset) ++ return; ++ ++report: ++ pax_report_usercopy(ptr, n, to, type); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + size_t ksize(const void *object) + { + struct page *page; +@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) + int node; + + list_add(&s->list, &slab_caches); +- s->refcount = -1; ++ atomic_set(&s->refcount, -1); + + for_each_node_state(node, N_NORMAL_MEMORY) { + struct kmem_cache_node *n = get_node(s, node); +@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void) + + /* Caches 
that are not of the two-to-the-power-of size */ + if (KMALLOC_MIN_SIZE <= 32) { +- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); ++ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY); + caches++; + } + + if (KMALLOC_MIN_SIZE <= 64) { +- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); ++ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY); + caches++; + } + + for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { +- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); ++ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY); + caches++; + } + +@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s) + /* + * We may have set a slab to be unmergeable during bootstrap. + */ +- if (s->refcount < 0) ++ if (atomic_read(&s->refcount) < 0) + return 1; + + return 0; +@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + down_write(&slub_lock); + s = find_mergeable(size, align, flags, name, ctor); + if (s) { +- s->refcount++; ++ atomic_inc(&s->refcount); + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. +@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, + s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); + + if (sysfs_slab_alias(s, name)) { +- s->refcount--; ++ atomic_dec(&s->refcount); + goto err; + } + up_write(&slub_lock); +@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, + } + #endif + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int count_inuse(struct page *page) + { + return page->inuse; +@@ -4410,12 +4455,12 @@ static void resiliency_test(void) + validate_slab_cache(kmalloc_caches[9]); + } + #else +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static void resiliency_test(void) {}; + #endif + #endif + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + enum slab_stat_type { + SL_ALL, /* All slabs */ + SL_PARTIAL, /* Only partially allocated slabs */ +@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor); + + static ssize_t aliases_show(struct kmem_cache *s, char *buf) + { +- return sprintf(buf, "%d\n", s->refcount - 1); ++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1); + } + SLAB_ATTR_RO(aliases); + +@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s) + return name; + } + ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *s) + { + int err; +@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) + kobject_del(&s->kobj); + kobject_put(&s->kobj); + } ++#endif + + /* + * Need to buffer aliases during bootup until sysfs becomes +@@ -5298,6 +5345,7 @@ struct saved_alias { + + static struct saved_alias *alias_list; + ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + { + struct saved_alias *al; +@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + alias_list = al; + return 0; + } ++#endif + + static int __init slab_sysfs_init(void) + { +diff --git a/mm/swap.c b/mm/swap.c +index 55b266d..a532537 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -31,6 +31,7 @@ + #include <linux/backing-dev.h> + #include <linux/memcontrol.h> + #include <linux/gfp.h> ++#include 
<linux/hugetlb.h> + + #include "internal.h" + +@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page) + + __page_cache_release(page); + dtor = get_compound_page_dtor(page); ++ if (!PageHuge(page)) ++ BUG_ON(dtor != free_compound_page); + (*dtor)(page); + } + +diff --git a/mm/swapfile.c b/mm/swapfile.c +index b1cd120..aaae885 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex); + + static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); + /* Activity counter to indicate that a swapon or swapoff has occurred */ +-static atomic_t proc_poll_event = ATOMIC_INIT(0); ++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0); + + static inline unsigned char swap_count(unsigned char ent) + { +@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) + } + filp_close(swap_file, NULL); + err = 0; +- atomic_inc(&proc_poll_event); ++ atomic_inc_unchecked(&proc_poll_event); + wake_up_interruptible(&proc_poll_wait); + + out_dput: +@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait) + + poll_wait(file, &proc_poll_wait, wait); + +- if (seq->poll_event != atomic_read(&proc_poll_event)) { +- seq->poll_event = atomic_read(&proc_poll_event); ++ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) { ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event); + return POLLIN | POLLRDNORM | POLLERR | POLLPRI; + } + +@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file) + return ret; + + seq = file->private_data; +- seq->poll_event = atomic_read(&proc_poll_event); ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event); + return 0; + } + +@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) + (p->flags & SWP_DISCARDABLE) ? 
"D" : ""); + + mutex_unlock(&swapon_mutex); +- atomic_inc(&proc_poll_event); ++ atomic_inc_unchecked(&proc_poll_event); + wake_up_interruptible(&proc_poll_wait); + + if (S_ISREG(inode->i_mode)) +diff --git a/mm/util.c b/mm/util.c +index 136ac4f..f917fa9 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, + void arch_pick_mmap_layout(struct mm_struct *mm) + { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 27be2f0..633e5cc 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) + + pte = pte_offset_kernel(pmd, addr); + do { +- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); +- WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) { ++ BUG_ON(!pte_exec(*pte)); ++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); ++ continue; ++ } ++#endif ++ ++ { ++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); ++ WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ } + } while (pte++, addr += PAGE_SIZE, addr != end); + } + +@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, pgprot_t prot, struct page **pages, int *nr) + { + pte_t *pte; ++ int ret = -ENOMEM; + + /* + * nr is a running index into the array which helps higher level +@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, + pte = pte_alloc_kernel(pmd, addr); + if (!pte) + return -ENOMEM; ++ ++ pax_open_kernel(); + do { + struct page *page = pages[*nr]; + +- if (WARN_ON(!pte_none(*pte))) +- return -EBUSY; +- if (WARN_ON(!page)) +- return -ENOMEM; ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (pgprot_val(prot) & _PAGE_NX) ++#endif ++ ++ if (WARN_ON(!pte_none(*pte))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ if (WARN_ON(!page)) { ++ ret = -ENOMEM; ++ goto out; ++ } + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); + (*nr)++; + } while (pte++, addr += PAGE_SIZE, addr != end); +- return 0; ++ ret = 0; ++out: ++ pax_close_kernel(); ++ return ret; + } + + static int vmap_pmd_range(pud_t *pud, unsigned long addr, +@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x) + * and fall back on vmalloc() if that fails. Others + * just put it in the vmalloc space. 
+ */ +-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) ++#ifdef CONFIG_MODULES ++#ifdef MODULES_VADDR + unsigned long addr = (unsigned long)x; + if (addr >= MODULES_VADDR && addr < MODULES_END) + return 1; + #endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END) ++ return 1; ++#endif ++ ++#endif ++ + return is_vmalloc_addr(x); + } + +@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) + + if (!pgd_none(*pgd)) { + pud_t *pud = pud_offset(pgd, addr); ++#ifdef CONFIG_X86 ++ if (!pud_large(*pud)) ++#endif + if (!pud_none(*pud)) { + pmd_t *pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_X86 ++ if (!pmd_large(*pmd)) ++#endif + if (!pmd_none(*pmd)) { + pte_t *ptep, pte; + +@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, + struct vm_struct *area; + + BUG_ON(in_interrupt()); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (flags & VM_KERNEXEC) { ++ if (start != VMALLOC_START || end != VMALLOC_END) ++ return NULL; ++ start = (unsigned long)MODULES_EXEC_VADDR; ++ end = (unsigned long)MODULES_EXEC_END; ++ } ++#endif ++ + if (flags & VM_IOREMAP) { + int bit = fls(size); + +@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count, + if (count > totalram_pages) + return NULL; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ flags |= VM_KERNEXEC; ++#endif ++ + area = get_vm_area_caller((count << PAGE_SHIFT), flags, + __builtin_return_address(0)); + if (!area) +@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, + if (!size || (size >> PAGE_SHIFT) > totalram_pages) + goto fail; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC, ++ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller); ++ else ++#endif ++ + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST, + start, end, node, gfp_mask, caller); + if (!area) +@@ -1800,10 +1862,9 @@ EXPORT_SYMBOL(vzalloc_node); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. 
+ */ +- + void *vmalloc_exec(unsigned long size) + { +- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC, + -1, __builtin_return_address(0)); + } + +@@ -2098,6 +2159,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + ++ BUG_ON(vma->vm_mirror); ++ + if ((PAGE_SIZE-1) & (unsigned long)addr) + return -EINVAL; + +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 8fd603b..cf0d930 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu) + * + * vm_stat contains the global counters + */ +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; + EXPORT_SYMBOL(vm_stat); + + #ifdef CONFIG_SMP +@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu) + v = p->vm_stat_diff[i]; + p->vm_stat_diff[i] = 0; + local_irq_restore(flags); +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + #ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ +@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu) + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (global_diff[i]) +- atomic_long_add(global_diff[i], &vm_stat[i]); ++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); + } + + #endif +@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void) + start_cpu_timer(cpu); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); +- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ { ++ mode_t gr_mode = S_IRUGO; ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); ++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops); ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations); ++#else ++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations); ++#endif ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); ++ } + #endif + return 0; + } +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index 5471628..cef8398 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) + err = -EPERM; + if (!capable(CAP_NET_ADMIN)) + break; +- if ((args.u.name_type >= 0) && +- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { ++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { + struct vlan_net *vn; + + vn = net_generic(net, vlan_net_id); +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c +index fdfdb57..38d368c 100644 +--- a/net/9p/trans_fd.c ++++ b/net/9p/trans_fd.c +@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len) + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); ++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos); + set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) +diff --git a/net/atm/atm_misc.c 
b/net/atm/atm_misc.c +index f41f026..fe76ea8 100644 +--- a/net/atm/atm_misc.c ++++ b/net/atm/atm_misc.c +@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize) + if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) + return 1; + atm_return(vcc, truesize); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return 0; + } + EXPORT_SYMBOL(atm_charge); +@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, + } + } + atm_return(vcc, guess); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return NULL; + } + EXPORT_SYMBOL(atm_alloc_charge); +@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal); + + void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats); + + void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +diff --git a/net/atm/lec.h b/net/atm/lec.h +index dfc0719..47c5322 100644 +--- a/net/atm/lec.h ++++ b/net/atm/lec.h +@@ -48,7 +48,7 @@ struct lane2_ops { + const u8 *tlvs, u32 sizeoftlvs); + void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr, + const u8 *tlvs, u32 sizeoftlvs); +-}; ++} __no_const; + + /* + * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType +diff --git a/net/atm/mpc.h b/net/atm/mpc.h +index 0919a88..a23d54e 100644 +--- a/net/atm/mpc.h ++++ b/net/atm/mpc.h +@@ -33,7 +33,7 @@ struct mpoa_client { + struct mpc_parameters parameters; /* parameters for this client */ + + const struct net_device_ops *old_ops; +- struct net_device_ops new_ops; ++ net_device_ops_no_const new_ops; + }; + + +diff --git a/net/atm/proc.c b/net/atm/proc.c +index 0d020de..011c7bb 100644 +--- a/net/atm/proc.c ++++ b/net/atm/proc.c +@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal, + const struct k_atm_aal_stats *stats) + { + seq_printf(seq, "%s ( %d %d %d %d %d )", aal, +- atomic_read(&stats->tx), atomic_read(&stats->tx_err), +- atomic_read(&stats->rx), atomic_read(&stats->rx_err), +- atomic_read(&stats->rx_drop)); ++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), ++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), ++ atomic_read_unchecked(&stats->rx_drop)); + } + + static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) +diff --git a/net/atm/resources.c b/net/atm/resources.c +index 23f45ce..c748f1a 100644 +--- a/net/atm/resources.c ++++ b/net/atm/resources.c +@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister); + static void copy_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from, + static void subtract_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +diff --git 
a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index 3512e25..2b33401 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes) + + /* change sequence number to network order */ + batman_ogm_packet->seqno = +- htonl((uint32_t)atomic_read(&hard_iface->seqno)); ++ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno)); + + batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); + batman_ogm_packet->tt_crc = htons((uint16_t) +@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes) + else + batman_ogm_packet->gw_flags = NO_FLAGS; + +- atomic_inc(&hard_iface->seqno); ++ atomic_inc_unchecked(&hard_iface->seqno); + + slide_own_bcast_window(hard_iface); + bat_ogm_queue_add(bat_priv, hard_iface->packet_buff, +@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr, + return; + + /* could be changed by schedule_own_packet() */ +- if_incoming_seqno = atomic_read(&if_incoming->seqno); ++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno); + + has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); + +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c +index 7704df4..beb4e16 100644 +--- a/net/batman-adv/hard-interface.c ++++ b/net/batman-adv/hard-interface.c +@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, + hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; + dev_add_pack(&hard_iface->batman_adv_ptype); + +- atomic_set(&hard_iface->seqno, 1); +- atomic_set(&hard_iface->frag_seqno, 1); ++ atomic_set_unchecked(&hard_iface->seqno, 1); ++ atomic_set_unchecked(&hard_iface->frag_seqno, 1); + bat_info(hard_iface->soft_iface, "Adding interface: %s\n", + hard_iface->net_dev->name); + +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c +index f9cc957..efd9dae 100644 +--- a/net/batman-adv/soft-interface.c ++++ b/net/batman-adv/soft-interface.c +@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) + + /* set broadcast sequence number */ + bcast_packet->seqno = +- htonl(atomic_inc_return(&bat_priv->bcast_seqno)); ++ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno)); + + add_bcast_packet_to_list(bat_priv, skb, 1); + +@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name) + atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN); + + atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); +- atomic_set(&bat_priv->bcast_seqno, 1); ++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1); + atomic_set(&bat_priv->ttvn, 0); + atomic_set(&bat_priv->tt_local_changes, 0); + atomic_set(&bat_priv->tt_ogm_append_cnt, 0); +diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h +index ab8d0fe..ceba3fd 100644 +--- a/net/batman-adv/types.h ++++ b/net/batman-adv/types.h +@@ -38,8 +38,8 @@ struct hard_iface { + int16_t if_num; + char if_status; + struct net_device *net_dev; +- atomic_t seqno; +- atomic_t frag_seqno; ++ atomic_unchecked_t seqno; ++ atomic_unchecked_t frag_seqno; + unsigned char *packet_buff; + int packet_len; + struct kobject *hardif_obj; +@@ -154,7 +154,7 @@ struct bat_priv { + atomic_t orig_interval; /* uint */ + atomic_t hop_penalty; /* uint */ + atomic_t log_level; /* uint */ +- atomic_t bcast_seqno; ++ atomic_unchecked_t bcast_seqno; + atomic_t bcast_queue_left; + atomic_t batman_queue_left; + atomic_t ttvn; /* translation table version number */ 
+diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c +index 07d1c1d..7e9bea9 100644 +--- a/net/batman-adv/unicast.c ++++ b/net/batman-adv/unicast.c +@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + frag1->flags = UNI_FRAG_HEAD | large_tail; + frag2->flags = large_tail; + +- seqno = atomic_add_return(2, &hard_iface->frag_seqno); ++ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno); + frag1->seqno = htons(seqno - 1); + frag2->seqno = htons(seqno); + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index c1c597e..05ebb40 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]) + memset(&cp, 0, sizeof(cp)); + + cp.handle = cpu_to_le16(conn->handle); +- memcpy(cp.ltk, ltk, sizeof(ltk)); ++ memcpy(cp.ltk, ltk, sizeof(cp.ltk)); + + hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); + } +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 17b5b1c..826d872 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ ++ memcpy(&rfc, (void *)val, olen); + + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) +@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) + + switch (type) { + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ ++ memcpy(&rfc, (void *)val, olen); + goto done; + } + } +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index 8eb6b15..e3db7ab 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1488,7 +1488,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, + nexthdr = ip6h->nexthdr; + offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); + +- if (offset < 0 || nexthdr != IPPROTO_ICMPV6) ++ if (nexthdr != IPPROTO_ICMPV6) + return 0; + + /* Okay, we found ICMPv6 header */ +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 5864cc4..121f3a30 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + tmp.valid_hooks = t->table->valid_hooks; + } + mutex_unlock(&ebt_mutex); +- if (copy_to_user(user, &tmp, *len) != 0){ ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){ + BUGPRINT("c2u Didn't work\n"); + ret = -EFAULT; + break; +diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c +index a986280..13444a1 100644 +--- a/net/caif/caif_socket.c ++++ b/net/caif/caif_socket.c +@@ -48,19 +48,20 @@ static struct dentry *debugfsdir; + #ifdef CONFIG_DEBUG_FS + struct debug_fs_counter { + atomic_t caif_nr_socks; +- atomic_t caif_sock_create; +- atomic_t num_connect_req; +- atomic_t num_connect_resp; +- atomic_t num_connect_fail_resp; +- atomic_t num_disconnect; +- atomic_t num_remote_shutdown_ind; +- atomic_t num_tx_flow_off_ind; +- atomic_t num_tx_flow_on_ind; +- atomic_t num_rx_flow_off; +- atomic_t num_rx_flow_on; ++ atomic_unchecked_t caif_sock_create; ++ atomic_unchecked_t num_connect_req; ++ atomic_unchecked_t num_connect_resp; ++ 
atomic_unchecked_t num_connect_fail_resp; ++ atomic_unchecked_t num_disconnect; ++ atomic_unchecked_t num_remote_shutdown_ind; ++ atomic_unchecked_t num_tx_flow_off_ind; ++ atomic_unchecked_t num_tx_flow_on_ind; ++ atomic_unchecked_t num_rx_flow_off; ++ atomic_unchecked_t num_rx_flow_on; + }; + static struct debug_fs_counter cnt; + #define dbfs_atomic_inc(v) atomic_inc_return(v) ++#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v) + #define dbfs_atomic_dec(v) atomic_dec_return(v) + #else + #define dbfs_atomic_inc(v) 0 +@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + atomic_read(&cf_sk->sk.sk_rmem_alloc), + sk_rcvbuf_lowwater(cf_sk)); + set_rx_flow_off(cf_sk); +- dbfs_atomic_inc(&cnt.num_rx_flow_off); ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); + } + +@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + set_rx_flow_off(cf_sk); + if (net_ratelimit()) + pr_debug("sending flow OFF due to rmem_schedule\n"); +- dbfs_atomic_inc(&cnt.num_rx_flow_off); ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); + } + skb->dev = NULL; +@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr, + switch (flow) { + case CAIF_CTRLCMD_FLOW_ON_IND: + /* OK from modem to start sending again */ +- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); ++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind); + set_tx_flow_on(cf_sk); + cf_sk->sk.sk_state_change(&cf_sk->sk); + break; + + case CAIF_CTRLCMD_FLOW_OFF_IND: + /* Modem asks us to shut up */ +- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); ++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind); + set_tx_flow_off(cf_sk); + cf_sk->sk.sk_state_change(&cf_sk->sk); + break; +@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr, + /* We're now connected */ + caif_client_register_refcnt(&cf_sk->layer, + cfsk_hold, cfsk_put); +- dbfs_atomic_inc(&cnt.num_connect_resp); ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp); + cf_sk->sk.sk_state = CAIF_CONNECTED; + set_tx_flow_on(cf_sk); + cf_sk->sk.sk_state_change(&cf_sk->sk); +@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr, + + case CAIF_CTRLCMD_INIT_FAIL_RSP: + /* Connect request failed */ +- dbfs_atomic_inc(&cnt.num_connect_fail_resp); ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp); + cf_sk->sk.sk_err = ECONNREFUSED; + cf_sk->sk.sk_state = CAIF_DISCONNECTED; + cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; +@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr, + + case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: + /* Modem has closed this connection, or device is down. 
*/ +- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); ++ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind); + cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; + cf_sk->sk.sk_err = ECONNRESET; + set_rx_flow_on(cf_sk); +@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk) + return; + + if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { +- dbfs_atomic_inc(&cnt.num_rx_flow_on); ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on); + set_rx_flow_on(cf_sk); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); + } +@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, + /*ifindex = id of the interface.*/ + cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; + +- dbfs_atomic_inc(&cnt.num_connect_req); ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_req); + cf_sk->layer.receive = caif_sktrecv_cb; + + err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, +@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock) + spin_unlock_bh(&sk->sk_receive_queue.lock); + sock->sk = NULL; + +- dbfs_atomic_inc(&cnt.num_disconnect); ++ dbfs_atomic_inc_unchecked(&cnt.num_disconnect); + + WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); + if (cf_sk->debugfs_socket_dir != NULL) +@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, + cf_sk->conn_req.protocol = protocol; + /* Increase the number of sockets created. */ + dbfs_atomic_inc(&cnt.caif_nr_socks); +- num = dbfs_atomic_inc(&cnt.caif_sock_create); ++ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create); + #ifdef CONFIG_DEBUG_FS + if (!IS_ERR(debugfsdir)) { + +diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c +index 5cf5222..6f704ad 100644 +--- a/net/caif/cfctrl.c ++++ b/net/caif/cfctrl.c +@@ -9,6 +9,7 @@ + #include <linux/stddef.h> + #include <linux/spinlock.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <net/caif/caif_layer.h> + #include <net/caif/cfpkt.h> + #include <net/caif/cfctrl.h> +@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void) + memset(&dev_info, 0, sizeof(dev_info)); + dev_info.id = 0xff; + cfsrvl_init(&this->serv, 0, &dev_info, false); +- atomic_set(&this->req_seq_no, 1); +- atomic_set(&this->rsp_seq_no, 1); ++ atomic_set_unchecked(&this->req_seq_no, 1); ++ atomic_set_unchecked(&this->rsp_seq_no, 1); + this->serv.layer.receive = cfctrl_recv; + sprintf(this->serv.layer.name, "ctrl"); + this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; +@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl, + struct cfctrl_request_info *req) + { + spin_lock_bh(&ctrl->info_list_lock); +- atomic_inc(&ctrl->req_seq_no); +- req->sequence_no = atomic_read(&ctrl->req_seq_no); ++ atomic_inc_unchecked(&ctrl->req_seq_no); ++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no); + list_add_tail(&req->list, &ctrl->list); + spin_unlock_bh(&ctrl->info_list_lock); + } +@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, + if (p != first) + pr_warn("Requests are not received in order\n"); + +- atomic_set(&ctrl->rsp_seq_no, ++ atomic_set_unchecked(&ctrl->rsp_seq_no, + p->sequence_no); + list_del(&p->list); + goto out; +diff --git a/net/can/gw.c b/net/can/gw.c +index 3d79b12..8de85fa 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -96,7 +96,7 @@ struct cf_mod { + struct { + void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor); + void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8); +- } csumfunc; ++ } __no_const csumfunc; + }; + + +diff --git a/net/compat.c b/net/compat.c 
+index 6def90e..c6992fa 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || + __get_user(kmsg->msg_flags, &umsg->msg_flags)) + return -EFAULT; +- kmsg->msg_name = compat_ptr(tmp1); +- kmsg->msg_iov = compat_ptr(tmp2); +- kmsg->msg_control = compat_ptr(tmp3); ++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1); ++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2); ++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3); + return 0; + } + +@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + + if (kern_msg->msg_namelen) { + if (mode == VERIFY_READ) { +- int err = move_addr_to_kernel(kern_msg->msg_name, ++ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name, + kern_msg->msg_namelen, + kern_address); + if (err < 0) +@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + kern_msg->msg_name = NULL; + + tot_len = iov_from_user_compat_to_kern(kern_iov, +- (struct compat_iovec __user *)kern_msg->msg_iov, ++ (struct compat_iovec __force_user *)kern_msg->msg_iov, + kern_msg->msg_iovlen); + if (tot_len >= 0) + kern_msg->msg_iov = kern_iov; +@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + + #define CMSG_COMPAT_FIRSTHDR(msg) \ + (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \ +- (struct compat_cmsghdr __user *)((msg)->msg_control) : \ ++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \ + (struct compat_cmsghdr __user *)NULL) + + #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \ + ((ucmlen) >= sizeof(struct compat_cmsghdr) && \ + (ucmlen) <= (unsigned long) \ + ((mhdr)->msg_controllen - \ +- ((char *)(ucmsg) - (char *)(mhdr)->msg_control))) ++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control))) + + static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg, + struct compat_cmsghdr __user *cmsg, int cmsg_len) + { + char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len); +- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) > ++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) > + msg->msg_controllen) + return NULL; + return (struct compat_cmsghdr __user *)ptr; +@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat + { + struct compat_timeval ctv; + struct compat_timespec cts[3]; +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + struct compat_cmsghdr cmhdr; + int cmlen; + +@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat + + void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) + { +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); + int fdnum = scm->fp->count; + struct file **fp = scm->fp->fp; +@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, + return -EFAULT; + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); ++ err = 
sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime)); + set_fs(old_fs); + + return err; +@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, + len = sizeof(ktime); + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len); ++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len); + set_fs(old_fs); + + if (!err) { +@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + { +- struct compat_group_req __user *gr32 = (void *)optval; ++ struct compat_group_req __user *gr32 = (void __user *)optval; + struct group_req __user *kgr = + compat_alloc_user_space(sizeof(struct group_req)); + u32 interface; +@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + { +- struct compat_group_source_req __user *gsr32 = (void *)optval; ++ struct compat_group_source_req __user *gsr32 = (void __user *)optval; + struct group_source_req __user *kgsr = compat_alloc_user_space( + sizeof(struct group_source_req)); + u32 interface; +@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + } + case MCAST_MSFILTER: + { +- struct compat_group_filter __user *gf32 = (void *)optval; ++ struct compat_group_filter __user *gf32 = (void __user *)optval; + struct group_filter __user *kgf; + u32 interface, fmode, numsrc; + +@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, + char __user *optval, int __user *optlen, + int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) + { +- struct compat_group_filter __user *gf32 = (void *)optval; ++ struct compat_group_filter __user *gf32 = (void __user *)optval; + struct group_filter __user *kgf; + int __user *koptlen; + u32 interface, fmode, numsrc; +diff --git a/net/core/datagram.c b/net/core/datagram.c +index 68bbf9f..5ef0d12 100644 +--- a/net/core/datagram.c ++++ b/net/core/datagram.c +@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) + } + + kfree_skb(skb); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + sk_mem_reclaim_partial(sk); + + return err; +diff --git a/net/core/dev.c b/net/core/dev.c +index c56cacf..b28e35f 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name) + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", name); + if (no_module && capable(CAP_SYS_MODULE)) { ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ ___request_module(true, "grsec_modharden_netdev", "%s", name); ++#else + if (!request_module("%s", name)) + pr_err("Loading kernel module for a network device " + "with CAP_SYS_MODULE (deprecated). 
Use CAP_NET_ADMIN and alias netdev-%s " + "instead\n", name); ++#endif + } + } + EXPORT_SYMBOL(dev_load); +@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { +- atomic_long_inc(&dev->rx_dropped); ++ atomic_long_inc_unchecked(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + nf_reset(skb); + + if (unlikely(!is_skb_forwardable(dev, skb))) { +- atomic_long_inc(&dev->rx_dropped); ++ atomic_long_inc_unchecked(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) + + struct dev_gso_cb { + void (*destructor)(struct sk_buff *skb); +-}; ++} __no_const; + + #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) + +@@ -2970,7 +2974,7 @@ enqueue: + + local_irq_restore(flags); + +- atomic_long_inc(&skb->dev->rx_dropped); ++ atomic_long_inc_unchecked(&skb->dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + +-static void net_tx_action(struct softirq_action *h) ++static void net_tx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + +@@ -3333,7 +3337,7 @@ ncls: + if (pt_prev) { + ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + } else { +- atomic_long_inc(&skb->dev->rx_dropped); ++ atomic_long_inc_unchecked(&skb->dev->rx_dropped); + kfree_skb(skb); + /* Jamal, now you will not able to escape explaining + * me how you were going to use this. :-) +@@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi) + } + EXPORT_SYMBOL(netif_napi_del); + +-static void net_rx_action(struct softirq_action *h) ++static void net_rx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; +@@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + } else { + netdev_stats_to_stats64(storage, &dev->stats); + } +- storage->rx_dropped += atomic_long_read(&dev->rx_dropped); ++ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped); + return storage; + } + EXPORT_SYMBOL(dev_get_stats); +diff --git a/net/core/flow.c b/net/core/flow.c +index e318c7e..168b1d0 100644 +--- a/net/core/flow.c ++++ b/net/core/flow.c +@@ -61,7 +61,7 @@ struct flow_cache { + struct timer_list rnd_timer; + }; + +-atomic_t flow_cache_genid = ATOMIC_INIT(0); ++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0); + EXPORT_SYMBOL(flow_cache_genid); + static struct flow_cache flow_cache_global; + static struct kmem_cache *flow_cachep __read_mostly; +@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg) + + static int flow_entry_valid(struct flow_cache_entry *fle) + { +- if (atomic_read(&flow_cache_genid) != fle->genid) ++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid) + return 0; + if (fle->object && !fle->object->ops->check(fle->object)) + return 0; +@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, + hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); + fcp->hash_count++; + } +- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { ++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) { + flo = fle->object; + if (!flo) + goto ret_object; +@@ -280,7 
+280,7 @@ nocache: + } + flo = resolver(net, key, family, dir, flo, ctx); + if (fle) { +- fle->genid = atomic_read(&flow_cache_genid); ++ fle->genid = atomic_read_unchecked(&flow_cache_genid); + if (!IS_ERR(flo)) + fle->object = flo; + else +diff --git a/net/core/iovec.c b/net/core/iovec.c +index c40f27e..7f49254 100644 +--- a/net/core/iovec.c ++++ b/net/core/iovec.c +@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, + if (m->msg_namelen) { + if (mode == VERIFY_READ) { + void __user *namep; +- namep = (void __user __force *) m->msg_name; ++ namep = (void __force_user *) m->msg_name; + err = move_addr_to_kernel(namep, m->msg_namelen, + address); + if (err < 0) +@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, + } + + size = m->msg_iovlen * sizeof(struct iovec); +- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) ++ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size)) + return -EFAULT; + + m->msg_iov = iov; +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 9083e82..1673203 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -57,7 +57,7 @@ struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + rtnl_calcit_func calcit; +-}; ++} __no_const; + + static DEFINE_MUTEX(rtnl_mutex); + static u16 min_ifinfo_dump_size; +diff --git a/net/core/scm.c b/net/core/scm.c +index ff52ad0..aff1c0f 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send); + int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user *)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + struct cmsghdr cmhdr; + int cmlen = CMSG_LEN(len); + int err; +@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + err = -EFAULT; + if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) + goto out; +- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) ++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr))) + goto out; + cmlen = CMSG_SPACE(len); + if (msg->msg_controllen < cmlen) +@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg); + void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user*)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + + int fdmax = 0; + int fdnum = scm->fp->count; +@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + if (fdnum < fdmax) + fdmax = fdnum; + +- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; ++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax; + i++, cmfptr++) + { + int new_fd; +diff --git a/net/core/sock.c b/net/core/sock.c +index b23f174..b9a0d26 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + struct sk_buff_head *list = &sk->sk_receive_queue; + + if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + trace_sock_rcvqueue_full(sk, skb); + return -ENOMEM; + } +@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + return err; + + if (!sk_rmem_schedule(sk, skb->truesize)) { +- atomic_inc(&sk->sk_drops); ++ 
atomic_inc_unchecked(&sk->sk_drops); + return -ENOBUFS; + } + +@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + skb_dst_force(skb); + + spin_lock_irqsave(&list->lock, flags); +- skb->dropcount = atomic_read(&sk->sk_drops); ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); + __skb_queue_tail(list, skb); + spin_unlock_irqrestore(&list->lock, flags); + +@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) + skb->dev = NULL; + + if (sk_rcvqueues_full(sk, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + goto discard_and_relse; + } + if (nested) +@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) + mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); + } else if (sk_add_backlog(sk, skb)) { + bh_unlock_sock(sk); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + goto discard_and_relse; + } + +@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + if (len > sizeof(peercred)) + len = sizeof(peercred); + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); +- if (copy_to_user(optval, &peercred, len)) ++ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len)) + return -EFAULT; + goto lenout; + } +@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + return -ENOTCONN; + if (lv < len) + return -EINVAL; +- if (copy_to_user(optval, address, len)) ++ if (len > sizeof(address) || copy_to_user(optval, address, len)) + return -EFAULT; + goto lenout; + } +@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + + if (len > lv) + len = lv; +- if (copy_to_user(optval, &v, len)) ++ if (len > sizeof(v) || copy_to_user(optval, &v, len)) + return -EFAULT; + lenout: + if (put_user(len, optlen)) +@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) + */ + smp_wmb(); + atomic_set(&sk->sk_refcnt, 1); +- atomic_set(&sk->sk_drops, 0); ++ atomic_set_unchecked(&sk->sk_drops, 0); + } + EXPORT_SYMBOL(sock_init_data); + +diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c +index 02e75d1..9a57a7c 100644 +--- a/net/decnet/sysctl_net_decnet.c ++++ b/net/decnet/sysctl_net_decnet.c +@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, addr, len)) ++ if (len > sizeof addr || copy_to_user(buffer, addr, len)) + return -EFAULT; + + *lenp = len; +@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, devname, len)) ++ if (len > sizeof devname || copy_to_user(buffer, devname, len)) + return -EFAULT; + + *lenp = len; +diff --git a/net/econet/Kconfig b/net/econet/Kconfig +index 39a2d29..f39c0fe 100644 +--- a/net/econet/Kconfig ++++ b/net/econet/Kconfig +@@ -4,7 +4,7 @@ + + config ECONET + tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)" +- depends on EXPERIMENTAL && INET ++ depends on EXPERIMENTAL && INET && BROKEN + ---help--- + Econet is a fairly old and slow networking protocol mainly used by + Acorn computers to access file and print servers. 
It uses native +diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c +index 36d1440..44ff28b 100644 +--- a/net/ipv4/ah4.c ++++ b/net/ipv4/ah4.c +@@ -19,6 +19,8 @@ struct ah_skb_cb { + #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) + + static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, ++ unsigned int size) __size_overflow(3); ++static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, + unsigned int size) + { + unsigned int len; +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 92fc5f6..b790d91 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, + #ifdef CONFIG_IP_ROUTE_MULTIPATH + fib_sync_up(dev); + #endif +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + rt_cache_flush(dev_net(dev), -1); + break; + case NETDEV_DOWN: + fib_del_ifaddr(ifa, NULL); +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + if (ifa->ifa_dev->ifa_list == NULL) { + /* Last address was deleted from this interface. + * Disable IP. +@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo + #ifdef CONFIG_IP_ROUTE_MULTIPATH + fib_sync_up(dev); + #endif +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + rt_cache_flush(dev_net(dev), -1); + break; + case NETDEV_DOWN: +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 80106d8..232e898 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) + nh->nh_saddr = inet_select_addr(nh->nh_dev, + nh->nh_gw, + nh->nh_parent->fib_scope); +- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); ++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid); + + return nh->nh_saddr; + } +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index ccee270..db23c3c 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk, + r->idiag_retrans = 0; + + r->id.idiag_if = sk->sk_bound_dev_if; ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)sk; + r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); ++#endif + + r->id.idiag_sport = inet->inet_sport; + r->id.idiag_dport = inet->inet_dport; +@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, + r->idiag_family = tw->tw_family; + r->idiag_retrans = 0; + r->id.idiag_if = tw->tw_bound_dev_if; ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); ++#endif ++ + r->id.idiag_sport = tw->tw_sport; + r->id.idiag_dport = tw->tw_dport; + r->id.idiag_src[0] = tw->tw_rcv_saddr; +@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, + if (sk == NULL) + goto unlock; + ++#ifndef CONFIG_GRKERNSEC_HIDESYM + err = -ESTALE; + if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE || + req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) && + ((u32)(unsigned long)sk != req->id.idiag_cookie[0] || + (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1])) + goto out; ++#endif + + 
err = -ENOMEM; + rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + +@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, + r->idiag_retrans = req->retrans; + + r->id.idiag_if = sk->sk_bound_dev_if; ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ r->id.idiag_cookie[0] = 0; ++ r->id.idiag_cookie[1] = 0; ++#else + r->id.idiag_cookie[0] = (u32)(unsigned long)req; + r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1); ++#endif + + tmo = req->expires - jiffies; + if (tmo < 0) +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index 984ec65..97ac518 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -18,12 +18,15 @@ + #include <linux/sched.h> + #include <linux/slab.h> + #include <linux/wait.h> ++#include <linux/security.h> + + #include <net/inet_connection_sock.h> + #include <net/inet_hashtables.h> + #include <net/secure_seq.h> + #include <net/ip.h> + ++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); ++ + /* + * Allocate and initialize a new local port bind bucket. + * The bindhash mutex for snum's hash chain must be held here. +@@ -530,6 +533,8 @@ ok: + twrefcnt += inet_twsk_bind_unhash(tw, hinfo); + spin_unlock(&head->lock); + ++ gr_update_task_in_ip_table(current, inet_sk(sk)); ++ + if (tw) { + inet_twsk_deschedule(tw, death_row); + while (twrefcnt) { +diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c +index 86f13c67..59a35b5 100644 +--- a/net/ipv4/inetpeer.c ++++ b/net/ipv4/inetpeer.c +@@ -436,8 +436,8 @@ relookup: + if (p) { + p->daddr = *daddr; + atomic_set(&p->refcnt, 1); +- atomic_set(&p->rid, 0); +- atomic_set(&p->ip_id_count, ++ atomic_set_unchecked(&p->rid, 0); ++ atomic_set_unchecked(&p->ip_id_count, + (daddr->family == AF_INET) ? 
+ secure_ip_id(daddr->addr.a4) : + secure_ipv6_id(daddr->addr.a6)); +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index fdaabf2..0ec3205 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp) + return 0; + + start = qp->rid; +- end = atomic_inc_return(&peer->rid); ++ end = atomic_inc_return_unchecked(&peer->rid); + qp->rid = end; + + rc = qp->q.fragments && (end - start) > max; +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index 09ff51b..d3968eb 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + len = min_t(unsigned int, len, opt->optlen); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, opt->__data, len)) ++ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) || ++ copy_to_user(optval, opt->__data, len)) + return -EFAULT; + return 0; + } +@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = flags; + +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index 99ec116..c5628fe 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg); ++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg); + set_fs(oldfs); + return res; + } +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index fd7a3f6..e5be655 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -984,6 +984,11 @@ static int __do_replace(struct net *net, const char *name, + unsigned int valid_hooks, + struct xt_table_info *newinfo, + unsigned int num_counters, ++ void __user *counters_ptr) __size_overflow(5); ++static int __do_replace(struct net *net, const char *name, ++ unsigned int valid_hooks, ++ struct xt_table_info *newinfo, ++ unsigned int num_counters, + void __user *counters_ptr) + { + int ret; +@@ -1104,6 +1109,8 @@ static int do_replace(struct net *net, const void __user *user, + } + + static int do_add_counters(struct net *net, const void __user *user, ++ unsigned int len, int compat) __size_overflow(3); ++static int do_add_counters(struct net *net, const void __user *user, + unsigned int len, int compat) + { + unsigned int i, curcpu; +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 24e556e..a8daf7a 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -1172,6 +1172,10 @@ 
get_entries(struct net *net, struct ipt_get_entries __user *uptr, + static int + __do_replace(struct net *net, const char *name, unsigned int valid_hooks, + struct xt_table_info *newinfo, unsigned int num_counters, ++ void __user *counters_ptr) __size_overflow(5); ++static int ++__do_replace(struct net *net, const char *name, unsigned int valid_hooks, ++ struct xt_table_info *newinfo, unsigned int num_counters, + void __user *counters_ptr) + { + int ret; +@@ -1293,6 +1297,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len) + + static int + do_add_counters(struct net *net, const void __user *user, ++ unsigned int len, int compat) __size_overflow(3); ++static int ++do_add_counters(struct net *net, const void __user *user, + unsigned int len, int compat) + { + unsigned int i, curcpu; +diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c +index 2133c30..0e8047e 100644 +--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c ++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c +@@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, + static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, + unsigned char *eoc, + unsigned long **oid, ++ unsigned int *len) __size_overflow(2); ++static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, ++ unsigned char *eoc, ++ unsigned long **oid, + unsigned int *len) + { + unsigned long subid; +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 43d4c3b..1914409 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f, + sk_rmem_alloc_get(sp), + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), + atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops), len); ++ atomic_read_unchecked(&sp->sk_drops), len); + } + + static int ping_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index 007e2eb..85a18a0 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) + int raw_rcv(struct sock *sk, struct sk_buff *skb) + { + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk) + + static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) + { ++ struct icmp_filter filter; ++ + if (optlen > sizeof(struct icmp_filter)) + optlen = sizeof(struct icmp_filter); +- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw_sk(sk)->filter = filter; + return 0; + } + + static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) + { + int len, ret = -EFAULT; ++ struct icmp_filter filter; + + if (get_user(len, optlen)) + goto out; +@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o + if (len > sizeof(struct icmp_filter)) + len = sizeof(struct icmp_filter); + ret = -EFAULT; +- if (put_user(len, optlen) || +- copy_to_user(optval, &raw_sk(sk)->filter, len)) ++ filter = raw_sk(sk)->filter; ++ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len)) + goto out; + ret = 0; + out: return ret; +@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, 
sock_i_uid(sp), 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + static int raw_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 94cdbc5..0cb0063 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, + + static inline int rt_genid(struct net *net) + { +- return atomic_read(&net->ipv4.rt_genid); ++ return atomic_read_unchecked(&net->ipv4.rt_genid); + } + + #ifdef CONFIG_PROC_FS +@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net) + unsigned char shuffle; + + get_random_bytes(&shuffle, sizeof(shuffle)); +- atomic_add(shuffle + 1U, &net->ipv4.rt_genid); ++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid); + redirect_genid++; + } + +@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net, + error = rt->dst.error; + if (peer) { + inet_peer_refcheck(rt->peer); +- id = atomic_read(&peer->ip_id_count) & 0xffff; ++ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff; + if (peer->tcp_ts_stamp) { + ts = peer->tcp_ts; + tsage = get_seconds() - peer->tcp_ts_stamp; +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index 90f6544..769c0e9 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -278,6 +278,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, + struct rtable *rt; + __u8 rcv_wscale; + bool ecn_ok = false; ++ struct flowi4 fl4; + + if (!sysctl_tcp_syncookies || !th->ack || th->rst) + goto out; +@@ -346,20 +347,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, + * hasn't changed since we received the original syn, but I see + * no easy way to do this. + */ +- { +- struct flowi4 fl4; +- +- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), +- RT_SCOPE_UNIVERSE, IPPROTO_TCP, +- inet_sk_flowi_flags(sk), +- (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, +- ireq->loc_addr, th->source, th->dest); +- security_req_classify_flow(req, flowi4_to_flowi(&fl4)); +- rt = ip_route_output_key(sock_net(sk), &fl4); +- if (IS_ERR(rt)) { +- reqsk_free(req); +- goto out; +- } ++ flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), ++ RT_SCOPE_UNIVERSE, IPPROTO_TCP, ++ inet_sk_flowi_flags(sk), ++ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ++ ireq->loc_addr, th->source, th->dest); ++ security_req_classify_flow(req, flowi4_to_flowi(&fl4)); ++ rt = ip_route_output_key(sock_net(sk), &fl4); ++ if (IS_ERR(rt)) { ++ reqsk_free(req); ++ goto out; + } + + /* Try to redo what tcp_v4_send_synack did. 
*/ +@@ -373,5 +370,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, + ireq->rcv_wscale = rcv_wscale; + + ret = get_cookie_sock(sk, skb, req, &rt->dst); ++ /* ip_queue_xmit() depends on our flow being setup ++ * Normal sockets get it right from inet_csk_route_child_sock() ++ */ ++ if (ret) ++ inet_sk(ret)->cork.fl.u.ip4 = fl4; + out: return ret; + } +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index eb90aa8..74908e1 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly; + int sysctl_tcp_low_latency __read_mostly; + EXPORT_SYMBOL(sysctl_tcp_low_latency); + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif + + #ifdef CONFIG_TCP_MD5SIG + static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, +@@ -1465,9 +1468,13 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, + inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; + newinet->inet_id = newtp->write_seq ^ jiffies; + +- if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) +- goto put_and_exit; +- ++ if (!dst) { ++ dst = inet_csk_route_child_sock(sk, newsk, req); ++ if (!dst) ++ goto put_and_exit; ++ } else { ++ /* syncookie case : see end of cookie_v4_check() */ ++ } + sk_setup_caps(newsk, dst); + + tcp_mtup_init(newsk); +@@ -1632,6 +1639,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v4_send_reset(rsk, skb); + discard: + kfree_skb(skb); +@@ -1694,12 +1704,19 @@ int tcp_v4_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; +- ++ } + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -1749,6 +1766,10 @@ no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v4_send_reset(NULL, skb); + } + +@@ -2409,7 +2430,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req, + 0, /* non standard timer */ + 0, /* open_requests have no inode */ + atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + req, ++#endif + len); + } + +@@ -2459,7 +2484,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) + sock_i_uid(sk), + icsk->icsk_probes_out, + sock_i_ino(sk), +- atomic_read(&sk->sk_refcnt), sk, ++ atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sk, ++#endif + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, +@@ -2487,7 +2517,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw, + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", + i, src, srcp, dest, destp, tw->tw_substate, 0, 0, + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, +- atomic_read(&tw->tw_refcnt), tw, len); ++ atomic_read(&tw->tw_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ tw, ++#endif ++ len); 
+ } + + #define TMPSZ 150 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 66363b6..b0654a3 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -27,6 +27,10 @@ + #include <net/inet_common.h> + #include <net/xfrm.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int sysctl_tcp_syncookies __read_mostly = 1; + EXPORT_SYMBOL(sysctl_tcp_syncookies); + +@@ -751,6 +755,10 @@ listen_overflow: + + embryonic_reset: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); ++ ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + if (!(flg & TCP_FLAG_RST)) + req->rsk_ops->send_reset(sk, skb); + +diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c +index 85ee7eb..53277ab 100644 +--- a/net/ipv4/tcp_probe.c ++++ b/net/ipv4/tcp_probe.c +@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, + if (cnt + width >= len) + break; + +- if (copy_to_user(buf + cnt, tbuf, width)) ++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width)) + return -EFAULT; + cnt += width; + } +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 2e0f0af..e2948bf 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -22,6 +22,10 @@ + #include <linux/gfp.h> + #include <net/tcp.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_lastack_retries; ++#endif ++ + int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; + int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; + int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; +@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk) + } + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if ((sk->sk_state == TCP_LAST_ACK) && ++ (grsec_lastack_retries > 0) && ++ (grsec_lastack_retries < retry_until)) ++ retry_until = grsec_lastack_retries; ++#endif ++ + if (retransmits_timed_out(sk, retry_until, + syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { + /* Has it gone just too far? */ +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 5a65eea..bd913a1 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -86,6 +86,7 @@ + #include <linux/types.h> + #include <linux/fcntl.h> + #include <linux/module.h> ++#include <linux/security.h> + #include <linux/socket.h> + #include <linux/sockios.h> + #include <linux/igmp.h> +@@ -108,6 +109,10 @@ + #include <trace/events/udp.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + struct udp_table udp_table __read_mostly; + EXPORT_SYMBOL(udp_table); + +@@ -565,6 +570,9 @@ found: + return s; + } + ++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); ++ + /* + * This routine is called by the ICMP module when it gets some + * sort of error condition. If err < 0 then the socket should +@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + dport = usin->sin_port; + if (dport == 0) + return -EINVAL; ++ ++ err = gr_search_udp_sendmsg(sk, usin); ++ if (err) ++ return err; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; ++ ++ err = gr_search_udp_sendmsg(sk, NULL); ++ if (err) ++ return err; ++ + daddr = inet->inet_daddr; + dport = inet->inet_dport; + /* Open fast path for connected socket. 
+@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk) + udp_lib_checksum_complete(skb)) { + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + __skb_unlink(skb, rcvq); + __skb_queue_tail(&list_kill, skb); + } +@@ -1185,6 +1202,10 @@ try_again: + if (!skb) + goto out; + ++ err = gr_search_udp_recvmsg(sk, skb); ++ if (err) ++ goto out_free; ++ + ulen = skb->len - sizeof(struct udphdr); + copied = len; + if (copied > ulen) +@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + + drop: + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return -1; + } +@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count, + skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); + + if (!skb1) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, +@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + goto csum_error; + + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + + /* +@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops), len); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops), len); + } + + int udp4_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index a5521c5..984a2f4 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -2153,7 +2153,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg) + p.iph.ihl = 5; + p.iph.protocol = IPPROTO_IPV6; + p.iph.ttl = 64; +- ifr.ifr_ifru.ifru_data = (__force void __user *)&p; ++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p; + + if (ops->ndo_do_ioctl) { + mm_segment_t oldfs = get_fs(); +diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c +index 4c0f894..fca5d15 100644 +--- a/net/ipv6/ah6.c ++++ b/net/ipv6/ah6.c +@@ -56,6 +56,8 @@ struct ah_skb_cb { + #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) + + static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, ++ unsigned int size) __size_overflow(3); ++static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, + unsigned int size) + { + unsigned int len; +diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c +index 1567fb1..29af910 100644 +--- a/net/ipv6/inet6_connection_sock.c ++++ b/net/ipv6/inet6_connection_sock.c +@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, + #ifdef CONFIG_XFRM + { + struct rt6_info *rt = (struct rt6_info *)dst; +- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); ++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid); + } + #endif + } +@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) + #ifdef CONFIG_XFRM + if (dst) { + struct rt6_info *rt = 
(struct rt6_info *)dst; +- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { ++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) { + __sk_dst_reset(sk); + dst = NULL; + } +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 26cb08c..8af9877 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = flags; + +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 94874b0..dc413fa 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -1194,6 +1194,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, + static int + __do_replace(struct net *net, const char *name, unsigned int valid_hooks, + struct xt_table_info *newinfo, unsigned int num_counters, ++ void __user *counters_ptr) __size_overflow(5); ++static int ++__do_replace(struct net *net, const char *name, unsigned int valid_hooks, ++ struct xt_table_info *newinfo, unsigned int num_counters, + void __user *counters_ptr) + { + int ret; +@@ -1315,6 +1319,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len) + + static int + do_add_counters(struct net *net, const void __user *user, unsigned int len, ++ int compat) __size_overflow(3); ++static int ++do_add_counters(struct net *net, const void __user *user, unsigned int len, + int compat) + { + unsigned int i, curcpu; +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 361ebf3..d5628fb 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && + skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + struct raw6_sock *rp = raw6_sk(sk); + + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + + if (inet->hdrincl) { + if (skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -601,7 +601,7 @@ out: + return err; + } + +-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, ++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, + struct flowi6 *fl6, struct dst_entry **dstp, + unsigned int flags) + { +@@ -909,12 +909,15 @@ do_confirm: + static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, + char __user *optval, int optlen) + { ++ struct icmp6_filter filter; ++ + switch (optname) { + case ICMPV6_FILTER: + if (optlen > sizeof(struct icmp6_filter)) + optlen = sizeof(struct icmp6_filter); +- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw6_sk(sk)->filter = filter; + return 0; + default: + return -ENOPROTOOPT; +@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + char __user 
*optval, int __user *optlen) + { + int len; ++ struct icmp6_filter filter; + + switch (optname) { + case ICMPV6_FILTER: +@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + len = sizeof(struct icmp6_filter); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) ++ filter = raw6_sk(sk)->filter; ++ if (len > sizeof filter || copy_to_user(optval, &filter, len)) + return -EFAULT; + return 0; + default: +@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) + 0, 0L, 0, + sock_i_uid(sp), 0, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + static int raw6_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index b859e4a..f9d1589 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, + } + #endif + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + static void tcp_v6_hash(struct sock *sk) + { + if (sk->sk_state != TCP_CLOSE) { +@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v6_send_reset(sk, skb); + discard: + if (opt_skb) +@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; ++ } + + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -1783,6 +1798,10 @@ no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v6_send_reset(NULL, skb); + } + +@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq, + uid, + 0, /* non standard timer */ + 0, /* open_requests have no inode */ +- 0, req); ++ 0, ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL ++#else ++ req ++#endif ++ ); + } + + static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) +@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) + sock_i_uid(sp), + icsk->icsk_probes_out, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, +@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq, + dest->s6_addr32[2], dest->s6_addr32[3], destp, + tw->tw_substate, 0, 0, + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, +- atomic_read(&tw->tw_refcnt), tw); ++ atomic_read(&tw->tw_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL ++#else ++ tw ++#endif ++ ); + } + + static int tcp6_seq_show(struct seq_file *seq, void *v) +diff 
--git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 8c25419..47a51ae 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -50,6 +50,10 @@ + #include <linux/seq_file.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) + { + const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; +@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) + + return 0; + drop: +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + drop_no_sk_drops_inc: + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + kfree_skb(skb); +@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count, + continue; + } + drop: +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + UDP6_INC_STATS_BH(sock_net(sk), + UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); + UDP6_INC_STATS_BH(sock_net(sk), +@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, + proto == IPPROTO_UDPLITE); + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + + kfree_skb(skb); +@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + if (!sock_owned_by_user(sk)) + udpv6_queue_rcv_skb(sk, skb); + else if (sk_add_backlog(sk, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + bh_unlock_sock(sk); + sock_put(sk); + goto discard; +@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket + 0, 0L, 0, + sock_i_uid(sp), 0, + sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif ++ atomic_read_unchecked(&sp->sk_drops)); + } + + int udp6_seq_show(struct seq_file *seq, void *v) +diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c +index 253695d..9481ce8 100644 +--- a/net/irda/ircomm/ircomm_tty.c ++++ b/net/irda/ircomm/ircomm_tty.c +@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + add_wait_queue(&self->open_wait, &wait); + + IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); + + /* As far as I can see, we protect open_count - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = 1; +- self->open_count--; ++ local_dec(&self->open_count); + } + spin_unlock_irqrestore(&self->spinlock, flags); +- self->blocked_open++; ++ local_inc(&self->blocked_open); + + while (1) { + if (tty->termios->c_cflag & CBAUD) { +@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + } + + IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) ); + + schedule(); + } +@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + if (extra_count) { + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- 
self->open_count++; ++ local_inc(&self->open_count); + spin_unlock_irqrestore(&self->spinlock, flags); + } +- self->blocked_open--; ++ local_dec(&self->blocked_open); + + IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count); ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count)); + + if (!retval) + self->flags |= ASYNC_NORMAL_ACTIVE; +@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) + } + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- self->open_count++; ++ local_inc(&self->open_count); + + tty->driver_data = self; + self->tty = tty; + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, +- self->line, self->open_count); ++ self->line, local_read(&self->open_count)); + + /* Not really used by us, but lets do it anyway */ + self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; +@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + return; + } + +- if ((tty->count == 1) && (self->open_count != 1)) { ++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. state->count should always +@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + */ + IRDA_DEBUG(0, "%s(), bad serial port count; " + "tty->count is 1, state->count is %d\n", __func__ , +- self->open_count); +- self->open_count = 1; ++ local_read(&self->open_count)); ++ local_set(&self->open_count, 1); + } + +- if (--self->open_count < 0) { ++ if (local_dec_return(&self->open_count) < 0) { + IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", +- __func__, self->line, self->open_count); +- self->open_count = 0; ++ __func__, self->line, local_read(&self->open_count)); ++ local_set(&self->open_count, 0); + } +- if (self->open_count) { ++ if (local_read(&self->open_count)) { + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); +@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) + tty->closing = 0; + self->tty = NULL; + +- if (self->blocked_open) { ++ if (local_read(&self->blocked_open)) { + if (self->close_delay) + schedule_timeout_interruptible(self->close_delay); + wake_up_interruptible(&self->open_wait); +@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) + spin_lock_irqsave(&self->spinlock, flags); + self->flags &= ~ASYNC_NORMAL_ACTIVE; + self->tty = NULL; +- self->open_count = 0; ++ local_set(&self->open_count, 0); + spin_unlock_irqrestore(&self->spinlock, flags); + + wake_up_interruptible(&self->open_wait); +@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) + seq_putc(m, '\n'); + + seq_printf(m, "Role: %s\n", self->client ? 
"client" : "server"); +- seq_printf(m, "Open count: %d\n", self->open_count); ++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count)); + seq_printf(m, "Max data size: %d\n", self->max_data_size); + seq_printf(m, "Max header size: %d\n", self->max_header_size); + +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c +index 274d150..656a144 100644 +--- a/net/iucv/af_iucv.c ++++ b/net/iucv/af_iucv.c +@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk) + + write_lock_bh(&iucv_sk_list.lock); + +- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); ++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + while (__iucv_get_sock_by_name(name)) { + sprintf(name, "%08x", +- atomic_inc_return(&iucv_sk_list.autobind_name)); ++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + } + + write_unlock_bh(&iucv_sk_list.lock); +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 1e733e9..3d73c9f 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc + static u32 get_acqseq(void) + { + u32 res; +- static atomic_t acqseq; ++ static atomic_unchecked_t acqseq; + + do { +- res = atomic_inc_return(&acqseq); ++ res = atomic_inc_return_unchecked(&acqseq); + } while (!res); + return res; + } +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 73495f1..ad51356 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -27,6 +27,7 @@ + #include <net/ieee80211_radiotap.h> + #include <net/cfg80211.h> + #include <net/mac80211.h> ++#include <asm/local.h> + #include "key.h" + #include "sta_info.h" + +@@ -764,7 +765,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ local_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll, +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index 30d7355..e260095 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) + break; + } + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); + + if (!is_valid_ether_addr(dev->dev_addr)) { +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + return -EADDRNOTAVAIL; + } +@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) + mutex_unlock(&local->mtx); + + if (coming_up) +- local->open_count++; ++ local_inc(&local->open_count); + + if (hw_reconf_flags) { + ieee80211_hw_config(local, hw_reconf_flags); +@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) + err_del_interface: + drv_remove_interface(local, &sdata->vif); + err_stop: +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + } + + if (going_down) +- local->open_count--; ++ local_dec(&local->open_count); + + switch (sdata->vif.type) { + case 
NL80211_IFTYPE_AP_VLAN: +@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + if (local->ops->napi_poll) + napi_disable(&local->napi); + ieee80211_clear_tx_pending(local); +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index 7d9b21d..0687004 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) + local->hw.conf.power_level = power; + } + +- if (changed && local->open_count) { ++ if (changed && local_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c +index 9ee7164..56c5061 100644 +--- a/net/mac80211/pm.c ++++ b/net/mac80211/pm.c +@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto suspend; + + ieee80211_scan_cancel(local); +@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + +- local->wowlan = wowlan && local->open_count; ++ local->wowlan = wowlan && local_read(&local->open_count); + if (local->wowlan) { + int err = drv_suspend(local, wowlan); + if (err < 0) { +@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + } + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (local_read(&local->open_count)) + ieee80211_stop_device(local); + + suspend: +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index 7d84b87..6a69cd9 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + + ASSERT_RTNL(); + +- if (local->open_count) ++ if (local_read(&local->open_count)) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { +diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c +index c97a065..ff61928 100644 +--- a/net/mac80211/rc80211_pid_debugfs.c ++++ b/net/mac80211/rc80211_pid_debugfs.c +@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, + + spin_unlock_irqrestore(&events->lock, status); + +- if (copy_to_user(buf, pb, p)) ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index d5230ec..c604b21 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) + drv_set_coverage_class(local, hw->wiphy->coverage_class); + + /* everything else happens only if HW was up & running */ +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto wake_up; + + /* +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig +index d5597b7..ab6d39c 100644 +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP + + To compile it as a module, choose M here. If unsure, say N. 
+ ++config NETFILTER_XT_MATCH_GRADM ++ tristate '"gradm" match support' ++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED ++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC ++ ---help--- ++ The gradm match allows to match on grsecurity RBAC being enabled. ++ It is useful when iptables rules are applied early on bootup to ++ prevent connections to the machine (except from a trusted host) ++ while the RBAC system is disabled. ++ + config NETFILTER_XT_MATCH_HASHLIMIT + tristate '"hashlimit" match support' + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) +diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile +index 1a02853..5d8c22e 100644 +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o + obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o + obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o + obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o ++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c +index 29fa5ba..8debc79 100644 +--- a/net/netfilter/ipvs/ip_vs_conn.c ++++ b/net/netfilter/ipvs/ip_vs_conn.c +@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) + /* Increase the refcnt counter of the dest */ + atomic_inc(&dest->refcnt); + +- conn_flags = atomic_read(&dest->conn_flags); ++ conn_flags = atomic_read_unchecked(&dest->conn_flags); + if (cp->protocol != IPPROTO_UDP) + conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; + /* Bind with the destination and its corresponding transmitter */ +@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, + atomic_set(&cp->refcnt, 1); + + atomic_set(&cp->n_control, 0); +- atomic_set(&cp->in_pkts, 0); ++ atomic_set_unchecked(&cp->in_pkts, 0); + + atomic_inc(&ipvs->conn_count); + if (flags & IP_VS_CONN_F_NO_CPORT) +@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp) + + /* Don't drop the entry if its number of incoming packets is not + located in [0, 8] */ +- i = atomic_read(&cp->in_pkts); ++ i = atomic_read_unchecked(&cp->in_pkts); + if (i > 8 || i < 0) return 0; + + if (!todrop_rate[i]) return 0; +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index 6dc7d7d..e45913a 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, + ret = cp->packet_xmit(skb, cp, pd->pp); + /* do not touch skb anymore */ + +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + ip_vs_conn_put(cp); + return ret; + } +@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) + if (cp->flags & IP_VS_CONN_F_ONE_PACKET) + pkts = sysctl_sync_threshold(ipvs); + else +- pkts = atomic_add_return(1, &cp->in_pkts); ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + + if ((ipvs->sync_state & IP_VS_STATE_MASTER) && + cp->protocol == IPPROTO_SCTP) { +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index e1a66cf..0910076 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, + ip_vs_rs_hash(ipvs, dest); + write_unlock_bh(&ipvs->rs_lock); + } +- 
atomic_set(&dest->conn_flags, conn_flags); ++ atomic_set_unchecked(&dest->conn_flags, conn_flags); + + /* bind the service */ + if (!dest->svc) { +@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + " %-7s %-6d %-10d %-10d\n", + &dest->addr.in6, + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + "%-7s %-6d %-10d %-10d\n", + ntohl(dest->addr.ip), + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, + + entry.addr = dest->addr.ip; + entry.port = dest->port; +- entry.conn_flags = atomic_read(&dest->conn_flags); ++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags); + entry.weight = atomic_read(&dest->weight); + entry.u_threshold = dest->u_threshold; + entry.l_threshold = dest->l_threshold; +@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) + NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); + + NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, +- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); ++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); + NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index 2b6678c0..aaa41fc 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -649,7 +649,7 @@ control: + * i.e only increment in_pkts for Templates. 
+ */ + if (cp->flags & IP_VS_CONN_F_TEMPLATE) { +- int pkts = atomic_add_return(1, &cp->in_pkts); ++ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + + if (pkts % sysctl_sync_period(ipvs) != 1) + return; +@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, + + if (opt) + memcpy(&cp->in_seq, opt, sizeof(*opt)); +- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); ++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs)); + cp->state = state; + cp->old_state = cp->state; + /* +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index aa2d720..d8aa111 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c +index 66b2c54..c7884e3 100644 +--- a/net/netfilter/nfnetlink_log.c ++++ b/net/netfilter/nfnetlink_log.c +@@ -70,7 +70,7 @@ struct nfulnl_instance { + }; + + static DEFINE_SPINLOCK(instances_lock); +-static atomic_t global_seq; ++static atomic_unchecked_t global_seq; + + #define INSTANCE_BUCKETS 16 + static struct hlist_head instance_table[INSTANCE_BUCKETS]; +@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst, + /* global sequence number */ + if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) + NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, +- htonl(atomic_inc_return(&global_seq))); ++ htonl(atomic_inc_return_unchecked(&global_seq))); + + if (data_len) { + struct nlattr *nla; +diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c +new file mode 100644 +index 0000000..6905327 +--- /dev/null ++++ b/net/netfilter/xt_gradm.c +@@ -0,0 +1,51 @@ ++/* ++ * gradm match for netfilter ++ * Copyright © Zbigniew Krzystolik, 2010 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License; either version ++ * 2 or 3 as published by the Free Software Foundation. 
++ */ ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/skbuff.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/grsecurity.h> ++#include <linux/netfilter/xt_gradm.h> ++ ++static bool ++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par) ++{ ++ const struct xt_gradm_mtinfo *info = par->matchinfo; ++ bool retval = false; ++ if (gr_acl_is_enabled()) ++ retval = true; ++ return retval ^ info->invflags; ++} ++ ++static struct xt_match gradm_mt_reg __read_mostly = { ++ .name = "gradm", ++ .revision = 0, ++ .family = NFPROTO_UNSPEC, ++ .match = gradm_mt, ++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)), ++ .me = THIS_MODULE, ++}; ++ ++static int __init gradm_mt_init(void) ++{ ++ return xt_register_match(&gradm_mt_reg); ++} ++ ++static void __exit gradm_mt_exit(void) ++{ ++ xt_unregister_match(&gradm_mt_reg); ++} ++ ++module_init(gradm_mt_init); ++module_exit(gradm_mt_exit); ++MODULE_AUTHOR("Zbigniew Krzystolik zbyniu@destrukcja.pl"); ++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("ipt_gradm"); ++MODULE_ALIAS("ip6t_gradm"); +diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c +index 4fe4fb4..87a89e5 100644 +--- a/net/netfilter/xt_statistic.c ++++ b/net/netfilter/xt_statistic.c +@@ -19,7 +19,7 @@ + #include <linux/module.h> + + struct xt_statistic_priv { +- atomic_t count; ++ atomic_unchecked_t count; + } ____cacheline_aligned_in_smp; + + MODULE_LICENSE("GPL"); +@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) + break; + case XT_STATISTIC_MODE_NTH: + do { +- oval = atomic_read(&info->master->count); ++ oval = atomic_read_unchecked(&info->master->count); + nval = (oval == info->u.nth.every) ? 
0 : oval + 1; +- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval); ++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval); + if (nval == 0) + ret = !ret; + break; +@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par) + info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); + if (info->master == NULL) + return -ENOMEM; +- atomic_set(&info->master->count, info->u.nth.count); ++ atomic_set_unchecked(&info->master->count, info->u.nth.count); + + return 0; + } +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 1201b6d..bcff8c6 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk) + sk->sk_error_report(sk); + } + } +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + + static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) +@@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v) + sk_wmem_alloc_get(s), + nlk->cb, + atomic_read(&s->sk_refcnt), +- atomic_read(&s->sk_drops), ++ atomic_read_unchecked(&s->sk_drops), + sock_i_ino(s) + ); + +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c +index 732152f..60bb09e 100644 +--- a/net/netrom/af_netrom.c ++++ b/net/netrom/af_netrom.c +@@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, + struct sock *sk = sock->sk; + struct nr_sock *nr = nr_sk(sk); + ++ memset(sax, 0, sizeof(*sax)); + lock_sock(sk); + if (peer != 0) { + if (sk->sk_state != TCP_ESTABLISHED) { +@@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, + *uaddr_len = sizeof(struct full_sockaddr_ax25); + } else { + sax->fsa_ax25.sax25_family = AF_NETROM; +- sax->fsa_ax25.sax25_ndigis = 0; + sax->fsa_ax25.sax25_call = nr->source_addr; + *uaddr_len = sizeof(struct sockaddr_ax25); + } +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index d9d4970..d5a6a68 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, + + spin_lock(&sk->sk_receive_queue.lock); + po->stats.tp_packets++; +- skb->dropcount = atomic_read(&sk->sk_drops); ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); + __skb_queue_tail(&sk->sk_receive_queue, skb); + spin_unlock(&sk->sk_receive_queue.lock); + sk->sk_data_ready(sk, skb->len); +@@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, + drop_n_acct: + spin_lock(&sk->sk_receive_queue.lock); + po->stats.tp_drops++; +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + spin_unlock(&sk->sk_receive_queue.lock); + + drop_n_restore: +@@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, + case PACKET_HDRLEN: + if (len > sizeof(int)) + len = sizeof(int); +- if (copy_from_user(&val, optval, len)) ++ if (len > sizeof(val) || copy_from_user(&val, optval, len)) + return -EFAULT; + switch (val) { + case TPACKET_V1: +@@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, + + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, data, len)) ++ if (len > sizeof(st) || copy_to_user(optval, data, len)) + return -EFAULT; + return 0; + } +diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c +index d65f699..05aa6ce 100644 +--- a/net/phonet/af_phonet.c ++++ b/net/phonet/af_phonet.c +@@ -41,7 +41,7 @@ static 
struct phonet_protocol *phonet_proto_get(unsigned int protocol) + { + struct phonet_protocol *pp; + +- if (protocol >= PHONET_NPROTO) ++ if (protocol < 0 || protocol >= PHONET_NPROTO) + return NULL; + + rcu_read_lock(); +@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol, + { + int err = 0; + +- if (protocol >= PHONET_NPROTO) ++ if (protocol < 0 || protocol >= PHONET_NPROTO) + return -EINVAL; + + err = proto_register(pp->prot, 1); +diff --git a/net/phonet/pep.c b/net/phonet/pep.c +index 2ba6e9f..409573f 100644 +--- a/net/phonet/pep.c ++++ b/net/phonet/pep.c +@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + + case PNS_PEP_CTRL_REQ: + if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + break; + } + __skb_pull(skb, 4); +@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + } + + if (pn->rx_credits == 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + err = -ENOBUFS; + break; + } +@@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) + } + + if (pn->rx_credits == 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + err = NET_RX_DROP; + break; + } +diff --git a/net/phonet/socket.c b/net/phonet/socket.c +index 4c7eff3..59c727f 100644 +--- a/net/phonet/socket.c ++++ b/net/phonet/socket.c +@@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) + pn->resource, sk->sk_state, + sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), + sock_i_uid(sk), sock_i_ino(sk), +- atomic_read(&sk->sk_refcnt), sk, +- atomic_read(&sk->sk_drops), &len); ++ atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sk, ++#endif ++ atomic_read_unchecked(&sk->sk_drops), &len); + } + seq_printf(seq, "%*s\n", 127 - len, ""); + return 0; +diff --git a/net/rds/cong.c b/net/rds/cong.c +index e5b65ac..f3b6fb7 100644 +--- a/net/rds/cong.c ++++ b/net/rds/cong.c +@@ -78,7 +78,7 @@ + * finds that the saved generation number is smaller than the global generation + * number, it wakes up the process. 
+ */ +-static atomic_t rds_cong_generation = ATOMIC_INIT(0); ++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0); + + /* + * Congestion monitoring +@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) + rdsdebug("waking map %p for %pI4\n", + map, &map->m_addr); + rds_stats_inc(s_cong_update_received); +- atomic_inc(&rds_cong_generation); ++ atomic_inc_unchecked(&rds_cong_generation); + if (waitqueue_active(&map->m_waitq)) + wake_up(&map->m_waitq); + if (waitqueue_active(&rds_poll_waitq)) +@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated); + + int rds_cong_updated_since(unsigned long *recent) + { +- unsigned long gen = atomic_read(&rds_cong_generation); ++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation); + + if (likely(*recent == gen)) + return 0; +diff --git a/net/rds/ib.h b/net/rds/ib.h +index edfaaaf..8c89879 100644 +--- a/net/rds/ib.h ++++ b/net/rds/ib.h +@@ -128,7 +128,7 @@ struct rds_ib_connection { + /* sending acks */ + unsigned long i_ack_flags; + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_t i_ack_next; /* next ACK to send */ ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */ + #else + spinlock_t i_ack_lock; /* protect i_ack_next */ + u64 i_ack_next; /* next ACK to send */ +diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c +index 51c8689..36c555f 100644 +--- a/net/rds/ib_cm.c ++++ b/net/rds/ib_cm.c +@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn) + /* Clear the ACK state */ + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_set(&ic->i_ack_next, 0); ++ atomic64_set_unchecked(&ic->i_ack_next, 0); + #else + ic->i_ack_next = 0; + #endif +diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c +index e29e0ca..fa3a6a3 100644 +--- a/net/rds/ib_recv.c ++++ b/net/rds/ib_recv.c +@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) + static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, + int ack_required) + { +- atomic64_set(&ic->i_ack_next, seq); ++ atomic64_set_unchecked(&ic->i_ack_next, seq); + if (ack_required) { + smp_mb__before_clear_bit(); + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); +@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); + smp_mb__after_clear_bit(); + +- return atomic64_read(&ic->i_ack_next); ++ return atomic64_read_unchecked(&ic->i_ack_next); + } + #endif + +diff --git a/net/rds/iw.h b/net/rds/iw.h +index 04ce3b1..48119a6 100644 +--- a/net/rds/iw.h ++++ b/net/rds/iw.h +@@ -134,7 +134,7 @@ struct rds_iw_connection { + /* sending acks */ + unsigned long i_ack_flags; + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_t i_ack_next; /* next ACK to send */ ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */ + #else + spinlock_t i_ack_lock; /* protect i_ack_next */ + u64 i_ack_next; /* next ACK to send */ +diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c +index 9556d28..f046d0e 100644 +--- a/net/rds/iw_cm.c ++++ b/net/rds/iw_cm.c +@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn) + /* Clear the ACK state */ + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_set(&ic->i_ack_next, 0); ++ atomic64_set_unchecked(&ic->i_ack_next, 0); + #else + ic->i_ack_next = 0; + #endif +diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c +index 5e57347..3916042 100644 +--- a/net/rds/iw_recv.c ++++ b/net/rds/iw_recv.c +@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic) + 
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq, + int ack_required) + { +- atomic64_set(&ic->i_ack_next, seq); ++ atomic64_set_unchecked(&ic->i_ack_next, seq); + if (ack_required) { + smp_mb__before_clear_bit(); + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); +@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic) + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); + smp_mb__after_clear_bit(); + +- return atomic64_read(&ic->i_ack_next); ++ return atomic64_read_unchecked(&ic->i_ack_next); + } + #endif + +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index edac9ef..16bcb98 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock) + int val = 1; + + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c +index 1b4fd68..2234175 100644 +--- a/net/rds/tcp_send.c ++++ b/net/rds/tcp_send.c +@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val) + + oldfs = get_fs(); + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index 74c064c..fdec26f 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops; + __be32 rxrpc_epoch; + + /* current debugging ID */ +-atomic_t rxrpc_debug_id; ++atomic_unchecked_t rxrpc_debug_id; + + /* count of skbs currently in use */ + atomic_t rxrpc_n_skbs; +diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c +index f99cfce..cc529dd 100644 +--- a/net/rxrpc/ar-ack.c ++++ b/net/rxrpc/ar-ack.c +@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + _enter("{%d,%d,%d,%d},", + call->acks_hard, call->acks_unacked, +- atomic_read(&call->sequence), ++ atomic_read_unchecked(&call->sequence), + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); + + stop = 0; +@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + /* each Tx packet has a new serial number */ + sp->hdr.serial = +- htonl(atomic_inc_return(&call->conn->serial)); ++ htonl(atomic_inc_return_unchecked(&call->conn->serial)); + + hdr = (struct rxrpc_header *) txb->head; + hdr->serial = sp->hdr.serial; +@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) + */ + static void rxrpc_clear_tx_window(struct rxrpc_call *call) + { +- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); ++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence)); + } + + /* +@@ -629,7 +629,7 @@ process_further: + + latest = ntohl(sp->hdr.serial); + hard = ntohl(ack.firstPacket); +- tx = atomic_read(&call->sequence); ++ tx = atomic_read_unchecked(&call->sequence); + + _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + latest, +@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work) + goto maybe_reschedule; + + send_ACK_with_skew: +- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - ++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) - + ntohl(ack.serial)); + send_ACK: + mtu = call->conn->trans->peer->if_mtu; +@@ -1173,7 +1173,7 @@ send_ACK: + ackinfo.rxMTU = htonl(5692); + ackinfo.jumbo_max = htonl(4); + +- hdr.serial 
= htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + ntohl(hdr.serial), + ntohs(ack.maxSkew), +@@ -1191,7 +1191,7 @@ send_ACK: + send_message: + _debug("send message"); + +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); + send_message_2: + +diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c +index bf656c2..48f9d27 100644 +--- a/net/rxrpc/ar-call.c ++++ b/net/rxrpc/ar-call.c +@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) + spin_lock_init(&call->lock); + rwlock_init(&call->state_lock); + atomic_set(&call->usage, 1); +- call->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; + + memset(&call->sock_node, 0xed, sizeof(call->sock_node)); +diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c +index 4106ca9..a338d7a 100644 +--- a/net/rxrpc/ar-connection.c ++++ b/net/rxrpc/ar-connection.c +@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) + rwlock_init(&conn->lock); + spin_lock_init(&conn->state_lock); + atomic_set(&conn->usage, 1); +- conn->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + conn->avail_calls = RXRPC_MAXCALLS; + conn->size_align = 4; + conn->header_size = sizeof(struct rxrpc_header); +diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c +index e7ed43a..6afa140 100644 +--- a/net/rxrpc/ar-connevent.c ++++ b/net/rxrpc/ar-connevent.c +@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c +index 1a2b0633..e8d1382 100644 +--- a/net/rxrpc/ar-input.c ++++ b/net/rxrpc/ar-input.c +@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) + /* track the latest serial number on this connection for ACK packet + * information */ + serial = ntohl(sp->hdr.serial); +- hi_serial = atomic_read(&call->conn->hi_serial); ++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial); + while (serial > hi_serial) +- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, ++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial, + serial); + + /* request ACK generation for any ACK or DATA packet that requests +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h +index 8e22bd3..f66d1c0 100644 +--- a/net/rxrpc/ar-internal.h ++++ b/net/rxrpc/ar-internal.h +@@ -272,8 +272,8 @@ struct rxrpc_connection { + int error; /* error code for local abort */ + int debug_id; /* debug ID for printks */ + unsigned call_counter; /* call ID counter */ +- atomic_t serial; /* packet serial number counter */ +- atomic_t hi_serial; /* highest serial number received */ ++ atomic_unchecked_t serial; /* packet serial number counter */ ++ atomic_unchecked_t hi_serial; /* highest serial number received */ + u8 avail_calls; /* 
number of calls available */ + u8 size_align; /* data size alignment (for security) */ + u8 header_size; /* rxrpc + security header size */ +@@ -346,7 +346,7 @@ struct rxrpc_call { + spinlock_t lock; + rwlock_t state_lock; /* lock for state transition */ + atomic_t usage; +- atomic_t sequence; /* Tx data packet sequence counter */ ++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */ + u32 abort_code; /* local/remote abort code */ + enum { /* current state of call */ + RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ +@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) + */ + extern atomic_t rxrpc_n_skbs; + extern __be32 rxrpc_epoch; +-extern atomic_t rxrpc_debug_id; ++extern atomic_unchecked_t rxrpc_debug_id; + extern struct workqueue_struct *rxrpc_workqueue; + + /* +diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c +index 87f7135..74d3703 100644 +--- a/net/rxrpc/ar-local.c ++++ b/net/rxrpc/ar-local.c +@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) + spin_lock_init(&local->lock); + rwlock_init(&local->services_lock); + atomic_set(&local->usage, 1); +- local->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&local->srx, srx, sizeof(*srx)); + } + +diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c +index 338d793..47391d0 100644 +--- a/net/rxrpc/ar-output.c ++++ b/net/rxrpc/ar-output.c +@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb, + sp->hdr.cid = call->cid; + sp->hdr.callNumber = call->call_id; + sp->hdr.seq = +- htonl(atomic_inc_return(&call->sequence)); ++ htonl(atomic_inc_return_unchecked(&call->sequence)); + sp->hdr.serial = +- htonl(atomic_inc_return(&conn->serial)); ++ htonl(atomic_inc_return_unchecked(&conn->serial)); + sp->hdr.type = RXRPC_PACKET_TYPE_DATA; + sp->hdr.userStatus = 0; + sp->hdr.securityIndex = conn->security_ix; +diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c +index 2754f09..b20e38f 100644 +--- a/net/rxrpc/ar-peer.c ++++ b/net/rxrpc/ar-peer.c +@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, + INIT_LIST_HEAD(&peer->error_targets); + spin_lock_init(&peer->lock); + atomic_set(&peer->usage, 1); +- peer->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&peer->srx, srx, sizeof(*srx)); + + rxrpc_assess_MTU_size(peer); +diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c +index 38047f7..9f48511 100644 +--- a/net/rxrpc/ar-proc.c ++++ b/net/rxrpc/ar-proc.c +@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) + atomic_read(&conn->usage), + rxrpc_conn_states[conn->state], + key_serial(conn->key), +- atomic_read(&conn->serial), +- atomic_read(&conn->hi_serial)); ++ atomic_read_unchecked(&conn->serial), ++ atomic_read_unchecked(&conn->hi_serial)); + + return 0; + } +diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c +index 92df566..87ec1bf 100644 +--- a/net/rxrpc/ar-transport.c ++++ b/net/rxrpc/ar-transport.c +@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, + spin_lock_init(&trans->client_lock); + rwlock_init(&trans->conn_lock); + atomic_set(&trans->usage, 1); +- trans->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + + if (peer->srx.transport.family == AF_INET) { + 
switch (peer->srx.transport_type) { +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index 7635107..4670276 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; + +- hdr->serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); +diff --git a/net/sctp/proc.c b/net/sctp/proc.c +index 1e2eee8..ce3967e 100644 +--- a/net/sctp/proc.c ++++ b/net/sctp/proc.c +@@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) + seq_printf(seq, + "%8pK %8pK %-3d %-3d %-2d %-4d " + "%4d %8d %8d %7d %5lu %-5d %5d ", +- assoc, sk, sctp_sk(sk)->type, sk->sk_state, ++ assoc, sk, ++ sctp_sk(sk)->type, sk->sk_state, + assoc->state, hash, + assoc->assoc_id, + assoc->sndbuf_used, +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 54a7cd2..944edae 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, + addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; + if (space_left < addrlen) + return -ENOMEM; +- if (copy_to_user(to, &temp, addrlen)) ++ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen)) + return -EFAULT; + to += addrlen; + cnt++; +diff --git a/net/socket.c b/net/socket.c +index 2dce67a..1e91168 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -88,6 +88,7 @@ + #include <linux/nsproxy.h> + #include <linux/magic.h> + #include <linux/slab.h> ++#include <linux/in.h> + + #include <asm/uaccess.h> + #include <asm/unistd.h> +@@ -105,6 +106,8 @@ + #include <linux/sockios.h> + #include <linux/atalk.h> + ++#include <linux/grsock.h> ++ + static int sock_no_open(struct inode *irrelevant, struct file *dontcare); + static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); +@@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type, + &sockfs_dentry_operations, SOCKFS_MAGIC); + } + +-static struct vfsmount *sock_mnt __read_mostly; ++struct vfsmount *sock_mnt __read_mostly; + + static struct file_system_type sock_fs_type = { + .name = "sockfs", +@@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol, + return -EAFNOSUPPORT; + if (type < 0 || type >= SOCK_MAX) + return -EINVAL; ++ if (protocol < 0) ++ return -EINVAL; + + /* Compatibility. 
+ +@@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) + if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) + flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; + ++ if(!gr_search_socket(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ if (gr_handle_sock_all(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ + retval = sock_create(family, type, protocol, &sock); + if (retval < 0) + goto out; +@@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + if (sock) { + err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); + if (err >= 0) { ++ if (gr_handle_sock_server((struct sockaddr *)&address)) { ++ err = -EACCES; ++ goto error; ++ } ++ err = gr_search_bind(sock, (struct sockaddr_in *)&address); ++ if (err) ++ goto error; ++ + err = security_socket_bind(sock, + (struct sockaddr *)&address, + addrlen); +@@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + (struct sockaddr *) + &address, addrlen); + } ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) + if ((unsigned)backlog > somaxconn) + backlog = somaxconn; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ goto error; ++ } ++ ++ err = gr_search_listen(sock); ++ if (err) ++ goto error; ++ + err = security_socket_listen(sock, backlog); + if (!err) + err = sock->ops->listen(sock, backlog); + ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + newsock->type = sock->type; + newsock->ops = sock->ops; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ sock_release(newsock); ++ goto out_put; ++ } ++ ++ err = gr_search_accept(sock); ++ if (err) { ++ sock_release(newsock); ++ goto out_put; ++ } ++ + /* + * We don't need try_module_get here, as the listening socket (sock) + * has the protocol module (sock->ops->owner) held. +@@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + fd_install(newfd, newfile); + err = newfd; + ++ gr_attach_curr_ip(newsock->sk); ++ + out_put: + fput_light(sock->file, fput_needed); + out: +@@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + int, addrlen) + { + struct socket *sock; ++ struct sockaddr *sck; + struct sockaddr_storage address; + int err, fput_needed; + +@@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + if (err < 0) + goto out_put; + ++ sck = (struct sockaddr *)&address; ++ ++ if (gr_handle_sock_client(sck)) { ++ err = -EACCES; ++ goto out_put; ++ } ++ ++ err = gr_search_connect(sock, (struct sockaddr_in *)sck); ++ if (err) ++ goto out_put; ++ + err = + security_socket_connect(sock, (struct sockaddr *)&address, addrlen); + if (err) +@@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, + * checking falls down on this. 
+ */ + if (copy_from_user(ctl_buf, +- (void __user __force *)msg_sys->msg_control, ++ (void __force_user *)msg_sys->msg_control, + ctl_len)) + goto out_freectl; + msg_sys->msg_control = ctl_buf; +@@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, + * kernel msghdr to use the kernel address space) + */ + +- uaddr = (__force void __user *)msg_sys->msg_name; ++ uaddr = (void __force_user *)msg_sys->msg_name; + uaddr_len = COMPAT_NAMELEN(msg); + if (MSG_CMSG_COMPAT & flags) { + err = verify_compat_iovec(msg_sys, iov, +@@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) + } + + ifr = compat_alloc_user_space(buf_size); +- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); ++ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); + + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) + return -EFAULT; +@@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) + offsetof(struct ethtool_rxnfc, fs.ring_cookie)); + + if (copy_in_user(rxnfc, compat_rxnfc, +- (void *)(&rxnfc->fs.m_ext + 1) - +- (void *)rxnfc) || ++ (void __user *)(&rxnfc->fs.m_ext + 1) - ++ (void __user *)rxnfc) || + copy_in_user(&rxnfc->fs.ring_cookie, + &compat_rxnfc->fs.ring_cookie, +- (void *)(&rxnfc->fs.location + 1) - +- (void *)&rxnfc->fs.ring_cookie) || ++ (void __user *)(&rxnfc->fs.location + 1) - ++ (void __user *)&rxnfc->fs.ring_cookie) || + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; +@@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) + + if (convert_out) { + if (copy_in_user(compat_rxnfc, rxnfc, +- (const void *)(&rxnfc->fs.m_ext + 1) - +- (const void *)rxnfc) || ++ (const void __user *)(&rxnfc->fs.m_ext + 1) - ++ (const void __user *)rxnfc) || + copy_in_user(&compat_rxnfc->fs.ring_cookie, + &rxnfc->fs.ring_cookie, +- (const void *)(&rxnfc->fs.location + 1) - +- (const void *)&rxnfc->fs.ring_cookie) || ++ (const void __user *)(&rxnfc->fs.location + 1) - ++ (const void __user *)&rxnfc->fs.ring_cookie) || + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; +@@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = dev_ioctl(net, cmd, +- (struct ifreq __user __force *) &kifr); ++ (struct ifreq __force_user *) &kifr); + set_fs(old_fs); + + return err; +@@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, + + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = dev_ioctl(net, cmd, (void __user __force *)&ifr); ++ err = dev_ioctl(net, cmd, (void __force_user *)&ifr); + set_fs(old_fs); + + if (cmd == SIOCGIFMAP && !err) { +@@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, + ret |= __get_user(rtdev, &(ur4->rt_dev)); + if (rtdev) { + ret |= copy_from_user(devname, compat_ptr(rtdev), 15); +- r4.rt_dev = (char __user __force *)devname; ++ r4.rt_dev = (char __force_user *)devname; + devname[15] = 0; + } else + r4.rt_dev = NULL; +@@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, + int __user *uoptlen; + int err; + +- uoptval = (char __user __force *) optval; +- uoptlen = (int __user __force *) optlen; ++ uoptval = (char __force_user *) optval; ++ uoptlen = (int __force_user *) optlen; + + set_fs(KERNEL_DS); + if (level == SOL_SOCKET) +@@ -3339,7 +3399,7 @@ 
int kernel_setsockopt(struct socket *sock, int level, int optname, + char __user *uoptval; + int err; + +- uoptval = (char __user __force *) optval; ++ uoptval = (char __force_user *) optval; + + set_fs(KERNEL_DS); + if (level == SOL_SOCKET) +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 00a1a2a..6a0138a 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word) + #ifdef RPC_DEBUG + static void rpc_task_set_debuginfo(struct rpc_task *task) + { +- static atomic_t rpc_pid; ++ static atomic_unchecked_t rpc_pid; + +- task->tk_pid = atomic_inc_return(&rpc_pid); ++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); + } + #else + static inline void rpc_task_set_debuginfo(struct rpc_task *task) +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index 71bed1c..5dff36d 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, + int buflen, unsigned int base) + { + size_t save_iovlen; +- void __user *save_iovbase; ++ void *save_iovbase; + unsigned int i; + int ret; + +diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c +index 09af4fa..77110a9 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma.c ++++ b/net/sunrpc/xprtrdma/svc_rdma.c +@@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; + static unsigned int min_max_inline = 4096; + static unsigned int max_max_inline = 65536; + +-atomic_t rdma_stat_recv; +-atomic_t rdma_stat_read; +-atomic_t rdma_stat_write; +-atomic_t rdma_stat_sq_starve; +-atomic_t rdma_stat_rq_starve; +-atomic_t rdma_stat_rq_poll; +-atomic_t rdma_stat_rq_prod; +-atomic_t rdma_stat_sq_poll; +-atomic_t rdma_stat_sq_prod; ++atomic_unchecked_t rdma_stat_recv; ++atomic_unchecked_t rdma_stat_read; ++atomic_unchecked_t rdma_stat_write; ++atomic_unchecked_t rdma_stat_sq_starve; ++atomic_unchecked_t rdma_stat_rq_starve; ++atomic_unchecked_t rdma_stat_rq_poll; ++atomic_unchecked_t rdma_stat_rq_prod; ++atomic_unchecked_t rdma_stat_sq_poll; ++atomic_unchecked_t rdma_stat_sq_prod; + + /* Temporary NFS request map and context caches */ + struct kmem_cache *svc_rdma_map_cachep; +@@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write, + len -= *ppos; + if (len > *lenp) + len = *lenp; +- if (len && copy_to_user(buffer, str_buf, len)) ++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len))) + return -EFAULT; + *lenp = len; + *ppos += len; +@@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = { + { + .procname = "rdma_stat_read", + .data = &rdma_stat_read, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_recv", + .data = &rdma_stat_recv, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_write", + .data = &rdma_stat_write, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_starve", + .data = &rdma_stat_sq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_rq_starve", + .data = &rdma_stat_rq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = 
read_reset_stat, + }, + { + .procname = "rdma_stat_rq_poll", + .data = &rdma_stat_rq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_rq_prod", + .data = &rdma_stat_rq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_poll", + .data = &rdma_stat_sq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_prod", + .data = &rdma_stat_sq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +index df67211..c354b13 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +@@ -499,7 +499,7 @@ next_sge: + svc_rdma_put_context(ctxt, 0); + goto out; + } +- atomic_inc(&rdma_stat_read); ++ atomic_inc_unchecked(&rdma_stat_read); + + if (read_wr.num_sge < chl_map->ch[ch_no].count) { + chl_map->ch[ch_no].count -= read_wr.num_sge; +@@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dto_q); + list_del_init(&ctxt->dto_q); + } else { +- atomic_inc(&rdma_stat_rq_starve); ++ atomic_inc_unchecked(&rdma_stat_rq_starve); + clear_bit(XPT_DATA, &xprt->xpt_flags); + ctxt = NULL; + } +@@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", + ctxt, rdma_xprt, rqstp, ctxt->wc_status); + BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); +- atomic_inc(&rdma_stat_recv); ++ atomic_inc_unchecked(&rdma_stat_recv); + + /* Build up the XDR from the receive buffers. 
*/ + rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +index 249a835..fb2794b 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, + write_wr.wr.rdma.remote_addr = to; + + /* Post It */ +- atomic_inc(&rdma_stat_write); ++ atomic_inc_unchecked(&rdma_stat_write); + if (svc_rdma_send(xprt, &write_wr)) + goto err; + return 0; +diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c +index ba1296d..0fec1a5 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c +@@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_rq_poll); ++ atomic_inc_unchecked(&rdma_stat_rq_poll); + + while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { + ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; +@@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_rq_prod); ++ atomic_inc_unchecked(&rdma_stat_rq_prod); + + set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); + /* +@@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_sq_poll); ++ atomic_inc_unchecked(&rdma_stat_sq_poll); + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { + if (wc.status != IB_WC_SUCCESS) + /* Close the transport */ +@@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_sq_prod); ++ atomic_inc_unchecked(&rdma_stat_sq_prod); + } + + static void sq_comp_handler(struct ib_cq *cq, void *cq_context) +@@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) + spin_lock_bh(&xprt->sc_lock); + if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { + spin_unlock_bh(&xprt->sc_lock); +- atomic_inc(&rdma_stat_sq_starve); ++ atomic_inc_unchecked(&rdma_stat_sq_starve); + + /* See if we can opportunistically reap SQ WR to make room */ + sq_cq_reap(xprt); +diff --git a/net/sysctl_net.c b/net/sysctl_net.c +index e758139..d29ea47 100644 +--- a/net/sysctl_net.c ++++ b/net/sysctl_net.c +@@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root, + struct ctl_table *table) + { + /* Allow network administrator to have same access as root. 
*/ +- if (capable(CAP_NET_ADMIN)) { ++ if (capable_nolog(CAP_NET_ADMIN)) { + int mode = (table->mode >> 6) & 7; + return (mode << 6) | (mode << 3) | mode; + } +diff --git a/net/tipc/link.c b/net/tipc/link.c +index ae98a72..7bb6056 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender, + struct tipc_msg fragm_hdr; + struct sk_buff *buf, *buf_chain, *prev; + u32 fragm_crs, fragm_rest, hsz, sect_rest; +- const unchar *sect_crs; ++ const unchar __user *sect_crs; + int curr_sect; + u32 fragm_no; + +@@ -1247,7 +1247,7 @@ again: + + if (!sect_rest) { + sect_rest = msg_sect[++curr_sect].iov_len; +- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base; ++ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base; + } + + if (sect_rest < fragm_rest) +@@ -1266,7 +1266,7 @@ error: + } + } else + skb_copy_to_linear_data_offset(buf, fragm_crs, +- sect_crs, sz); ++ (const void __force_kernel *)sect_crs, sz); + sect_crs += sz; + sect_rest -= sz; + fragm_crs += sz; +diff --git a/net/tipc/msg.c b/net/tipc/msg.c +index 83d5096..dcba497 100644 +--- a/net/tipc/msg.c ++++ b/net/tipc/msg.c +@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, + msg_sect[cnt].iov_len); + else + skb_copy_to_linear_data_offset(*buf, pos, +- msg_sect[cnt].iov_base, ++ (const void __force_kernel *)msg_sect[cnt].iov_base, + msg_sect[cnt].iov_len); + pos += msg_sect[cnt].iov_len; + } +diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c +index 1983717..4d6102c 100644 +--- a/net/tipc/subscr.c ++++ b/net/tipc/subscr.c +@@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub, + { + struct iovec msg_sect; + +- msg_sect.iov_base = (void *)&sub->evt; ++ msg_sect.iov_base = (void __force_user *)&sub->evt; + msg_sect.iov_len = sizeof(struct tipc_event); + + sub->evt.event = htohl(event, sub->swap); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index d99678a..3514a21 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net, + err = -ECONNREFUSED; + if (!S_ISSOCK(inode->i_mode)) + goto put_fail; ++ ++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) { ++ err = -EACCES; ++ goto put_fail; ++ } ++ + u = unix_find_socket_byinode(inode); + if (!u) + goto put_fail; +@@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net, + if (u) { + struct dentry *dentry; + dentry = unix_sk(u)->dentry; ++ ++ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) { ++ err = -EPERM; ++ sock_put(u); ++ goto fail; ++ } ++ + if (dentry) + touch_atime(unix_sk(u)->mnt, dentry); + } else +@@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + err = security_path_mknod(&path, dentry, mode, 0); + if (err) + goto out_mknod_drop_write; ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { ++ err = -EACCES; ++ goto out_mknod_drop_write; ++ } + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); + out_mknod_drop_write: + mnt_drop_write(path.mnt); + if (err) + goto out_mknod_dput; ++ ++ gr_handle_create(dentry, path.mnt); ++ + mutex_unlock(&path.dentry->d_inode->i_mutex); + dput(path.dentry); + path.dentry = dentry; +diff --git a/net/wireless/core.h b/net/wireless/core.h +index b9ec306..b4a563e 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -27,7 +27,7 @@ struct cfg80211_registered_device { + struct mutex mtx; + + /* rfkill support */ +- struct rfkill_ops 
rfkill_ops; ++ rfkill_ops_no_const rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index 0af7f54..c916d2f 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +@@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + } + } + +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { +- /* +- * If this is a GET, but not NOMAX, it means that the extra +- * data is not bounded by userspace, but by max_tokens. Thus +- * set the length to max_tokens. This matches the extra data +- * allocation. +- * The driver should fill it with the number of tokens it +- * provided, and it may check iwp->length rather than having +- * knowledge of max_tokens. If the driver doesn't change the +- * iwp->length, this ioctl just copies back max_token tokens +- * filled with zeroes. Hopefully the driver isn't claiming +- * them to be valid data. +- */ +- iwp->length = descr->max_tokens; +- } +- + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 9049a5c..cfa6f5c 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) + { + policy->walk.dead = 1; + +- atomic_inc(&policy->genid); ++ atomic_inc_unchecked(&policy->genid); + + if (del_timer(&policy->timer)) + xfrm_pol_put(policy); +@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) + hlist_add_head(&policy->bydst, chain); + xfrm_pol_hold(policy); + net->xfrm.policy_count[dir]++; +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + if (delpol) + __xfrm_policy_unlink(delpol, dir); + policy->index = delpol ? 
delpol->index : xfrm_gen_index(net, dir); +@@ -1530,7 +1530,7 @@ free_dst: + goto out; + } + +-static int inline ++static inline int + xfrm_dst_alloc_copy(void **target, const void *src, int size) + { + if (!*target) { +@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size) + return 0; + } + +-static int inline ++static inline int + xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) + { + #ifdef CONFIG_XFRM_SUB_POLICY +@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) + #endif + } + +-static int inline ++static inline int + xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) + { + #ifdef CONFIG_XFRM_SUB_POLICY +@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, + + xdst->num_pols = num_pols; + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); +- xdst->policy_genid = atomic_read(&pols[0]->genid); ++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid); + + return xdst; + } +@@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first) + if (xdst->xfrm_genid != dst->xfrm->genid) + return 0; + if (xdst->num_pols > 0 && +- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) ++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid)) + return 0; + + mtu = dst_mtu(dst->child); +@@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, + sizeof(pol->xfrm_vec[i].saddr)); + pol->xfrm_vec[i].encap_family = mp->new_family; + /* flush bundles */ +- atomic_inc(&pol->genid); ++ atomic_inc_unchecked(&pol->genid); + } + } + +diff --git a/scripts/Makefile.build b/scripts/Makefile.build +index d2b366c..51ff91ebc 100644 +--- a/scripts/Makefile.build ++++ b/scripts/Makefile.build +@@ -109,7 +109,7 @@ endif + endif + + # Do not include host rules unless needed +-ifneq ($(hostprogs-y)$(hostprogs-m),) ++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),) + include scripts/Makefile.host + endif + +diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean +index 686cb0d..9d653bf 100644 +--- a/scripts/Makefile.clean ++++ b/scripts/Makefile.clean +@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn)) + __clean-files := $(extra-y) $(always) \ + $(targets) $(clean-files) \ + $(host-progs) \ +- $(hostprogs-y) $(hostprogs-m) $(hostprogs-) ++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \ ++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-) + + __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) + +diff --git a/scripts/Makefile.host b/scripts/Makefile.host +index 1ac414f..a1c1451 100644 +--- a/scripts/Makefile.host ++++ b/scripts/Makefile.host +@@ -31,6 +31,7 @@ + # Note: Shared libraries consisting of C++ files are not supported + + __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m)) ++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m)) + + # C code + # Executables compiled from a single .c file +@@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs))) + # Shared libaries (only .c supported) + # Shared libraries (.so) - all .so files referenced in "xxx-objs" + host-cshlib := $(sort $(filter %.so, $(host-cobjs))) ++host-cshlib += $(sort $(filter %.so, $(__hostlibs))) + # Remove .so files from "xxx-objs" + host-cobjs := $(filter-out %.so,$(host-cobjs)) + +diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c +index cb1f50c..cef2a7c 100644 +--- a/scripts/basic/fixdep.c ++++ b/scripts/basic/fixdep.c +@@ -161,7 +161,7 @@ 
static unsigned int strhash(const char *str, unsigned int sz) + /* + * Lookup a value in the configuration string. + */ +-static int is_defined_config(const char *name, int len, unsigned int hash) ++static int is_defined_config(const char *name, unsigned int len, unsigned int hash) + { + struct item *aux; + +@@ -211,10 +211,10 @@ static void clear_config(void) + /* + * Record the use of a CONFIG_* word. + */ +-static void use_config(const char *m, int slen) ++static void use_config(const char *m, unsigned int slen) + { + unsigned int hash = strhash(m, slen); +- int c, i; ++ unsigned int c, i; + + if (is_defined_config(m, slen, hash)) + return; +@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen) + + static void parse_config_file(const char *map, size_t len) + { +- const int *end = (const int *) (map + len); ++ const unsigned int *end = (const unsigned int *) (map + len); + /* start at +1, so that p can never be < map */ +- const int *m = (const int *) map + 1; ++ const unsigned int *m = (const unsigned int *) map + 1; + const char *p, *q; + + for (; m < end; m++) { +@@ -406,7 +406,7 @@ static void print_deps(void) + static void traps(void) + { + static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; +- int *p = (int *)test; ++ unsigned int *p = (unsigned int *)test; + + if (*p != INT_CONF) { + fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n", +diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh +new file mode 100644 +index 0000000..8729101 +--- /dev/null ++++ b/scripts/gcc-plugin.sh +@@ -0,0 +1,2 @@ ++#!/bin/sh ++echo -e "#include "gcc-plugin.h"\n#include "tree.h"\n#include "tm.h"\n#include "rtl.h"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y" +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index f936d1f..a66d95f 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id, + unsigned long size, unsigned long id_size, + void *symval) + { +- int i; ++ unsigned int i; + + if (size % id_size || size < id_size) { + if (cross_build != 0) +@@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id, + /* USB is special because the bcdDevice can be matched against a numeric range */ + /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ + static void do_usb_entry(struct usb_device_id *id, +- unsigned int bcdDevice_initial, int bcdDevice_initial_digits, ++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits, + unsigned char range_lo, unsigned char range_hi, + unsigned char max, struct module *mod) + { +@@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) + { + unsigned int devlo, devhi; + unsigned char chi, clo, max; +- int ndigits; ++ unsigned int ndigits; + + id->match_flags = TO_NATIVE(id->match_flags); + id->idVendor = TO_NATIVE(id->idVendor); +@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size, + for (i = 0; i < count; i++) { + const char *id = (char *)devs[i].id; + char acpi_id[sizeof(devs[0].id)]; +- int j; ++ unsigned int j; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS("pnp:d%s*");\n", id); +@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, + + for (j = 0; j < PNP_MAX_DEVICES; j++) { + const char *id = (char *)card->devs[j].id; +- int i2, j2; ++ unsigned int i2, j2; + int dup = 0; + + if (!id[0]) +@@ 
-493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, + /* add an individual alias for every device entry */ + if (!dup) { + char acpi_id[sizeof(card->devs[0].id)]; +- int k; ++ unsigned int k; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS("pnp:d%s*");\n", id); +@@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s) + static int do_dmi_entry(const char *filename, struct dmi_system_id *id, + char *alias) + { +- int i, j; ++ unsigned int i, j; + + sprintf(alias, "dmi*"); + +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c +index 2bd594e..d43245e 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -919,6 +919,7 @@ enum mismatch { + ANY_INIT_TO_ANY_EXIT, + ANY_EXIT_TO_ANY_INIT, + EXPORT_TO_INIT_EXIT, ++ DATA_TO_TEXT + }; + + struct sectioncheck { +@@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = { + .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, + .mismatch = EXPORT_TO_INIT_EXIT, + .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, ++}, ++/* Do not reference code from writable data */ ++{ ++ .fromsec = { DATA_SECTIONS, NULL }, ++ .tosec = { TEXT_SECTIONS, NULL }, ++ .mismatch = DATA_TO_TEXT + } + }; + +@@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, + continue; + if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + continue; +- if (sym->st_value == addr) +- return sym; + /* Find a symbol nearby - addr are maybe negative */ + d = sym->st_value - addr; ++ if (d == 0) ++ return sym; + if (d < 0) + d = addr - sym->st_value; + if (d < distance) { +@@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname, + tosym, prl_to, prl_to, tosym); + free(prl_to); + break; ++ case DATA_TO_TEXT: ++/* ++ fprintf(stderr, ++ "The variable %s references\n" ++ "the %s %s%s%s\n", ++ fromsym, to, sec2annotation(tosec), tosym, to_p); ++*/ ++ break; + } + fprintf(stderr, "\n"); + } +@@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf, + static void check_sec_ref(struct module *mod, const char *modname, + struct elf_info *elf) + { +- int i; ++ unsigned int i; + Elf_Shdr *sechdrs = elf->sechdrs; + + /* Walk through all sections */ +@@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf, + va_end(ap); + } + +-void buf_write(struct buffer *buf, const char *s, int len) ++void buf_write(struct buffer *buf, const char *s, unsigned int len) + { + if (buf->size - buf->pos < len) { + buf->size += len + SZ; +@@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname) + if (fstat(fileno(file), &st) < 0) + goto close_write; + +- if (st.st_size != b->pos) ++ if (st.st_size != (off_t)b->pos) + goto close_write; + + tmp = NOFAIL(malloc(b->pos)); +diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h +index 2031119..b5433af 100644 +--- a/scripts/mod/modpost.h ++++ b/scripts/mod/modpost.h +@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr); + + struct buffer { + char *p; +- int pos; +- int size; ++ unsigned int pos; ++ unsigned int size; + }; + + void __attribute__((format(printf, 2, 3))) + buf_printf(struct buffer *buf, const char *fmt, ...); + + void +-buf_write(struct buffer *buf, const char *s, int len); ++buf_write(struct buffer *buf, const char *s, unsigned int len); + + struct module { + struct module *next; +diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c +index 9dfcd6d..099068e 100644 +--- a/scripts/mod/sumversion.c ++++ 
b/scripts/mod/sumversion.c +@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum, + goto out; + } + +- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { ++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) { + warn("writing sum in %s failed: %s\n", + filename, strerror(errno)); + goto out; +diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c +index 5c11312..72742b5 100644 +--- a/scripts/pnmtologo.c ++++ b/scripts/pnmtologo.c +@@ -237,14 +237,14 @@ static void write_header(void) + fprintf(out, " * Linux logo %s\n", logoname); + fputs(" */\n\n", out); + fputs("#include <linux/linux_logo.h>\n\n", out); +- fprintf(out, "static unsigned char %s_data[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_data[] = {\n", + logoname); + } + + static void write_footer(void) + { + fputs("\n};\n\n", out); +- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); ++ fprintf(out, "const struct linux_logo %s = {\n", logoname); + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); + fprintf(out, "\t.width\t\t= %d,\n", logo_width); + fprintf(out, "\t.height\t\t= %d,\n", logo_height); +@@ -374,7 +374,7 @@ static void write_logo_clut224(void) + fputs("\n};\n\n", out); + + /* write logo clut */ +- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_clut[] = {\n", + logoname); + write_hex_cnt = 0; + for (i = 0; i < logo_clutsize; i++) { +diff --git a/security/Kconfig b/security/Kconfig +index 51bd5a0..58c5b70 100644 +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -4,6 +4,639 @@ + + menu "Security options" + ++source grsecurity/Kconfig ++ ++menu "PaX" ++ ++ config ARCH_TRACK_EXEC_LIMIT ++ bool ++ ++ config PAX_KERNEXEC_PLUGIN ++ bool ++ ++ config PAX_PER_CPU_PGD ++ bool ++ ++ config TASK_SIZE_MAX_SHIFT ++ int ++ depends on X86_64 ++ default 47 if !PAX_PER_CPU_PGD ++ default 42 if PAX_PER_CPU_PGD ++ ++ config PAX_ENABLE_PAE ++ bool ++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM)) ++ ++config PAX ++ bool "Enable various PaX features" ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ help ++ This allows you to enable various PaX features. PaX adds ++ intrusion prevention mechanisms to the kernel that reduce ++ the risks posed by exploitable memory corruption bugs. ++ ++menu "PaX Control" ++ depends on PAX ++ ++config PAX_SOFTMODE ++ bool 'Support soft mode' ++ help ++ Enabling this option will allow you to run PaX in soft mode, that ++ is, PaX features will not be enforced by default, only on executables ++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS ++ support as they are the only way to mark executables for soft mode use. ++ ++ Soft mode can be activated by using the "pax_softmode=1" kernel command ++ line option on boot. Furthermore you can control various PaX features ++ at runtime via the entries in /proc/sys/kernel/pax. ++ ++config PAX_EI_PAX ++ bool 'Use legacy ELF header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'chpax' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ an otherwise reserved part of the ELF header. This marking has ++ numerous drawbacks (no support for soft-mode, toolchain does not ++ know about the non-standard use of the ELF header) therefore it ++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS ++ support. 
++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this ++ option otherwise they will not get any protection. ++ ++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking ++ support as well, they will override the legacy EI_PAX marks. ++ ++config PAX_PT_PAX_FLAGS ++ bool 'Use ELF program header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'paxctl' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking ++ has the benefits of supporting both soft mode and being fully ++ integrated into the toolchain (the binutils patch is available ++ from http://pax.grsecurity.net). ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking ++ support otherwise they will not get any protection. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. ++ ++config PAX_XATTR_PAX_FLAGS ++ bool 'Use filesystem extended attributes marking' ++ select CIFS_XATTR if CIFS ++ select EXT2_FS_XATTR if EXT2_FS ++ select EXT3_FS_XATTR if EXT3_FS ++ select EXT4_FS_XATTR if EXT4_FS ++ select JFFS2_FS_XATTR if JFFS2_FS ++ select REISERFS_FS_XATTR if REISERFS_FS ++ select SQUASHFS_XATTR if SQUASHFS ++ select TMPFS_XATTR if TMPFS ++ select UBIFS_FS_XATTR if UBIFS_FS ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'setfattr' utility. The control ++ flags will be read from the user.pax.flags extended attribute of ++ the file. This marking has the benefit of supporting binary-only ++ applications that self-check themselves (e.g., skype) and would ++ not tolerate chpax/paxctl changes. The main drawback is that ++ extended attributes are not supported by some filesystems (e.g., ++ isofs, udf, vfat) so copying files through such filesystems will ++ lose the extended attributes and these PaX markings. ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF program ++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking ++ support otherwise they will not get any protection. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks. ++ ++choice ++ prompt 'MAC system integration' ++ default PAX_HAVE_ACL_FLAGS ++ help ++ Mandatory Access Control systems have the option of controlling ++ PaX flags on a per executable basis, choose the method supported ++ by your particular system. ++ ++ - "none": if your MAC system does not interact with PaX, ++ - "direct": if your MAC system defines pax_set_initial_flags() itself, ++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback. ++ ++ NOTE: this option is for developers/integrators only. 
++ ++ config PAX_NO_ACL_FLAGS ++ bool 'none' ++ ++ config PAX_HAVE_ACL_FLAGS ++ bool 'direct' ++ ++ config PAX_HOOK_ACL_FLAGS ++ bool 'hook' ++endchoice ++ ++endmenu ++ ++menu "Non-executable pages" ++ depends on PAX ++ ++config PAX_NOEXEC ++ bool "Enforce non-executable pages" ++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86 ++ help ++ By design some architectures do not allow for protecting memory ++ pages against execution or even if they do, Linux does not make ++ use of this feature. In practice this means that if a page is ++ readable (such as the stack or heap) it is also executable. ++ ++ There is a well known exploit technique that makes use of this ++ fact and a common programming mistake where an attacker can ++ introduce code of his choice somewhere in the attacked program's ++ memory (typically the stack or the heap) and then execute it. ++ ++ If the attacked program was running with different (typically ++ higher) privileges than that of the attacker, then he can elevate ++ his own privilege level (e.g. get a root shell, write to files for ++ which he does not have write access to, etc). ++ ++ Enabling this option will let you choose from various features ++ that prevent the injection and execution of 'foreign' code in ++ a program. ++ ++ This will also break programs that rely on the old behaviour and ++ expect that dynamically allocated memory via the malloc() family ++ of functions is executable (which it is not). Notable examples ++ are the XFree86 4.x server, the java runtime and wine. ++ ++config PAX_PAGEEXEC ++ bool "Paging based non-executable pages" ++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7) ++ select S390_SWITCH_AMODE if S390 ++ select S390_EXEC_PROTECT if S390 ++ select ARCH_TRACK_EXEC_LIMIT if X86_32 ++ help ++ This implementation is based on the paging feature of the CPU. ++ On i386 without hardware non-executable bit support there is a ++ variable but usually low performance impact, however on Intel's ++ P4 core based CPUs it is very high so you should not enable this ++ for kernels meant to be used on such CPUs. ++ ++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 ++ with hardware non-executable bit support there is no performance ++ impact, on ppc the impact is negligible. ++ ++ Note that several architectures require various emulations due to ++ badly designed userland ABIs, this will cause a performance impact ++ but will disappear as soon as userland is fixed. For example, ppc ++ userland MUST have been built with secure-plt by a recent toolchain. ++ ++config PAX_SEGMEXEC ++ bool "Segmentation based non-executable pages" ++ depends on PAX_NOEXEC && X86_32 ++ help ++ This implementation is based on the segmentation feature of the ++ CPU and has a very small performance impact, however applications ++ will be limited to a 1.5 GB address space instead of the normal ++ 3 GB. ++ ++config PAX_EMUTRAMP ++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86) ++ default y if PARISC ++ help ++ There are some programs and libraries that for one reason or ++ another attempt to execute special small code snippets from ++ non-executable memory pages. Most notable examples are the ++ signal handler return code generated by the kernel itself and ++ the GCC trampolines. 
++ ++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then ++ such programs will no longer work under your kernel. ++ ++ As a remedy you can say Y here and use the 'chpax' or 'paxctl' ++ utilities to enable trampoline emulation for the affected programs ++ yet still have the protection provided by the non-executable pages. ++ ++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise ++ your system will not even boot. ++ ++ Alternatively you can say N here and use the 'chpax' or 'paxctl' ++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC ++ for the affected files. ++ ++ NOTE: enabling this feature *may* open up a loophole in the ++ protection provided by non-executable pages that an attacker ++ could abuse. Therefore the best solution is to not have any ++ files on your system that would require this option. This can ++ be achieved by not using libc5 (which relies on the kernel ++ signal handler return code) and not using or rewriting programs ++ that make use of the nested function implementation of GCC. ++ Skilled users can just fix GCC itself so that it implements ++ nested function calls in a way that does not interfere with PaX. ++ ++config PAX_EMUSIGRT ++ bool "Automatically emulate sigreturn trampolines" ++ depends on PAX_EMUTRAMP && PARISC ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate signal return trampolines executing on the stack ++ that would otherwise lead to task termination. ++ ++ This solution is intended as a temporary one for users with ++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, ++ Modula-3 runtime, etc) or executables linked to such, basically ++ everything that does not specify its own SA_RESTORER function in ++ normal executable memory like glibc 2.1+ does. ++ ++ On parisc you MUST enable this option, otherwise your system will ++ not even boot. ++ ++ NOTE: this feature cannot be disabled on a per executable basis ++ and since it *does* open up a loophole in the protection provided ++ by non-executable pages, the best solution is to not have any ++ files on your system that would require this option. ++ ++config PAX_MPROTECT ++ bool "Restrict mprotect()" ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) ++ help ++ Enabling this option will prevent programs from ++ - changing the executable status of memory pages that were ++ not originally created as executable, ++ - making read-only executable pages writable again, ++ - creating executable pages from anonymous memory, ++ - making read-only-after-relocations (RELRO) data pages writable again. ++ ++ You should say Y here to complete the protection provided by ++ the enforcement of non-executable pages. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_MPROTECT_COMPAT ++ bool "Use legacy/compat protection demoting (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects ++ by sending the proper error code to the application. For some broken ++ userland, this can cause problems with Python or other applications. The ++ current implementation however allows for applications like clamav to ++ detect if JIT compilation/execution is allowed and to fall back gracefully ++ to an interpreter-based mode if it does not. 
While we encourage everyone ++ to use the current implementation as-is and push upstream to fix broken ++ userland (note that the RWX logging option can assist with this), in some ++ environments this may not be possible. Having to disable MPROTECT ++ completely on certain binaries reduces the security benefit of PaX, ++ so this option is provided for those environments to revert to the old ++ behavior. ++ ++config PAX_ELFRELOCS ++ bool "Allow ELF text relocations (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ Non-executable pages and mprotect() restrictions are effective ++ in preventing the introduction of new executable code into an ++ attacked task's address space. There remain only two venues ++ for this kind of attack: if the attacker can execute already ++ existing code in the attacked task then he can either have it ++ create and mmap() a file containing his code or have it mmap() ++ an already existing ELF library that does not have position ++ independent code in it and use mprotect() on it to make it ++ writable and copy his code there. While protecting against ++ the former approach is beyond PaX, the latter can be prevented ++ by having only PIC ELF libraries on one's system (which do not ++ need to relocate their code). If you are sure this is your case, ++ as is the case with all modern Linux distributions, then leave ++ this option disabled. You should say 'n' here. ++ ++config PAX_ETEXECRELOCS ++ bool "Allow ELF ET_EXEC text relocations" ++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS ++ default y ++ help ++ On some architectures there are incorrectly created applications ++ that require text relocations and would not work without enabling ++ this option. If you are an alpha, ia64 or parisc user, you should ++ enable this option and disable it once you have made sure that ++ none of your applications need it. ++ ++config PAX_EMUPLT ++ bool "Automatically emulate ELF PLT" ++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC) ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate the Procedure Linkage Table entries in ELF files. ++ On some architectures such entries are in writable memory, and ++ become non-executable leading to task termination. Therefore ++ it is mandatory that you enable this option on alpha, parisc, ++ sparc and sparc64, otherwise your system would not even boot. ++ ++ NOTE: this feature *does* open up a loophole in the protection ++ provided by the non-executable pages, therefore the proper ++ solution is to modify the toolchain to produce a PLT that does ++ not need to be writable. ++ ++config PAX_DLRESOLVE ++ bool 'Emulate old glibc resolver stub' ++ depends on PAX_EMUPLT && SPARC ++ default n ++ help ++ This option is needed if userland has an old glibc (before 2.4) ++ that puts a 'save' instruction into the runtime generated resolver ++ stub that needs special emulation. ++ ++config PAX_KERNEXEC ++ bool "Enforce non-executable kernel pages" ++ depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN ++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE) ++ select PAX_KERNEXEC_PLUGIN if X86_64 ++ help ++ This is the kernel land equivalent of PAGEEXEC and MPROTECT, ++ that is, enabling this option will make it harder to inject ++ and execute 'foreign' code in kernel memory itself. ++ ++ Note that on x86_64 kernels there is a known regression when ++ this feature and KVM/VMX are both enabled in the host kernel. 
++ ++choice ++ prompt "Return Address Instrumentation Method" ++ default PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ depends on PAX_KERNEXEC_PLUGIN ++ help ++ Select the method used to instrument function pointer dereferences. ++ Note that binary modules cannot be instrumented by this approach. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ bool "bts" ++ help ++ This method is compatible with binary only modules but has ++ a higher runtime overhead. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_OR ++ bool "or" ++ depends on !PARAVIRT ++ help ++ This method is incompatible with binary only modules but has ++ a lower runtime overhead. ++endchoice ++ ++config PAX_KERNEXEC_PLUGIN_METHOD ++ string ++ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR ++ default "" ++ ++config PAX_KERNEXEC_MODULE_TEXT ++ int "Minimum amount of memory reserved for module code" ++ default "4" ++ depends on PAX_KERNEXEC && X86_32 && MODULES ++ help ++ Due to implementation details the kernel must reserve a fixed ++ amount of memory for module code at compile time that cannot be ++ changed at runtime. Here you can specify the minimum amount ++ in MB that will be reserved. Due to the same implementation ++ details this size will always be rounded up to the next 2/4 MB ++ boundary (depends on PAE) so the actually available memory for ++ module code will usually be more than this minimum. ++ ++ The default 4 MB should be enough for most users but if you have ++ an excessive number of modules (e.g., most distribution configs ++ compile many drivers as modules) or use huge modules such as ++ nvidia's kernel driver, you will need to adjust this amount. ++ A good rule of thumb is to look at your currently loaded kernel ++ modules and add up their sizes. ++ ++endmenu ++ ++menu "Address Space Layout Randomization" ++ depends on PAX ++ ++config PAX_ASLR ++ bool "Address Space Layout Randomization" ++ help ++ Many if not most exploit techniques rely on the knowledge of ++ certain addresses in the attacked program. The following options ++ will allow the kernel to apply a certain amount of randomization ++ to specific parts of the program thereby forcing an attacker to ++ guess them in most cases. Any failed guess will most likely crash ++ the attacked program which allows the kernel to detect such attempts ++ and react on them. PaX itself provides no reaction mechanisms, ++ instead it is strongly encouraged that you make use of Nergal's ++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's ++ (http://www.grsecurity.net/) built-in crash detection features or ++ develop one yourself. ++ ++ By saying Y here you can choose to randomize the following areas: ++ - top of the task's kernel stack ++ - top of the task's userland stack ++ - base address for mmap() requests that do not specify one ++ (this includes all libraries) ++ - base address of the main executable ++ ++ It is strongly recommended to say Y here as address space layout ++ randomization has negligible impact on performance yet it provides ++ a very effective protection. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_RANDKSTACK ++ bool "Randomize kernel stack base" ++ depends on X86_TSC && X86 ++ help ++ By saying Y here the kernel will randomize every task's kernel ++ stack on every system call. This will not only force an attacker ++ to guess it but also prevent him from making use of possible ++ leaked information about it. 
++ ++ Since the kernel stack is a rather scarce resource, randomization ++ may cause unexpected stack overflows, therefore you should very ++ carefully test your system. Note that once enabled in the kernel ++ configuration, this feature cannot be disabled on a per file basis. ++ ++config PAX_RANDUSTACK ++ bool "Randomize user stack base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will randomize every task's userland ++ stack. The randomization is done in two steps where the second ++ one may apply a big amount of shift to the top of the stack and ++ cause problems for programs that want to use lots of memory (more ++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). ++ For this reason the second step can be controlled by 'chpax' or ++ 'paxctl' on a per file basis. ++ ++config PAX_RANDMMAP ++ bool "Randomize mmap() base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will use a randomized base address for ++ mmap() requests that do not specify one themselves. As a result ++ all dynamically loaded libraries will appear at random addresses ++ and therefore be harder to exploit by a technique where an attacker ++ attempts to execute library code for his purposes (e.g. spawn a ++ shell from an exploited program that is running at an elevated ++ privilege level). ++ ++ Furthermore, if a program is relinked as a dynamic ELF file, its ++ base address will be randomized as well, completing the full ++ randomization of the address space layout. Attacking such programs ++ becomes a guess game. You can find an example of doing this at ++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at ++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz . ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this ++ feature on a per file basis. ++ ++endmenu ++ ++menu "Miscellaneous hardening features" ++ ++config PAX_MEMORY_SANITIZE ++ bool "Sanitize all freed memory" ++ depends on !HIBERNATION ++ help ++ By saying Y here the kernel will erase memory pages as soon as they ++ are freed. This in turn reduces the lifetime of data stored in the ++ pages, making it less likely that sensitive information such as ++ passwords, cryptographic secrets, etc stay in memory for too long. ++ ++ This is especially useful for programs whose runtime is short, long ++ lived processes and the kernel itself benefit from this as long as ++ they operate on whole memory pages and ensure timely freeing of pages ++ that may hold sensitive information. ++ ++ The tradeoff is performance impact, on a single CPU system kernel ++ compilation sees a 3% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note that this feature does not protect data stored in live pages, ++ e.g., process memory swapped to disk may stay there for a long time. ++ ++config PAX_MEMORY_STACKLEAK ++ bool "Sanitize kernel stack" ++ depends on X86 ++ help ++ By saying Y here the kernel will erase the kernel stack before it ++ returns from a system call. This in turn reduces the information ++ that a kernel stack leak bug can reveal. ++ ++ Note that such a bug can still leak information that was put on ++ the stack by the current system call (the one eventually triggering ++ the bug) but traces of earlier system calls on the kernel stack ++ cannot leak anymore. 
++ ++ The tradeoff is performance impact: on a single CPU system kernel ++ compilation sees a 1% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note: full support for this feature requires gcc with plugin support ++ so make sure your compiler is at least gcc 4.5.0. Using older gcc ++ versions means that functions with large enough stack frames may ++ leave uninitialized memory behind that may be exposed to a later ++ syscall leaking the stack. ++ ++config PAX_MEMORY_UDEREF ++ bool "Prevent invalid userland pointer dereference" ++ depends on X86 && !UML_X86 && !XEN ++ select PAX_PER_CPU_PGD if X86_64 ++ help ++ By saying Y here the kernel will be prevented from dereferencing ++ userland pointers in contexts where the kernel expects only kernel ++ pointers. This is both a useful runtime debugging feature and a ++ security measure that prevents exploiting a class of kernel bugs. ++ ++ The tradeoff is that some virtualization solutions may experience ++ a huge slowdown and therefore you should not enable this feature ++ for kernels meant to run in such environments. Whether a given VM ++ solution is affected or not is best determined by simply trying it ++ out, the performance impact will be obvious right on boot as this ++ mechanism engages from very early on. A good rule of thumb is that ++ VMs running on CPUs without hardware virtualization support (i.e., ++ the majority of IA-32 CPUs) will likely experience the slowdown. ++ ++config PAX_REFCOUNT ++ bool "Prevent various kernel object reference counter overflows" ++ depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86) ++ help ++ By saying Y here the kernel will detect and prevent overflowing ++ various (but not all) kinds of object reference counters. Such ++ overflows can normally occur due to bugs only and are often, if ++ not always, exploitable. ++ ++ The tradeoff is that data structures protected by an overflowed ++ refcount will never be freed and therefore will leak memory. Note ++ that this leak also happens even without this protection but in ++ that case the overflow can eventually trigger the freeing of the ++ data structure while it is still being used elsewhere, resulting ++ in the exploitable situation that this feature prevents. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_USERCOPY ++ bool "Harden heap object copies between kernel and userland" ++ depends on X86 || PPC || SPARC || ARM ++ depends on GRKERNSEC && (SLAB || SLUB || SLOB) ++ help ++ By saying Y here the kernel will enforce the size of heap objects ++ when they are copied in either direction between the kernel and ++ userland, even if only a part of the heap object is copied. ++ ++ Specifically, this checking prevents information leaking from the ++ kernel heap during kernel to userland copies (if the kernel heap ++ object is otherwise fully initialized) and prevents kernel heap ++ overflows during userland to kernel copies. ++ ++ Note that the current implementation provides the strictest bounds ++ checks for the SLUB allocator. ++ ++ Enabling this option also enables per-slab cache protection against ++ data in a given cache being copied into/out of via userland ++ accessors. Though the whitelist of regions will be reduced over ++ time, it notably protects important data structures like task structs. 
++ ++ If frame pointers are enabled on x86, this option will also restrict ++ copies into and out of the kernel stack to local variables within a ++ single frame. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_SIZE_OVERFLOW ++ bool "Prevent various integer overflows in function size parameters" ++ help ++ By saying Y here the kernel recomputes expressions of function ++ arguments marked by a size_overflow attribute with double integer ++ precision (DImode/TImode for 32/64-bit integer types). ++ ++ The recomputed argument is checked against INT_MAX; on overflow, an event ++ is logged and the triggering process is killed. ++ ++ Homepage: ++ http://www.grsecurity.net/~ephox/overflow_plugin/ ++ ++endmenu ++ ++endmenu ++ + config KEYS + bool "Enable access key retention support" + help +@@ -169,7 +802,7 @@ config INTEL_TXT + config LSM_MMAP_MIN_ADDR + int "Low address space for LSM to protect from user allocation" + depends on SECURITY && SECURITY_SELINUX +- default 32768 if ARM ++ default 32768 if ALPHA || ARM || PARISC || SPARC32 + default 65536 + help + This is the portion of low virtual memory which should be protected +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index 3783202..1852837 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, + return error; + } + +-static struct security_operations apparmor_ops = { ++static struct security_operations apparmor_ops __read_only = { + .name = "apparmor", + + .ptrace_access_check = apparmor_ptrace_access_check, +diff --git a/security/commoncap.c b/security/commoncap.c +index ee4f848..a320c64 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -28,6 +28,7 @@ + #include <linux/prctl.h> + #include <linux/securebits.h> + #include <linux/user_namespace.h> ++#include <net/sock.h> + + /* + * If a non-root user executes a setuid-root binary in +@@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb) + + int cap_netlink_recv(struct sk_buff *skb, int cap) + { +- if (!cap_raised(current_cap(), cap)) ++ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap)) + return -EPERM; + return 0; + } +@@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm) + { + const struct cred *cred = current_cred(); + ++ if (gr_acl_enable_at_secure()) ++ return 1; ++ + if (cred->uid != 0) { + if (bprm->cap_effective) + return 1; +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index 3ccf7ac..d73ad64 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, + extern spinlock_t ima_queue_lock; + + struct ima_h_table { +- atomic_long_t len; /* number of stored measurements in the list */ +- atomic_long_t violations; ++ atomic_long_unchecked_t len; /* number of stored measurements in the list */ ++ atomic_long_unchecked_t violations; + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; + }; + extern struct ima_h_table ima_htable; +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c +index 88a2788..581ab92 100644 +--- a/security/integrity/ima/ima_api.c ++++ b/security/integrity/ima/ima_api.c +@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename, + int result; + + /* can overflow, only indicator */ +- atomic_long_inc(&ima_htable.violations); ++ 
atomic_long_inc_unchecked(&ima_htable.violations); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { +diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c +index c5c5a72..2ad942f 100644 +--- a/security/integrity/ima/ima_audit.c ++++ b/security/integrity/ima/ima_audit.c +@@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, + audit_log_format(ab, " name="); + audit_log_untrustedstring(ab, fname); + } +- if (inode) +- audit_log_format(ab, " dev=%s ino=%lu", +- inode->i_sb->s_id, inode->i_ino); ++ if (inode) { ++ audit_log_format(ab, " dev="); ++ audit_log_untrustedstring(ab, inode->i_sb->s_id); ++ audit_log_format(ab, " ino=%lu", inode->i_ino); ++ } + audit_log_format(ab, " res=%d", !result ? 0 : 1); + audit_log_end(ab); + } +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index e1aa2b4..52027bf 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -28,12 +28,12 @@ + static int valid_policy = 1; + #define TMPBUFLEN 12 + static ssize_t ima_show_htable_value(char __user *buf, size_t count, +- loff_t *ppos, atomic_long_t *val) ++ loff_t *ppos, atomic_long_unchecked_t *val) + { + char tmpbuf[TMPBUFLEN]; + ssize_t len; + +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); ++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); + } + +diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c +index 55a6271..ad829c3 100644 +--- a/security/integrity/ima/ima_queue.c ++++ b/security/integrity/ima/ima_queue.c +@@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) + INIT_LIST_HEAD(&qe->later); + list_add_tail_rcu(&qe->later, &ima_measurements); + +- atomic_long_inc(&ima_htable.len); ++ atomic_long_inc_unchecked(&ima_htable.len); + key = ima_hash_key(entry->digest); + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); + return 0; +diff --git a/security/keys/compat.c b/security/keys/compat.c +index 4c48e13..7abdac9 100644 +--- a/security/keys/compat.c ++++ b/security/keys/compat.c +@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov( + if (ret == 0) + goto no_payload_free; + +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); + + if (iov != iovstack) + kfree(iov); +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 0b3f5d7..892c8a6 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key) + /* + * Copy the iovec data from userspace + */ +-static long copy_from_user_iovec(void *buffer, const struct iovec *iov, ++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov, + unsigned ioc) + { + for (; ioc > 0; ioc--) { +@@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov, + * If successful, 0 will be returned. 
+ */ + long keyctl_instantiate_key_common(key_serial_t id, +- const struct iovec *payload_iov, ++ const struct iovec __user *payload_iov, + unsigned ioc, + size_t plen, + key_serial_t ringid) +@@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id, + [0].iov_len = plen + }; + +- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid); ++ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid); + } + + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); +@@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id, + if (ret == 0) + goto no_payload_free; + +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); + + if (iov != iovstack) + kfree(iov); +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index 37a7f3b..86dc19f 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring, + ret = -EFAULT; + + for (loop = 0; loop < klist->nkeys; loop++) { ++ key_serial_t serial; + key = klist->keys[loop]; ++ serial = key->serial; + + tmp = sizeof(key_serial_t); + if (tmp > buflen) + tmp = buflen; + +- if (copy_to_user(buffer, +- &key->serial, +- tmp) != 0) ++ if (copy_to_user(buffer, &serial, tmp)) + goto error; + + buflen -= tmp; +diff --git a/security/lsm_audit.c b/security/lsm_audit.c +index 893af8a..ba9237c 100644 +--- a/security/lsm_audit.c ++++ b/security/lsm_audit.c +@@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab, + audit_log_d_path(ab, "path=", &a->u.path); + + inode = a->u.path.dentry->d_inode; +- if (inode) +- audit_log_format(ab, " dev=%s ino=%lu", +- inode->i_sb->s_id, +- inode->i_ino); ++ if (inode) { ++ audit_log_format(ab, " dev="); ++ audit_log_untrustedstring(ab, inode->i_sb->s_id); ++ audit_log_format(ab, " ino=%lu", inode->i_ino); ++ } + break; + } + case LSM_AUDIT_DATA_DENTRY: { +@@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab, + audit_log_untrustedstring(ab, a->u.dentry->d_name.name); + + inode = a->u.dentry->d_inode; +- if (inode) +- audit_log_format(ab, " dev=%s ino=%lu", +- inode->i_sb->s_id, +- inode->i_ino); ++ if (inode) { ++ audit_log_format(ab, " dev="); ++ audit_log_untrustedstring(ab, inode->i_sb->s_id); ++ audit_log_format(ab, " ino=%lu", inode->i_ino); ++ } + break; + } + case LSM_AUDIT_DATA_INODE: { +@@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, + dentry->d_name.name); + dput(dentry); + } +- audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id, +- inode->i_ino); ++ audit_log_format(ab, " dev="); ++ audit_log_untrustedstring(ab, inode->i_sb->s_id); ++ audit_log_format(ab, " ino=%lu", inode->i_ino); + break; + } + case LSM_AUDIT_DATA_TASK: +diff --git a/security/min_addr.c b/security/min_addr.c +index f728728..6457a0c 100644 +--- a/security/min_addr.c ++++ b/security/min_addr.c +@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; + */ + static void update_mmap_min_addr(void) + { ++#ifndef SPARC + #ifdef CONFIG_LSM_MMAP_MIN_ADDR + if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) + mmap_min_addr = dac_mmap_min_addr; +@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void) + #else + mmap_min_addr = dac_mmap_min_addr; + #endif ++#endif + } + + /* +diff --git a/security/security.c b/security/security.c +index e2f684a..8d62ef5 100644 +--- 
a/security/security.c ++++ b/security/security.c +@@ -26,8 +26,8 @@ + static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = + CONFIG_DEFAULT_SECURITY; + +-static struct security_operations *security_ops; +-static struct security_operations default_security_ops = { ++static struct security_operations *security_ops __read_only; ++static struct security_operations default_security_ops __read_only = { + .name = "default", + }; + +@@ -68,7 +68,9 @@ int __init security_init(void) + + void reset_security_ops(void) + { ++ pax_open_kernel(); + security_ops = &default_security_ops; ++ pax_close_kernel(); + } + + /* Save user chosen LSM */ +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 1126c10..effb32b 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -94,8 +94,6 @@ + + #define NUM_SEL_MNT_OPTS 5 + +-extern struct security_operations *security_ops; +- + /* SECMARK reference count */ + static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); + +@@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) + + #endif + +-static struct security_operations selinux_ops = { ++static struct security_operations selinux_ops __read_only = { + .name = "selinux", + + .ptrace_access_check = selinux_ptrace_access_check, +diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h +index b43813c..74be837 100644 +--- a/security/selinux/include/xfrm.h ++++ b/security/selinux/include/xfrm.h +@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); + + static inline void selinux_xfrm_notify_policyload(void) + { +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + } + #else + static inline int selinux_xfrm_enabled(void) +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 7db62b4..ee4d949 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) + return 0; + } + +-struct security_operations smack_ops = { ++struct security_operations smack_ops __read_only = { + .name = "smack", + + .ptrace_access_check = smack_ptrace_access_check, +diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c +index 4b327b6..646c57a 100644 +--- a/security/tomoyo/tomoyo.c ++++ b/security/tomoyo/tomoyo.c +@@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, + * tomoyo_security_ops is a "struct security_operations" which is used for + * registering TOMOYO. 
+ */ +-static struct security_operations tomoyo_security_ops = { ++static struct security_operations tomoyo_security_ops __read_only = { + .name = "tomoyo", + .cred_alloc_blank = tomoyo_cred_alloc_blank, + .cred_prepare = tomoyo_cred_prepare, +diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c +index 762af68..7103453 100644 +--- a/sound/aoa/codecs/onyx.c ++++ b/sound/aoa/codecs/onyx.c +@@ -54,7 +54,7 @@ struct onyx { + spdif_locked:1, + analog_locked:1, + original_mute:2; +- int open_count; ++ local_t open_count; + struct codec_info *codec_info; + + /* mutex serializes concurrent access to the device +@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count++; ++ local_inc(&onyx->open_count); + mutex_unlock(&onyx->mutex); + + return 0; +@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count--; +- if (!onyx->open_count) ++ if (local_dec_and_test(&onyx->open_count)) + onyx->spdif_locked = onyx->analog_locked = 0; + mutex_unlock(&onyx->mutex); + +diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h +index ffd2025..df062c9 100644 +--- a/sound/aoa/codecs/onyx.h ++++ b/sound/aoa/codecs/onyx.h +@@ -11,6 +11,7 @@ + #include <linux/i2c.h> + #include <asm/pmac_low_i2c.h> + #include <asm/prom.h> ++#include <asm/local.h> + + /* PCM3052 register definitions */ + +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 3cc4b86..af0a951 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const + if (in_kernel) { + mm_segment_t fs; + fs = snd_enter_user(); +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); + snd_leave_user(fs); + } else { +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); + } + if (ret != -EPIPE && ret != -ESTRPIPE) + break; +@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p + if (in_kernel) { + mm_segment_t fs; + fs = snd_enter_user(); +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); + snd_leave_user(fs); + } else { +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); + } + if (ret == -EPIPE) { + if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { +@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha + struct snd_pcm_plugin_channel *channels; + size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8; + if (!in_kernel) { +- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes)) ++ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes)) + return -EFAULT; + buf = runtime->oss.buffer; + } +@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha + } + } else { + tmp = snd_pcm_oss_write2(substream, +- (const char __force *)buf, ++ (const char __force_kernel *)buf, + runtime->oss.period_bytes, 0); 
+ if (tmp <= 0) + goto err; +@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf, + struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_sframes_t frames, frames1; + #ifdef CONFIG_SND_PCM_OSS_PLUGINS +- char __user *final_dst = (char __force __user *)buf; ++ char __user *final_dst = (char __force_user *)buf; + if (runtime->oss.plugin_first) { + struct snd_pcm_plugin_channel *channels; + size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8; +@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use + xfer += tmp; + runtime->oss.buffer_used -= tmp; + } else { +- tmp = snd_pcm_oss_read2(substream, (char __force *)buf, ++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf, + runtime->oss.period_bytes, 0); + if (tmp <= 0) + goto err; +@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + size1); + size1 /= runtime->channels; /* frames */ + fs = snd_enter_user(); +- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1); ++ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1); + snd_leave_user(fs); + } + } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) { +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index 91cdf94..4085161 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, + int err; + + fs = snd_enter_user(); +- err = snd_pcm_delay(substream, &delay); ++ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay); + snd_leave_user(fs); + if (err < 0) + return err; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 25ed9fe..24c46e9 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, + switch (substream->stream) { + case SNDRV_PCM_STREAM_PLAYBACK: + result = snd_pcm_playback_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + case SNDRV_PCM_STREAM_CAPTURE: + result = snd_pcm_capture_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + default: + result = -EINVAL; +diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c +index 5cf8d65..912a79c 100644 +--- a/sound/core/seq/seq_device.c ++++ b/sound/core/seq/seq_device.c +@@ -64,7 +64,7 @@ struct ops_list { + int argsize; /* argument size */ + + /* operators */ +- struct snd_seq_dev_ops ops; ++ struct snd_seq_dev_ops *ops; + + /* registred devices */ + struct list_head dev_list; /* list of devices */ +@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, + + mutex_lock(&ops->reg_mutex); + /* copy driver operators */ +- ops->ops = *entry; ++ ops->ops = entry; + ops->driver |= DRIVER_LOADED; + ops->argsize = argsize; + +@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, dev->argsize); + return -EINVAL; + } +- if (ops->ops.init_device(dev) >= 0) { ++ if (ops->ops->init_device(dev) >= 0) { + dev->status = SNDRV_SEQ_DEVICE_REGISTERED; + ops->num_init_devices++; + } else { +@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, 
dev->argsize); + return -EINVAL; + } +- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) { ++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) { + dev->status = SNDRV_SEQ_DEVICE_FREE; + dev->driver_data = NULL; + ops->num_init_devices--; +diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c +index f24bf9a..1f7b67c 100644 +--- a/sound/drivers/mts64.c ++++ b/sound/drivers/mts64.c +@@ -29,6 +29,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Miditerminal 4140" + #define DRIVER_NAME "MTS64" +@@ -67,7 +68,7 @@ struct mts64 { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int current_midi_output_port; + int current_midi_input_port; + u8 mode[MTS64_NUM_INPUT_PORTS]; +@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + { + struct mts64 *mts = substream->rmidi->private_data; + +- if (mts->open_count == 0) { ++ if (local_read(&mts->open_count) == 0) { + /* We don't need a spinlock here, because this is just called + if the device has not been opened before. + So there aren't any IRQs from the device */ +@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + + msleep(50); + } +- ++(mts->open_count); ++ local_inc(&mts->open_count); + + return 0; + } +@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + struct mts64 *mts = substream->rmidi->private_data; + unsigned long flags; + +- --(mts->open_count); +- if (mts->open_count == 0) { ++ if (local_dec_return(&mts->open_count) == 0) { + /* We need the spinlock_irqsave here because we can still + have IRQs at this point */ + spin_lock_irqsave(&mts->lock, flags); +@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + + msleep(500); + +- } else if (mts->open_count < 0) +- mts->open_count = 0; ++ } else if (local_read(&mts->open_count) < 0) ++ local_set(&mts->open_count, 0); + + return 0; + } +diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c +index b953fb4..1999c01 100644 +--- a/sound/drivers/opl4/opl4_lib.c ++++ b/sound/drivers/opl4/opl4_lib.c +@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch clemens@ladisch.de"); + MODULE_DESCRIPTION("OPL4 driver"); + MODULE_LICENSE("GPL"); + +-static void inline snd_opl4_wait(struct snd_opl4 *opl4) ++static inline void snd_opl4_wait(struct snd_opl4 *opl4) + { + int timeout = 10; + while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0) +diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c +index f664823..590c745 100644 +--- a/sound/drivers/portman2x4.c ++++ b/sound/drivers/portman2x4.c +@@ -48,6 +48,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Portman 2x4" + #define DRIVER_NAME "portman" +@@ -85,7 +86,7 @@ struct portman { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int mode[PORTMAN_NUM_INPUT_PORTS]; + struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; + }; +diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c +index 87657dd..a8268d4 100644 +--- a/sound/firewire/amdtp.c ++++ b/sound/firewire/amdtp.c +@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle) + ptr = s->pcm_buffer_pointer + data_blocks; + if (ptr >= 
pcm->runtime->buffer_size) + ptr -= pcm->runtime->buffer_size; +- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; ++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr; + + s->pcm_period_pointer += data_blocks; + if (s->pcm_period_pointer >= pcm->runtime->period_size) { +@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start); + */ + void amdtp_out_stream_update(struct amdtp_out_stream *s) + { +- ACCESS_ONCE(s->source_node_id_field) = ++ ACCESS_ONCE_RW(s->source_node_id_field) = + (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; + } + EXPORT_SYMBOL(amdtp_out_stream_update); +diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h +index 537a9cb..8e8c8e9 100644 +--- a/sound/firewire/amdtp.h ++++ b/sound/firewire/amdtp.h +@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s) + static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s, + struct snd_pcm_substream *pcm) + { +- ACCESS_ONCE(s->pcm) = pcm; ++ ACCESS_ONCE_RW(s->pcm) = pcm; + } + + /** +diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c +index cd094ec..eca1277 100644 +--- a/sound/firewire/isight.c ++++ b/sound/firewire/isight.c +@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count) + ptr += count; + if (ptr >= runtime->buffer_size) + ptr -= runtime->buffer_size; +- ACCESS_ONCE(isight->buffer_pointer) = ptr; ++ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr; + + isight->period_counter += count; + if (isight->period_counter >= runtime->period_size) { +@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream, + if (err < 0) + return err; + +- ACCESS_ONCE(isight->pcm_active) = true; ++ ACCESS_ONCE_RW(isight->pcm_active) = true; + + return 0; + } +@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream) + { + struct isight *isight = substream->private_data; + +- ACCESS_ONCE(isight->pcm_active) = false; ++ ACCESS_ONCE_RW(isight->pcm_active) = false; + + mutex_lock(&isight->mutex); + isight_stop_streaming(isight); +@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd) + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: +- ACCESS_ONCE(isight->pcm_running) = true; ++ ACCESS_ONCE_RW(isight->pcm_running) = true; + break; + case SNDRV_PCM_TRIGGER_STOP: +- ACCESS_ONCE(isight->pcm_running) = false; ++ ACCESS_ONCE_RW(isight->pcm_running) = false; + break; + default: + return -EINVAL; +diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c +index c94578d..0794ac1 100644 +--- a/sound/isa/cmi8330.c ++++ b/sound/isa/cmi8330.c +@@ -172,7 +172,7 @@ struct snd_cmi8330 { + + struct snd_pcm *pcm; + struct snd_cmi8330_stream { +- struct snd_pcm_ops ops; ++ snd_pcm_ops_no_const ops; + snd_pcm_open_callback_t open; + void *private_data; /* sb or wss */ + } streams[2]; +diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c +index 733b014..56ce96f 100644 +--- a/sound/oss/sb_audio.c ++++ b/sound/oss/sb_audio.c +@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev, + buf16 = (signed short *)(localbuf + localoffs); + while (c) + { +- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); ++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? 
LBUFCOPYSIZE : c); + if (copy_from_user(lbuf8, + userbuf+useroffs + p, + locallen)) +diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c +index 09d4648..cf234c7 100644 +--- a/sound/oss/swarm_cs4297a.c ++++ b/sound/oss/swarm_cs4297a.c +@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void) + { + struct cs4297a_state *s; + u32 pwr, id; +- mm_segment_t fs; + int rval; + #ifndef CONFIG_BCM_CS4297A_CSWARM + u64 cfg; +@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void) + if (!rval) { + char *sb1250_duart_present; + ++#if 0 ++ mm_segment_t fs; + fs = get_fs(); + set_fs(KERNEL_DS); +-#if 0 + val = SOUND_MASK_LINE; + mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val); + for (i = 0; i < ARRAY_SIZE(initvol); i++) { + val = initvol[i].vol; + mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val); + } ++ set_fs(fs); + // cs4297a_write_ac97(s, 0x18, 0x0808); + #else + // cs4297a_write_ac97(s, 0x5e, 0x180); + cs4297a_write_ac97(s, 0x02, 0x0808); + cs4297a_write_ac97(s, 0x18, 0x0808); + #endif +- set_fs(fs); + + list_add(&s->list, &cs4297a_devs); + +diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h +index 71f6744..d8aeae7 100644 +--- a/sound/pci/hda/hda_codec.h ++++ b/sound/pci/hda/hda_codec.h +@@ -614,7 +614,7 @@ struct hda_bus_ops { + /* notify power-up/down from codec to controller */ + void (*pm_notify)(struct hda_bus *bus); + #endif +-}; ++} __no_const; + + /* template to pass to the bus constructor */ + struct hda_bus_template { +@@ -716,6 +716,7 @@ struct hda_codec_ops { + #endif + void (*reboot_notify)(struct hda_codec *codec); + }; ++typedef struct hda_codec_ops __no_const hda_codec_ops_no_const; + + /* record for amp information cache */ + struct hda_cache_head { +@@ -746,7 +747,7 @@ struct hda_pcm_ops { + struct snd_pcm_substream *substream); + int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec, + struct snd_pcm_substream *substream); +-}; ++} __no_const; + + /* PCM information for each substream */ + struct hda_pcm_stream { +@@ -804,7 +805,7 @@ struct hda_codec { + const char *modelname; /* model name for preset */ + + /* set by patch */ +- struct hda_codec_ops patch_ops; ++ hda_codec_ops_no_const patch_ops; + + /* PCM to create, set by patch_ops.build_pcms callback */ + unsigned int num_pcms; +diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h +index 0da778a..bc38b84 100644 +--- a/sound/pci/ice1712/ice1712.h ++++ b/sound/pci/ice1712/ice1712.h +@@ -269,7 +269,7 @@ struct snd_ak4xxx_private { + unsigned int mask_flags; /* total mask bits */ + struct snd_akm4xxx_ops { + void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate); +- } ops; ++ } __no_const ops; + }; + + struct snd_ice1712_spdif { +@@ -285,7 +285,7 @@ struct snd_ice1712_spdif { + int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); + void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); + int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol); +- } ops; ++ } __no_const ops; + }; + + +diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c +index 03ee4e3..be86b46 100644 +--- a/sound/pci/ymfpci/ymfpci_main.c ++++ b/sound/pci/ymfpci/ymfpci_main.c +@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) + if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) + break; + } +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if 
(atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + __end: +@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) + continue; + init_waitqueue_entry(&wait, current); + add_wait_queue(&chip->interrupt_sleep, &wait); +- atomic_inc(&chip->interrupt_sleep_count); ++ atomic_inc_unchecked(&chip->interrupt_sleep_count); + schedule_timeout_uninterruptible(msecs_to_jiffies(50)); + remove_wait_queue(&chip->interrupt_sleep, &wait); + } +@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) + snd_ymfpci_writel(chip, YDSXGR_MODE, mode); + spin_unlock(&chip->reg_lock); + +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + } +@@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card, + spin_lock_init(&chip->reg_lock); + spin_lock_init(&chip->voice_lock); + init_waitqueue_head(&chip->interrupt_sleep); +- atomic_set(&chip->interrupt_sleep_count, 0); ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + chip->card = card; + chip->pci = pci; + chip->irq = -1; +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index ee15337..e2187a6 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream) + } + + /* ASoC PCM operations */ +-static struct snd_pcm_ops soc_pcm_ops = { ++static snd_pcm_ops_no_const soc_pcm_ops = { + .open = soc_pcm_open, + .close = soc_pcm_close, + .hw_params = soc_pcm_hw_params, +diff --git a/sound/usb/card.h b/sound/usb/card.h +index a39edcc..1014050 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -44,6 +44,7 @@ struct snd_urb_ops { + int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); + int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); + }; ++typedef struct snd_urb_ops __no_const snd_urb_ops_no_const; + + struct snd_usb_substream { + struct snd_usb_stream *stream; +@@ -93,7 +94,7 @@ struct snd_usb_substream { + struct snd_pcm_hw_constraint_list rate_list; /* limited rates */ + spinlock_t lock; + +- struct snd_urb_ops ops; /* callbacks (must be filled at init) */ ++ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */ + int last_frame_number; /* stored frame number */ + int last_delay; /* stored delay */ + }; +diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile +new file mode 100644 +index 0000000..ca64170 +--- /dev/null ++++ b/tools/gcc/Makefile +@@ -0,0 +1,26 @@ ++#CC := gcc ++#PLUGIN_SOURCE_FILES := pax_plugin.c ++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES)) ++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin) ++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99 ++ ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb ++CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer ++ ++hostlibs-y := constify_plugin.so ++hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so ++hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so ++hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so ++hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so ++hostlibs-y += 
colorize_plugin.so ++hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so ++ ++always := $(hostlibs-y) ++ ++constify_plugin-objs := constify_plugin.o ++stackleak_plugin-objs := stackleak_plugin.o ++kallocstat_plugin-objs := kallocstat_plugin.o ++kernexec_plugin-objs := kernexec_plugin.o ++checker_plugin-objs := checker_plugin.o ++colorize_plugin-objs := colorize_plugin.o ++size_overflow_plugin-objs := size_overflow_plugin.o +diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c +new file mode 100644 +index 0000000..d41b5af +--- /dev/null ++++ b/tools/gcc/checker_plugin.c +@@ -0,0 +1,171 @@ ++/* ++ * Copyright 2011 by the PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to implement various sparse (source code checker) features ++ * ++ * TODO: ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++#include "target.h" ++ ++extern void c_register_addr_space (const char *str, addr_space_t as); ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); ++extern enum machine_mode default_addr_space_address_mode (addr_space_t); ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info checker_plugin_info = { ++ .version = "201111150100", ++}; ++ ++#define ADDR_SPACE_KERNEL 0 ++#define ADDR_SPACE_FORCE_KERNEL 1 ++#define ADDR_SPACE_USER 2 ++#define ADDR_SPACE_FORCE_USER 3 ++#define ADDR_SPACE_IOMEM 0 ++#define ADDR_SPACE_FORCE_IOMEM 0 ++#define ADDR_SPACE_PERCPU 0 ++#define ADDR_SPACE_FORCE_PERCPU 0 ++#define ADDR_SPACE_RCU 0 ++#define ADDR_SPACE_FORCE_RCU 0 ++ ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); ++} ++ ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); ++} ++ ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_valid_pointer_mode(mode, as); ++} ++ ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) ++{ ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); ++} ++ ++static rtx checker_addr_space_legitimize_address(rtx x, 
rtx oldx, enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_legitimize_address(x, oldx, mode, as); ++} ++ ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) ++{ ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ return subset == superset; ++} ++ ++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type) ++{ ++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type)); ++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type)); ++ ++ return op; ++} ++ ++static void register_checker_address_spaces(void *event_data, void *data) ++{ ++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL); ++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL); ++ c_register_addr_space("__user", ADDR_SPACE_USER); ++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER); ++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM); ++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM); ++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU); ++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU); ++// c_register_addr_space("__rcu", ADDR_SPACE_RCU); ++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU); ++ ++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode; ++ targetm.addr_space.address_mode = checker_addr_space_address_mode; ++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode; ++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p; ++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address; ++ targetm.addr_space.subset_p = checker_addr_space_subset_p; ++ targetm.addr_space.convert = checker_addr_space_convert; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info); ++ ++ for (i = 0; i < argc; ++i) ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c +new file mode 100644 +index 0000000..ee950d0 +--- /dev/null ++++ b/tools/gcc/colorize_plugin.c +@@ -0,0 +1,147 @@ ++/* ++ * Copyright 2012 by PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for 
the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to colorize diagnostic output ++ * ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info colorize_plugin_info = { ++ .version = "201203092200", ++}; ++ ++#define GREEN "\033[32m\033[2m" ++#define LIGHTGREEN "\033[32m\033[1m" ++#define YELLOW "\033[33m\033[2m" ++#define LIGHTYELLOW "\033[33m\033[1m" ++#define RED "\033[31m\033[2m" ++#define LIGHTRED "\033[31m\033[1m" ++#define BLUE "\033[34m\033[2m" ++#define LIGHTBLUE "\033[34m\033[1m" ++#define BRIGHT "\033[m\033[1m" ++#define NORMAL "\033[m" ++ ++static diagnostic_starter_fn old_starter; ++static diagnostic_finalizer_fn old_finalizer; ++ ++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ const char *color; ++ char *newprefix; ++ ++ switch (diagnostic->kind) { ++ case DK_NOTE: ++ color = LIGHTBLUE; ++ break; ++ ++ case DK_PEDWARN: ++ case DK_WARNING: ++ color = LIGHTYELLOW; ++ break; ++ ++ case DK_ERROR: ++ case DK_FATAL: ++ case DK_ICE: ++ case DK_PERMERROR: ++ case DK_SORRY: ++ color = LIGHTRED; ++ break; ++ ++ default: ++ color = NORMAL; ++ } ++ ++ old_starter(context, diagnostic); ++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix)) ++ return; ++ pp_destroy_prefix(context->printer); ++ pp_set_prefix(context->printer, newprefix); ++} ++ ++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ old_finalizer(context, diagnostic); ++} ++ ++static void colorize_arm(void) ++{ ++ old_starter = diagnostic_starter(global_dc); ++ old_finalizer = diagnostic_finalizer(global_dc); ++ ++ diagnostic_starter(global_dc) = start_colorize; ++ diagnostic_finalizer(global_dc) = finalize_colorize; ++} ++ ++static unsigned int execute_colorize_rearm(void) ++{ ++ if (diagnostic_starter(global_dc) == start_colorize) ++ return 0; ++ ++ colorize_arm(); ++ return 0; ++} ++ ++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = { ++ .pass = { ++ .type = SIMPLE_IPA_PASS, ++ .name = "colorize_rearm", ++ .gate = NULL, ++ .execute = execute_colorize_rearm, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static void colorize_start_unit(void *gcc_data, void *user_data) ++{ ++ colorize_arm(); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info colorize_rearm_pass_info = { ++ .pass = &pass_ipa_colorize_rearm.pass, ++ .reference_pass_name = "*free_lang_data", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info); 
++ return 0; ++} +diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c +new file mode 100644 +index 0000000..704a564 +--- /dev/null ++++ b/tools/gcc/constify_plugin.c +@@ -0,0 +1,303 @@ ++/* ++ * Copyright 2011 by Emese Revfy re.emese@gmail.com ++ * Copyright 2011 by PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification. ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/const_plugin/ ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c ++ * $ gcc -fplugin=constify_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info const_plugin_info = { ++ .version = "201111150100", ++ .help = "no-constify\tturn off constification\n", ++}; ++ ++static void constify_type(tree type); ++static bool walk_struct(tree node); ++ ++static tree deconstify_type(tree old_type) ++{ ++ tree new_type, field; ++ ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) ++ DECL_FIELD_CONTEXT(field) = new_type; ++ TYPE_READONLY(new_type) = 0; ++ C_TYPE_FIELDS_READONLY(new_type) = 0; ++ return new_type; ++} ++ ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables", name); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE) ++ *no_add_attrs = false; ++ else ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ type = TREE_TYPE(*node); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already applied to the type", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) { ++ error("%qE attribute used on type that is not constified", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL) { ++ TREE_TYPE(*node) = deconstify_type(type); ++ TREE_READONLY(*node) = 0; ++ return NULL_TREE; ++ } ++ ++ return NULL_TREE; ++} ++ ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ if (!TYPE_P(*node)) { ++ error("%qE attribute applies to types only", name); ++ return 
NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ constify_type(*node); ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_const_attr = { ++ .name = "no_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_no_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec do_const_attr = { ++ .name = "do_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_do_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&no_const_attr); ++ register_attribute(&do_const_attr); ++} ++ ++static void constify_type(tree type) ++{ ++ TYPE_READONLY(type) = 1; ++ C_TYPE_FIELDS_READONLY(type) = 1; ++} ++ ++static bool is_fptr(tree field) ++{ ++ tree ptr = TREE_TYPE(field); ++ ++ if (TREE_CODE(ptr) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; ++} ++ ++static bool walk_struct(tree node) ++{ ++ tree field; ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) ++ return false; ++ ++ if (TYPE_FIELDS(node) == NULL_TREE) ++ return false; ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ enum tree_code code = TREE_CODE(type); ++ if (code == RECORD_TYPE || code == UNION_TYPE) { ++ if (!(walk_struct(type))) ++ return false; ++ } else if (!is_fptr(field) && !TREE_READONLY(field)) ++ return false; ++ } ++ return true; ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (type == NULL_TREE) ++ return; ++ ++ if (TYPE_READONLY(type)) ++ return; ++ ++ if (walk_struct(type)) ++ constify_type(type); ++} ++ ++static unsigned int check_local_variables(void); ++ ++struct gimple_opt_pass pass_local_variable = { ++ { ++ .type = GIMPLE_PASS, ++ .name = "check_local_variables", ++ .gate = NULL, ++ .execute = check_local_variables, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static unsigned int check_local_variables(void) ++{ ++ tree var; ++ referenced_var_iterator rvi; ++ ++#if BUILDING_GCC_VERSION == 4005 ++ FOR_EACH_REFERENCED_VAR(var, rvi) { ++#else ++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) { ++#endif ++ tree type = TREE_TYPE(var); ++ ++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var)) ++ continue; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var))) ++// continue; ++ ++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) ++// continue; ++ ++ if (walk_struct(type)) { ++ error("constified variable %qE cannot be local", var); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; 
++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ bool constify = true; ++ ++ struct register_pass_info local_variable_pass_info = { ++ .pass = &pass_local_variable.pass, ++ .reference_pass_name = "*referenced_vars", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-constify"))) { ++ constify = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); ++ if (constify) { ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c +new file mode 100644 +index 0000000..a5eabce +--- /dev/null ++++ b/tools/gcc/kallocstat_plugin.c +@@ -0,0 +1,167 @@ ++/* ++ * Copyright 2011 by the PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to find the distribution of k*alloc sizes ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static const char * const kalloc_functions[] = { ++ "__kmalloc", ++ "kmalloc", ++ "kmalloc_large", ++ "kmalloc_node", ++ "kmalloc_order", ++ "kmalloc_order_trace", ++ "kmalloc_slab", ++ "kzalloc", ++ "kzalloc_node", ++}; ++ ++static struct plugin_info kallocstat_plugin_info = { ++ .version = "201111150100", ++}; ++ ++static unsigned int execute_kallocstat(void); ++ ++static struct gimple_opt_pass kallocstat_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kallocstat", ++ .gate = NULL, ++ .execute = execute_kallocstat, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static bool is_kalloc(const char *fnname) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) ++ if (!strcmp(fnname, kalloc_functions[i])) ++ return true; ++ return false; ++} ++ ++static unsigned int execute_kallocstat(void) ++{ ++ basic_block bb; ++ ++ // 1. 
loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: ++ tree fndecl, size; ++ gimple call_stmt; ++ const char *fnname; ++ ++ // is it a call ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fndecl = gimple_call_fndecl(call_stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (TREE_CODE(fndecl) != FUNCTION_DECL) ++ continue; ++ ++ // is it a call to k*alloc ++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl)); ++ if (!is_kalloc(fnname)) ++ continue; ++ ++ // is the size arg the result of a simple const assignment ++ size = gimple_call_arg(call_stmt, 0); ++ while (true) { ++ gimple def_stmt; ++ expanded_location xloc; ++ size_t size_val; ++ ++ if (TREE_CODE(size) != SSA_NAME) ++ break; ++ def_stmt = SSA_NAME_DEF_STMT(size); ++ if (!def_stmt || !is_gimple_assign(def_stmt)) ++ break; ++ if (gimple_num_ops(def_stmt) != 2) ++ break; ++ size = gimple_assign_rhs1(def_stmt); ++ if (!TREE_CONSTANT(size)) ++ continue; ++ xloc = expand_location(gimple_location(def_stmt)); ++ if (!xloc.file) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ size_val = TREE_INT_CST_LOW(size); ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); ++ break; ++ } ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_node(stderr, "pax", fndecl, 4); ++ } ++ } ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info kallocstat_pass_info = { ++ .pass = &kallocstat_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); ++ ++ return 0; ++} +diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c +new file mode 100644 +index 0000000..008f159 +--- /dev/null ++++ b/tools/gcc/kernexec_plugin.c +@@ -0,0 +1,427 @@ ++/* ++ * Copyright 2011 by the PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... 
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kernexec_plugin_info = { ++ .version = "201111291120", ++ .help = "method=[bts|or]\tinstrumentation method\n" ++}; ++ ++static unsigned int execute_kernexec_reload(void); ++static unsigned int execute_kernexec_fptr(void); ++static unsigned int execute_kernexec_retaddr(void); ++static bool kernexec_cmodel_check(void); ++ ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); ++static void (*kernexec_instrument_retaddr)(rtx); ++ ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct gimple_opt_pass kernexec_fptr_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_fptr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_fptr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct rtl_opt_pass kernexec_retaddr_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "kernexec_retaddr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_retaddr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect ++ } ++}; ++ ++static bool kernexec_cmodel_check(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (!section || !TREE_VALUE(section)) ++ return true; ++ ++ section = TREE_VALUE(TREE_VALUE(section)); ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r10 and add a reload of r10 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. 
loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r10"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... ++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... clobbering r10 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference ++ */ ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) ++{ ++ gimple assign_intptr, assign_new_fptr, call_stmt; ++ tree intptr, old_fptr, new_fptr, kernexec_mask; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary unsigned long variable used for bitops and cast fptr to it ++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); ++ add_referenced_var(intptr); ++ mark_sym_for_renaming(intptr); ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // apply logical or to temporary unsigned long and bitmask ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); ++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // cast temporary unsigned long back to a temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); ++ update_stmt(assign_new_fptr); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_or_stmt, call_stmt; ++ tree old_fptr, new_fptr, input, output; ++ VEC(tree, gc) *inputs = NULL; ++ VEC(tree, gc) *outputs = NULL; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ ++ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); ++ input = build_tree_list(NULL_TREE, build_string(2, "0")); ++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); ++ output = build_tree_list(NULL_TREE, build_string(3, "=r")); ++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); ++ VEC_safe_push(tree, gc, inputs, input); ++ VEC_safe_push(tree, gc, outputs, output); ++ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); ++ 
gimple_asm_set_volatile(asm_or_stmt, true); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); ++ update_stmt(asm_or_stmt); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++/* ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer ++ */ ++static unsigned int execute_kernexec_fptr(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); ++ tree fn; ++ gimple call_stmt; ++ ++ // is it a call ... ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fn = gimple_call_fn(call_stmt); ++ if (TREE_CODE(fn) == ADDR_EXPR) ++ continue; ++ if (TREE_CODE(fn) != SSA_NAME) ++ gcc_unreachable(); ++ ++ // ... through a function pointer ++ fn = SSA_NAME_VAR(fn); ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != POINTER_TYPE) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != FUNCTION_TYPE) ++ continue; ++ ++ kernexec_instrument_fptr(&gsi); ++ ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++ } ++ } ++ ++ return 0; ++} ++ ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn ++static void kernexec_instrument_retaddr_bts(rtx insn) ++{ ++ rtx btsq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("btsq $63,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(btsq) = 1; ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(btsq, insn); ++} ++ ++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn ++static void kernexec_instrument_retaddr_or(rtx insn) ++{ ++ rtx orq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("orq %%r10,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(orq) = 1; ++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(orq, insn); ++} ++ ++/* ++ * find all asm level function returns and forcibly set the highest bit of the return address ++ */ ++static unsigned int execute_kernexec_retaddr(void) ++{ ++ rtx insn; ++ ++ // 1. 
find function returns ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil)) ++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil)) ++ rtx body; ++ ++ // is it a retn ++ if (!JUMP_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) == PARALLEL) ++ body = XVECEXP(body, 0, 0); ++ if (GET_CODE(body) != RETURN) ++ continue; ++ kernexec_instrument_retaddr(insn); ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info kernexec_reload_pass_info = { ++ .pass = &kernexec_reload_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ struct register_pass_info kernexec_fptr_pass_info = { ++ .pass = &kernexec_fptr_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ struct register_pass_info kernexec_retaddr_pass_info = { ++ .pass = &kernexec_retaddr_pass.pass, ++ .reference_pass_name = "pro_and_epilogue", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "method")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ if (!strcmp(argv[i].value, "bts")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_bts; ++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts; ++ } else if (!strcmp(argv[i].value, "or")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_or; ++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or; ++ fix_register("r10", 1, 1); ++ } else ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr) ++ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name); ++ ++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or) ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info); ++ ++ return 0; ++} +diff --git a/tools/gcc/size_overflow_hash1.h b/tools/gcc/size_overflow_hash1.h +new file mode 100644 +index 0000000..55a1292 +--- /dev/null ++++ b/tools/gcc/size_overflow_hash1.h +@@ -0,0 +1,2760 @@ ++struct size_overflow_hash size_overflow_hash1[65536] = { ++ [10013].file = "security/smack/smackfs.c", ++ [10013].name = "smk_write_direct", ++ [10013].param3 = 1, ++ [10167].file = "sound/core/oss/pcm_plugin.c", 
++ [10167].name = "snd_pcm_plugin_build", ++ [10167].param5 = 1, ++ [1020].file = "drivers/usb/misc/usbtest.c", ++ [1020].name = "test_unaligned_bulk", ++ [1020].param3 = 1, ++ [1022].file = "sound/pci/rme9652/rme9652.c", ++ [1022].name = "snd_rme9652_playback_copy", ++ [1022].param5 = 1, ++ [10341].file = "fs/nfsd/nfs4xdr.c", ++ [10341].name = "read_buf", ++ [10341].param2 = 1, ++ [10357].file = "net/sunrpc/cache.c", ++ [10357].name = "cache_read", ++ [10357].param3 = 1, ++ [10397].file = "drivers/gpu/drm/i915/i915_debugfs.c", ++ [10397].name = "i915_wedged_write", ++ [10397].param3 = 1, ++ [10414].file = "drivers/tty/vt/vt.c", ++ [10414].name = "vc_do_resize", ++ [10414].param3 = 1, ++ [10414].param4 = 1, ++ [10496].file = "drivers/bluetooth/hci_vhci.c", ++ [10496].name = "vhci_read", ++ [10496].param3 = 1, ++ [10565].file = "drivers/input/touchscreen/ad7879-spi.c", ++ [10565].name = "ad7879_spi_multi_read", ++ [10565].param3 = 1, ++ [10623].file = "drivers/infiniband/core/user_mad.c", ++ [10623].name = "ib_umad_write", ++ [10623].param3 = 1, ++ [10707].file = "fs/nfs/idmap.c", ++ [10707].name = "nfs_idmap_request_key", ++ [10707].param2 = 1, ++ [10773].file = "drivers/input/mousedev.c", ++ [10773].name = "mousedev_read", ++ [10773].param3 = 1, ++ [10777].file = "fs/ntfs/file.c", ++ [10777].name = "ntfs_file_buffered_write", ++ [10777].param6 = 1, ++ [10919].file = "net/ipv4/netfilter/arp_tables.c", ++ [10919].name = "do_arpt_set_ctl", ++ [10919].param4 = 1, ++ [11054].file = "drivers/net/wireless/libertas/debugfs.c", ++ [11054].name = "lbs_wrmac_write", ++ [11054].param3 = 1, ++ [11068].file = "drivers/net/wireless/libertas/debugfs.c", ++ [11068].name = "lbs_wrrf_write", ++ [11068].param3 = 1, ++ [11364].file = "fs/ext4/super.c", ++ [11364].name = "ext4_kvzalloc", ++ [11364].param1 = 1, ++ [11402].file = "drivers/net/wireless/libertas/debugfs.c", ++ [11402].name = "lbs_threshold_write", ++ [11402].param5 = 1, ++ [11494].file = "drivers/video/via/viafbdev.c", ++ [11494].name = "viafb_dvp1_proc_write", ++ [11494].param3 = 1, ++ [11616].file = "security/selinux/selinuxfs.c", ++ [11616].name = "sel_write_enforce", ++ [11616].param3 = 1, ++ [11699].file = "drivers/net/ethernet/neterion/vxge/vxge-config.h", ++ [11699].name = "vxge_os_dma_malloc", ++ [11699].param2 = 1, ++ [11766].file = "drivers/block/paride/pt.c", ++ [11766].name = "pt_read", ++ [11766].param3 = 1, ++ [11784].file = "fs/bio.c", ++ [11784].name = "bio_kmalloc", ++ [11784].param2 = 1, ++ [11814].file = "drivers/staging/speakup/kobjects.c", ++ [11814].name = "keymap_store", ++ [11814].param4 = 1, ++ [11912].file = "net/sunrpc/cache.c", ++ [11912].name = "cache_write_pipefs", ++ [11912].param3 = 1, ++ [11919].file = "drivers/lguest/core.c", ++ [11919].name = "__lgread", ++ [11919].param4 = 1, ++ [11986].file = "drivers/net/usb/asix.c", ++ [11986].name = "asix_read_cmd", ++ [11986].param5 = 1, ++ [12059].file = "drivers/net/wireless/libertas/debugfs.c", ++ [12059].name = "lbs_debugfs_write", ++ [12059].param3 = 1, ++ [12071].file = "lib/kstrtox.c", ++ [12071].name = "kstrtou8_from_user", ++ [12071].param2 = 1, ++ [12151].file = "fs/compat.c", ++ [12151].name = "compat_rw_copy_check_uvector", ++ [12151].param3 = 1, ++ [12205].file = "fs/reiserfs/journal.c", ++ [12205].name = "reiserfs_allocate_list_bitmaps", ++ [12205].param3 = 1, ++ [12234].file = "include/acpi/platform/aclinux.h", ++ [12234].name = "acpi_os_allocate", ++ [12234].param1 = 1, ++ [1227].file = "lib/cpu_rmap.c", ++ [1227].name = "alloc_cpu_rmap", ++ [1227].param1 = 
1, ++ [12395].file = "drivers/char/hw_random/core.c", ++ [12395].name = "rng_dev_read", ++ [12395].param3 = 1, ++ [1248].file = "kernel/kprobes.c", ++ [1248].name = "write_enabled_file_bool", ++ [1248].param3 = 1, ++ [12501].file = "net/mac80211/debugfs.c", ++ [12501].name = "uapsd_max_sp_len_write", ++ [12501].param3 = 1, ++ [12591].file = "sound/core/pcm_lib.c", ++ [12591].name = "snd_pcm_lib_writev_transfer", ++ [12591].param5 = 1, ++ [12602].file = "net/sunrpc/cache.c", ++ [12602].name = "cache_downcall", ++ [12602].param3 = 1, ++ [12712].file = "drivers/net/wimax/i2400m/fw.c", ++ [12712].name = "i2400m_zrealloc_2x", ++ [12712].param3 = 1, ++ [12755].file = "sound/drivers/opl4/opl4_proc.c", ++ [12755].name = "snd_opl4_mem_proc_read", ++ [12755].param5 = 1, ++ [12833].file = "net/sctp/auth.c", ++ [12833].name = "sctp_auth_create_key", ++ [12833].param1 = 1, ++ [12840].file = "net/sctp/tsnmap.c", ++ [12840].name = "sctp_tsnmap_mark", ++ [12840].param2 = 1, ++ [12896].file = "drivers/net/wireless/wl12xx/debugfs.c", ++ [12896].name = "beacon_filtering_write", ++ [12896].param3 = 1, ++ [12931].file = "drivers/hid/hid-roccat.c", ++ [12931].name = "roccat_read", ++ [12931].param3 = 1, ++ [12954].file = "fs/proc/base.c", ++ [12954].name = "oom_adjust_write", ++ [12954].param3 = 1, ++ [13013].file = "drivers/media/dvb/ttpci/av7110_ca.c", ++ [13013].name = "dvb_ca_write", ++ [13013].param3 = 1, ++ [13103].file = "drivers/acpi/acpica/utobject.c", ++ [13103].name = "acpi_ut_create_string_object", ++ [13103].param1 = 1, ++ [13121].file = "net/ipv4/ip_sockglue.c", ++ [13121].name = "do_ip_setsockopt", ++ [13121].param5 = 1, ++ [13337].file = "net/core/iovec.c", ++ [13337].name = "csum_partial_copy_fromiovecend", ++ [13337].param4 = 1, ++ [13339].file = "security/smack/smackfs.c", ++ [13339].name = "smk_write_netlbladdr", ++ [13339].param3 = 1, ++ [13342].file = "fs/jbd2/journal.c", ++ [13342].name = "jbd2_alloc", ++ [13342].param1 = 1, ++ [13412].file = "fs/proc/base.c", ++ [13412].name = "oom_score_adj_write", ++ [13412].param3 = 1, ++ [13659].file = "drivers/net/wan/hdlc.c", ++ [13659].name = "attach_hdlc_protocol", ++ [13659].param3 = 1, ++ [13708].file = "drivers/usb/misc/usbtest.c", ++ [13708].name = "simple_alloc_urb", ++ [13708].param3 = 1, ++ [13863].file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c", ++ [13863].name = "rs_sta_dbgfs_scale_table_write", ++ [13863].param3 = 1, ++ [13924].file = "net/ipv4/netfilter/ip_tables.c", ++ [13924].name = "do_ipt_set_ctl", ++ [13924].param4 = 1, ++ [14019].file = "net/dns_resolver/dns_key.c", ++ [14019].name = "dns_resolver_instantiate", ++ [14019].param2 = 1, ++ [14019].param3 = 1, ++ [14025].file = "net/ax25/af_ax25.c", ++ [14025].name = "ax25_setsockopt", ++ [14025].param5 = 1, ++ [14029].file = "drivers/spi/spidev.c", ++ [14029].name = "spidev_compat_ioctl", ++ [14029].param2 = 1, ++ [14031].file = "drivers/net/wireless/ath/ath5k/debug.c", ++ [14031].name = "write_file_beacon", ++ [14031].param3 = 1, ++ [14086].file = "fs/nfs/nfs4proc.c", ++ [14086].name = "nfs4_reset_slot_table", ++ [14086].param2 = 1, ++ [14090].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [14090].name = "btmrvl_hsmode_write", ++ [14090].param3 = 1, ++ [14125].file = "kernel/module.c", ++ [14125].name = "load_module", ++ [14125].param2 = 1, ++ [14149].file = "drivers/hid/hidraw.c", ++ [14149].name = "hidraw_ioctl", ++ [14149].param2 = 1, ++ [14153].file = "drivers/staging/bcm/led_control.c", ++ [14153].name = "ValidateDSDParamsChecksum", ++ [14153].param3 = 1, ++ [14174].file = 
"sound/pci/es1938.c", ++ [14174].name = "snd_es1938_capture_copy", ++ [14174].param5 = 1, ++ [14207].file = "drivers/media/video/v4l2-event.c", ++ [14207].name = "v4l2_event_subscribe", ++ [14207].param3 = 1, ++ [14241].file = "drivers/platform/x86/asus_acpi.c", ++ [14241].name = "brn_proc_write", ++ [14241].param3 = 1, ++ [14299].file = "sound/core/oss/pcm_plugin.c", ++ [14299].name = "snd_pcm_plugin_alloc", ++ [14299].param2 = 1, ++ [14345].file = "fs/cachefiles/daemon.c", ++ [14345].name = "cachefiles_daemon_write", ++ [14345].param3 = 1, ++ [14347].file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c", ++ [14347].name = "dvb_ca_en50221_io_write", ++ [14347].param3 = 1, ++ [14462].file = "fs/namei.c", ++ [14462].name = "sys_rmdir", ++ [14462].param1 = 1, ++ [14478].file = "drivers/char/random.c", ++ [14478].name = "random_write", ++ [14478].param3 = 1, ++ [1458].file = "drivers/misc/lkdtm.c", ++ [1458].name = "direct_entry", ++ [1458].param3 = 1, ++ [145].file = "lib/xz/xz_dec_test.c", ++ [145].name = "xz_dec_test_write", ++ [145].param3 = 1, ++ [14646].file = "fs/compat.c", ++ [14646].name = "compat_writev", ++ [14646].param3 = 1, ++ [14736].file = "drivers/usb/misc/usbtest.c", ++ [14736].name = "unlink_queued", ++ [14736].param3 = 1, ++ [14842].file = "fs/namei.c", ++ [14842].name = "sys_renameat", ++ [14842].param2 = 1, ++ [14842].param4 = 1, ++ [15017].file = "drivers/edac/edac_device.c", ++ [15017].name = "edac_device_alloc_ctl_info", ++ [15017].param1 = 1, ++ [15087].file = "fs/bio.c", ++ [15087].name = "bio_map_kern", ++ [15087].param2 = 1, ++ [15087].param3 = 1, ++ [15112].file = "drivers/xen/evtchn.c", ++ [15112].name = "evtchn_write", ++ [15112].param3 = 1, ++ [15274].file = "crypto/shash.c", ++ [15274].name = "crypto_shash_setkey", ++ [15274].param3 = 1, ++ [15361].file = "drivers/char/agp/generic.c", ++ [15361].name = "agp_allocate_memory", ++ [15361].param2 = 1, ++ [15497].file = "drivers/media/dvb/ddbridge/ddbridge-core.c", ++ [15497].name = "ts_read", ++ [15497].param3 = 1, ++ [15551].file = "net/ipv4/netfilter/ipt_CLUSTERIP.c", ++ [15551].name = "clusterip_proc_write", ++ [15551].param3 = 1, ++ [15701].file = "drivers/hid/hid-roccat-common.c", ++ [15701].name = "roccat_common_receive", ++ [15701].param4 = 1, ++ [1572].file = "net/ceph/pagevec.c", ++ [1572].name = "ceph_copy_page_vector_to_user", ++ [1572].param4 = 1, ++ [15814].file = "net/mac80211/debugfs_netdev.c", ++ [15814].name = "ieee80211_if_write", ++ [15814].param3 = 1, ++ [15883].file = "security/keys/keyctl.c", ++ [15883].name = "sys_add_key", ++ [15883].param4 = 1, ++ [15884].file = "fs/exofs/super.c", ++ [15884].name = "exofs_read_lookup_dev_table", ++ [15884].param3 = 1, ++ [1603].file = "fs/debugfs/file.c", ++ [1603].name = "write_file_bool", ++ [1603].param3 = 1, ++ [16073].file = "net/sctp/socket.c", ++ [16073].name = "sctp_setsockopt", ++ [16073].param5 = 1, ++ [16138].file = "security/selinux/ss/services.c", ++ [16138].name = "security_context_to_sid_force", ++ [16138].param2 = 1, ++ [16166].file = "drivers/platform/x86/thinkpad_acpi.c", ++ [16166].name = "dispatch_proc_write", ++ [16166].param3 = 1, ++ [16229].file = "drivers/scsi/scsi_transport_iscsi.c", ++ [16229].name = "iscsi_offload_mesg", ++ [16229].param5 = 1, ++ [16353].file = "drivers/base/regmap/regmap.c", ++ [16353].name = "regmap_raw_write", ++ [16353].param4 = 1, ++ [16383].file = "fs/proc/base.c", ++ [16383].name = "comm_write", ++ [16383].param3 = 1, ++ [16447].file = "drivers/hid/usbhid/hiddev.c", ++ [16447].name = "hiddev_ioctl", ++ 
[16447].param2 = 1, ++ [16453].file = "include/linux/slab.h", ++ [16453].name = "kzalloc", ++ [16453].param1 = 1, ++ [16535].file = "fs/proc/generic.c", ++ [16535].name = "proc_file_read", ++ [16535].param3 = 1, ++ [16605].file = "fs/ecryptfs/miscdev.c", ++ [16605].name = "ecryptfs_send_miscdev", ++ [16605].param2 = 1, ++ [16606].file = "drivers/ide/ide-tape.c", ++ [16606].name = "idetape_chrdev_write", ++ [16606].param3 = 1, ++ [16741].file = "fs/namei.c", ++ [16741].name = "sys_unlinkat", ++ [16741].param2 = 1, ++ [16911].file = "drivers/media/dvb/ttpci/av7110_hw.c", ++ [16911].name = "LoadBitmap", ++ [16911].param2 = 1, ++ [17075].file = "sound/isa/gus/gus_dram.c", ++ [17075].name = "snd_gus_dram_write", ++ [17075].param4 = 1, ++ [17133].file = "drivers/usb/misc/iowarrior.c", ++ [17133].name = "iowarrior_read", ++ [17133].param3 = 1, ++ [17139].file = "fs/ubifs/xattr.c", ++ [17139].name = "ubifs_setxattr", ++ [17139].param4 = 1, ++ [17185].file = "net/wireless/scan.c", ++ [17185].name = "cfg80211_inform_bss", ++ [17185].param8 = 1, ++ [17349].file = "net/tipc/link.c", ++ [17349].name = "tipc_link_send_sections_fast", ++ [17349].param4 = 1, ++ [17377].file = "drivers/usb/class/cdc-wdm.c", ++ [17377].name = "wdm_write", ++ [17377].param3 = 1, ++ [17459].file = "drivers/usb/misc/rio500.c", ++ [17459].name = "write_rio", ++ [17459].param3 = 1, ++ [17460].file = "fs/nfsd/nfscache.c", ++ [17460].name = "nfsd_cache_update", ++ [17460].param3 = 1, ++ [17492].file = "net/dccp/proto.c", ++ [17492].name = "do_dccp_setsockopt", ++ [17492].param5 = 1, ++ [1754].file = "sound/core/oss/pcm_oss.c", ++ [1754].name = "snd_pcm_oss_write", ++ [1754].param3 = 1, ++ [17571].file = "drivers/ptp/ptp_chardev.c", ++ [17571].name = "ptp_read", ++ [17571].param4 = 1, ++ [17684].file = "fs/namei.c", ++ [17684].name = "sys_mknod", ++ [17684].param1 = 1, ++ [17718].file = "net/caif/caif_socket.c", ++ [17718].name = "setsockopt", ++ [17718].param5 = 1, ++ [17875].file = "fs/namei.c", ++ [17875].name = "sys_linkat", ++ [17875].param2 = 1, ++ [17875].param4 = 1, ++ [17946].file = "drivers/net/wireless/libertas/if_spi.c", ++ [17946].name = "if_spi_host_to_card", ++ [17946].param4 = 1, ++ [1800].file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ [1800].name = "dvb_dvr_do_ioctl", ++ [1800].param3 = 1, ++ [18102].file = "net/netlink/af_netlink.c", ++ [18102].name = "netlink_change_ngroups", ++ [18102].param2 = 1, ++ [18183].file = "drivers/tty/tty_buffer.c", ++ [18183].name = "tty_insert_flip_string_fixed_flag", ++ [18183].param4 = 1, ++ [18224].file = "drivers/xen/grant-table.c", ++ [18224].name = "gnttab_map", ++ [18224].param2 = 1, ++ [18232].file = "fs/nfs/write.c", ++ [18232].name = "nfs_writedata_alloc", ++ [18232].param1 = 1, ++ [18277].file = "drivers/char/virtio_console.c", ++ [18277].name = "port_fops_write", ++ [18277].param3 = 1, ++ [18303].file = "fs/xattr.c", ++ [18303].name = "getxattr", ++ [18303].param4 = 1, ++ [18353].file = "net/rfkill/core.c", ++ [18353].name = "rfkill_fop_read", ++ [18353].param3 = 1, ++ [18386].file = "fs/read_write.c", ++ [18386].name = "vfs_readv", ++ [18386].param3 = 1, ++ [18391].file = "fs/ocfs2/stack_user.c", ++ [18391].name = "ocfs2_control_write", ++ [18391].param3 = 1, ++ [183].file = "crypto/ahash.c", ++ [183].name = "crypto_ahash_setkey", ++ [183].param3 = 1, ++ [18406].file = "drivers/media/video/tm6000/tm6000-core.c", ++ [18406].name = "tm6000_read_write_usb", ++ [18406].param7 = 1, ++ [1845].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [1845].name = 
"rt2x00debug_write_rf", ++ [1845].param3 = 1, ++ [18465].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c", ++ [18465].name = "cxgb_alloc_mem", ++ [18465].param1 = 1, ++ [1858].file = "net/ipv6/netfilter/ip6_tables.c", ++ [1858].name = "do_ip6t_set_ctl", ++ [1858].param4 = 1, ++ [18659].file = "drivers/media/dvb/dvb-core/dvbdev.c", ++ [18659].name = "dvb_usercopy", ++ [18659].param2 = 1, ++ [18775].file = "drivers/net/wireless/ath/ath5k/debug.c", ++ [18775].name = "write_file_frameerrors", ++ [18775].param3 = 1, ++ [18928].file = "drivers/staging/speakup/devsynth.c", ++ [18928].name = "speakup_file_write", ++ [18928].param3 = 1, ++ [18988].file = "drivers/staging/vme/devices/vme_user.c", ++ [18988].name = "vme_user_read", ++ [18988].param3 = 1, ++ [19012].file = "drivers/acpi/event.c", ++ [19012].name = "acpi_system_read_event", ++ [19012].param3 = 1, ++ [19028].file = "mm/filemap.c", ++ [19028].name = "iov_iter_copy_from_user_atomic", ++ [19028].param4 = 1, ++ [19107].file = "security/smack/smackfs.c", ++ [19107].name = "smk_write_load_list", ++ [19107].param3 = 1, ++ [19261].file = "net/netlabel/netlabel_domainhash.c", ++ [19261].name = "netlbl_domhsh_init", ++ [19261].param1 = 1, ++ [19274].file = "net/core/pktgen.c", ++ [19274].name = "pktgen_if_write", ++ [19274].param3 = 1, ++ [19286].file = "drivers/base/regmap/regmap.c", ++ [19286].name = "_regmap_raw_write", ++ [19286].param4 = 1, ++ [19288].file = "net/ipv6/raw.c", ++ [19288].name = "rawv6_setsockopt", ++ [19288].param5 = 1, ++ [19308].file = "drivers/char/mem.c", ++ [19308].name = "read_oldmem", ++ [19308].param3 = 1, ++ [19332].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [19332].name = "iwl_dbgfs_plcp_delta_write", ++ [19332].param3 = 1, ++ [19349].file = "drivers/acpi/acpica/utobject.c", ++ [19349].name = "acpi_ut_create_package_object", ++ [19349].param1 = 1, ++ [19504].file = "drivers/usb/serial/garmin_gps.c", ++ [19504].name = "pkt_add", ++ [19504].param3 = 1, ++ [19522].file = "mm/percpu.c", ++ [19522].name = "pcpu_mem_zalloc", ++ [19522].param1 = 1, ++ [19548].file = "drivers/scsi/qla2xxx/qla_init.c", ++ [19548].name = "qla2x00_get_ctx_sp", ++ [19548].param3 = 1, ++ [19738].file = "fs/sysfs/file.c", ++ [19738].name = "sysfs_write_file", ++ [19738].param3 = 1, ++ [19833].file = "drivers/xen/xenfs/privcmd.c", ++ [19833].name = "gather_array", ++ [19833].param3 = 1, ++ [19909].file = "drivers/net/wireless/libertas/debugfs.c", ++ [19909].name = "lbs_sleepparams_write", ++ [19909].param3 = 1, ++ [19920].file = "drivers/input/joydev.c", ++ [19920].name = "joydev_ioctl", ++ [19920].param2 = 1, ++ [19931].file = "drivers/usb/misc/ftdi-elan.c", ++ [19931].name = "ftdi_elan_write", ++ [19931].param3 = 1, ++ [19943].file = "drivers/net/wireless/ath/ath9k/debug.c", ++ [19943].name = "write_file_regval", ++ [19943].param3 = 1, ++ [19960].file = "drivers/usb/class/usblp.c", ++ [19960].name = "usblp_read", ++ [19960].param3 = 1, ++ [20023].file = "drivers/media/video/gspca/gspca.c", ++ [20023].name = "dev_read", ++ [20023].param3 = 1, ++ [20113].file = "drivers/net/wireless/libertas/debugfs.c", ++ [20113].name = "lbs_rdmac_write", ++ [20113].param3 = 1, ++ [20314].file = "drivers/gpu/drm/drm_hashtab.c", ++ [20314].name = "drm_ht_create", ++ [20314].param2 = 1, ++ [20376].file = "mm/nobootmem.c", ++ [20376].name = "__alloc_bootmem_nopanic", ++ [20376].param1 = 1, ++ [20606].file = "fs/nilfs2/mdt.c", ++ [20606].name = "nilfs_mdt_init", ++ [20606].param3 = 1, ++ [20611].file = "net/netfilter/x_tables.c", ++ 
[20611].name = "xt_alloc_table_info", ++ [20611].param1 = 1, ++ [20713].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c", ++ [20713].name = "ttm_bo_io", ++ [20713].param5 = 1, ++ [20730].file = "drivers/media/video/videobuf2-vmalloc.c", ++ [20730].name = "vb2_vmalloc_alloc", ++ [20730].param2 = 1, ++ [20801].file = "drivers/vhost/vhost.c", ++ [20801].name = "vhost_add_used_n", ++ [20801].param3 = 1, ++ [20835].file = "drivers/isdn/i4l/isdn_common.c", ++ [20835].name = "isdn_read", ++ [20835].param3 = 1, ++ [20951].file = "crypto/rng.c", ++ [20951].name = "rngapi_reset", ++ [20951].param3 = 1, ++ [21134].file = "drivers/video/via/viafbdev.c", ++ [21134].name = "viafb_dfph_proc_write", ++ [21134].param3 = 1, ++ [21193].file = "net/wireless/sme.c", ++ [21193].name = "cfg80211_disconnected", ++ [21193].param4 = 1, ++ [21277].file = "drivers/usb/storage/shuttle_usbat.c", ++ [21277].name = "usbat_flash_write_data", ++ [21277].param4 = 1, ++ [21312].file = "lib/ts_kmp.c", ++ [21312].name = "kmp_init", ++ [21312].param2 = 1, ++ [21335].file = "net/econet/af_econet.c", ++ [21335].name = "econet_sendmsg", ++ [21335].param4 = 1, ++ [21397].file = "net/core/sock.c", ++ [21397].name = "sock_setsockopt", ++ [21397].param5 = 1, ++ [21406].file = "fs/libfs.c", ++ [21406].name = "simple_write_to_buffer", ++ [21406].param2 = 1, ++ [21406].param5 = 1, ++ [21451].file = "net/netfilter/ipvs/ip_vs_ctl.c", ++ [21451].name = "do_ip_vs_set_ctl", ++ [21451].param4 = 1, ++ [21459].file = "security/smack/smackfs.c", ++ [21459].name = "smk_write_doi", ++ [21459].param3 = 1, ++ [21468].file = "drivers/char/virtio_console.c", ++ [21468].name = "port_fops_read", ++ [21468].param3 = 1, ++ [21511].file = "drivers/input/ff-core.c", ++ [21511].name = "input_ff_create", ++ [21511].param2 = 1, ++ [21538].file = "net/bluetooth/l2cap_sock.c", ++ [21538].name = "l2cap_sock_setsockopt", ++ [21538].param5 = 1, ++ [21608].file = "drivers/char/tpm/tpm.c", ++ [21608].name = "tpm_write", ++ [21608].param3 = 1, ++ [2160].file = "drivers/net/wireless/ray_cs.c", ++ [2160].name = "int_proc_write", ++ [2160].param3 = 1, ++ [21632].file = "fs/afs/cell.c", ++ [21632].name = "afs_cell_create", ++ [21632].param2 = 1, ++ [21679].file = "drivers/net/wireless/ath/carl9170/debug.c", ++ [21679].name = "carl9170_debugfs_write", ++ [21679].param3 = 1, ++ [21712].file = "net/rxrpc/ar-output.c", ++ [21712].name = "rxrpc_send_data", ++ [21712].param5 = 1, ++ [2180].file = "drivers/char/ppdev.c", ++ [2180].name = "pp_write", ++ [2180].param3 = 1, ++ [21946].file = "fs/nfs/idmap.c", ++ [21946].name = "nfs_map_name_to_uid", ++ [21946].param3 = 1, ++ [22085].file = "drivers/staging/sep/sep_driver.c", ++ [22085].name = "sep_lock_user_pages", ++ [22085].param2 = 1, ++ [22085].param3 = 1, ++ [22187].file = "fs/namei.c", ++ [22187].name = "user_path_at_empty", ++ [22187].param2 = 1, ++ [22190].file = "drivers/char/tpm/tpm.c", ++ [22190].name = "tpm_read", ++ [22190].param3 = 1, ++ [22204].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [22204].name = "iwl_dbgfs_echo_test_write", ++ [22204].param3 = 1, ++ [22291].file = "net/core/pktgen.c", ++ [22291].name = "pgctrl_write", ++ [22291].param3 = 1, ++ [22439].file = "fs/afs/rxrpc.c", ++ [22439].name = "afs_alloc_flat_call", ++ [22439].param2 = 1, ++ [22439].param3 = 1, ++ [2243].file = "drivers/scsi/scsi_tgt_lib.c", ++ [2243].name = "scsi_tgt_kspace_exec", ++ [2243].param8 = 1, ++ [22546].file = "drivers/char/pcmcia/cm4040_cs.c", ++ [22546].name = "cm4040_read", ++ [22546].param3 = 1, ++ [22742].file = 
"drivers/tty/tty_buffer.c", ++ [22742].name = "tty_insert_flip_string_flags", ++ [22742].param4 = 1, ++ [22772].file = "drivers/target/iscsi/iscsi_target_erl1.c", ++ [22772].name = "iscsit_dump_data_payload", ++ [22772].param2 = 1, ++ [2286].file = "drivers/scsi/mvumi.c", ++ [2286].name = "mvumi_alloc_mem_resource", ++ [2286].param3 = 1, ++ [22904].file = "security/selinux/ss/services.c", ++ [22904].name = "security_context_to_sid_default", ++ [22904].param2 = 1, ++ [22932].file = "fs/compat.c", ++ [22932].name = "compat_sys_writev", ++ [22932].param3 = 1, ++ [2302].file = "drivers/media/video/stk-webcam.c", ++ [2302].name = "v4l_stk_read", ++ [2302].param3 = 1, ++ [23037].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [23037].name = "iwl_dbgfs_wd_timeout_write", ++ [23037].param3 = 1, ++ [2307].file = "drivers/pcmcia/cistpl.c", ++ [2307].name = "pcmcia_replace_cis", ++ [2307].param3 = 1, ++ [23093].file = "drivers/scsi/st.c", ++ [23093].name = "st_read", ++ [23093].param3 = 1, ++ [23117].file = "drivers/media/dvb/ttpci/av7110_av.c", ++ [23117].name = "dvb_audio_write", ++ [23117].param3 = 1, ++ [2324].file = "net/ieee802154/wpan-class.c", ++ [2324].name = "wpan_phy_alloc", ++ [2324].param1 = 1, ++ [23535].file = "ipc/sem.c", ++ [23535].name = "sys_semtimedop", ++ [23535].param3 = 1, ++ [2357].file = "drivers/usb/serial/garmin_gps.c", ++ [2357].name = "garmin_read_process", ++ [2357].param3 = 1, ++ [23589].file = "kernel/relay.c", ++ [23589].name = "subbuf_read_actor", ++ [23589].param3 = 1, ++ [23619].file = "drivers/tty/tty_buffer.c", ++ [23619].name = "tty_buffer_request_room", ++ [23619].param2 = 1, ++ [23640].file = "drivers/usb/host/ehci-dbg.c", ++ [23640].name = "debug_lpm_write", ++ [23640].param3 = 1, ++ [23684].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [23684].name = "iwl_legacy_dbgfs_clear_traffic_statistics_write", ++ [23684].param3 = 1, ++ [23848].file = "crypto/blkcipher.c", ++ [23848].name = "async_setkey", ++ [23848].param3 = 1, ++ [2386].file = "drivers/acpi/acpica/exnames.c", ++ [2386].name = "acpi_ex_allocate_name_string", ++ [2386].param2 = 1, ++ [23883].file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c", ++ [23883].name = "iwl_dbgfs_interrupt_write", ++ [23883].param3 = 1, ++ [23999].file = "sound/pci/rme9652/hdsp.c", ++ [23999].name = "snd_hdsp_capture_copy", ++ [23999].param5 = 1, ++ [24072].file = "drivers/staging/pohmelfs/inode.c", ++ [24072].name = "pohmelfs_send_readpages", ++ [24072].param3 = 1, ++ [24233].file = "drivers/pci/pcie/aer/aer_inject.c", ++ [24233].name = "aer_inject_write", ++ [24233].param3 = 1, ++ [24263].file = "kernel/cgroup.c", ++ [24263].name = "cgroup_file_write", ++ [24263].param3 = 1, ++ [24313].file = "drivers/staging/frontier/tranzport.c", ++ [24313].name = "usb_tranzport_write", ++ [24313].param3 = 1, ++ [24359].file = "kernel/power/qos.c", ++ [24359].name = "pm_qos_power_write", ++ [24359].param3 = 1, ++ [24410].file = "drivers/net/wireless/ipw2x00/libipw_module.c", ++ [24410].name = "debug_level_proc_write", ++ [24410].param3 = 1, ++ [24457].file = "fs/btrfs/backref.c", ++ [24457].name = "init_data_container", ++ [24457].param1 = 1, ++ [24539].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c", ++ [24539].name = "vmw_framebuffer_dmabuf_dirty", ++ [24539].param6 = 1, ++ [24719].file = "drivers/input/evdev.c", ++ [24719].name = "bits_to_user", ++ [24719].param2 = 1, ++ [24719].param3 = 1, ++ [2472].file = "net/ipv4/netfilter/ip_tables.c", ++ [2472].name = "compat_do_ipt_set_ctl", ++ [2472].param4 = 1, ++ 
[24755].file = "drivers/infiniband/hw/qib/qib_diag.c", ++ [24755].name = "qib_diag_write", ++ [24755].param3 = 1, ++ [24805].file = "security/keys/user_defined.c", ++ [24805].name = "user_update", ++ [24805].param3 = 1, ++ [25036].file = "fs/pipe.c", ++ [25036].name = "pipe_iov_copy_from_user", ++ [25036].param3 = 1, ++ [25127].file = "drivers/scsi/device_handler/scsi_dh_alua.c", ++ [25127].name = "realloc_buffer", ++ [25127].param2 = 1, ++ [25157].file = "security/keys/request_key_auth.c", ++ [25157].name = "request_key_auth_new", ++ [25157].param3 = 1, ++ [25158].file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c", ++ [25158].name = "mlx4_en_create_rx_ring", ++ [25158].param3 = 1, ++ [25223].file = "drivers/platform/x86/toshiba_acpi.c", ++ [25223].name = "fan_proc_write", ++ [25223].param3 = 1, ++ [25267].file = "fs/configfs/file.c", ++ [25267].name = "configfs_write_file", ++ [25267].param3 = 1, ++ [25356].file = "net/core/dev.c", ++ [25356].name = "alloc_netdev_mqs", ++ [25356].param4 = 1, ++ [25356].param5 = 1, ++ [25495].file = "drivers/scsi/bfa/bfad_debugfs.c", ++ [25495].name = "bfad_debugfs_write_regwr", ++ [25495].param3 = 1, ++ [25558].file = "fs/proc/task_mmu.c", ++ [25558].name = "clear_refs_write", ++ [25558].param3 = 1, ++ [25692].file = "drivers/net/wireless/ath/ath6kl/wmi.c", ++ [25692].name = "ath6kl_wmi_send_action_cmd", ++ [25692].param6 = 1, ++ [2609].file = "lib/kstrtox.c", ++ [2609].name = "kstrtoul_from_user", ++ [2609].param2 = 1, ++ [26100].file = "sound/core/info.c", ++ [26100].name = "snd_info_entry_write", ++ [26100].param3 = 1, ++ [26215].file = "drivers/md/dm-table.c", ++ [26215].name = "dm_table_create", ++ [26215].param3 = 1, ++ [26256].file = "fs/hpfs/name.c", ++ [26256].name = "hpfs_translate_name", ++ [26256].param3 = 1, ++ [26404].file = "drivers/net/wireless/mwifiex/debugfs.c", ++ [26404].name = "mwifiex_rdeeprom_write", ++ [26404].param3 = 1, ++ [26494].file = "kernel/signal.c", ++ [26494].name = "sys_rt_sigpending", ++ [26494].param2 = 1, ++ [26497].file = "security/keys/keyctl.c", ++ [26497].name = "sys_keyctl", ++ [26497].param4 = 1, ++ [26533].file = "drivers/block/aoe/aoechr.c", ++ [26533].name = "aoechr_write", ++ [26533].param3 = 1, ++ [26560].file = "crypto/algapi.c", ++ [26560].name = "crypto_alloc_instance2", ++ [26560].param3 = 1, ++ [26620].file = "net/bluetooth/mgmt.c", ++ [26620].name = "mgmt_control", ++ [26620].param3 = 1, ++ [26701].file = "drivers/mtd/chips/cfi_util.c", ++ [26701].name = "cfi_read_pri", ++ [26701].param3 = 1, ++ [26757].file = "fs/xattr.c", ++ [26757].name = "sys_fgetxattr", ++ [26757].param4 = 1, ++ [2678].file = "drivers/platform/x86/asus_acpi.c", ++ [2678].name = "disp_proc_write", ++ [2678].param3 = 1, ++ [26834].file = "drivers/gpu/drm/drm_drv.c", ++ [26834].name = "drm_ioctl", ++ [26834].param2 = 1, ++ [26843].file = "drivers/firewire/core-cdev.c", ++ [26843].name = "fw_device_op_compat_ioctl", ++ [26843].param2 = 1, ++ [26845].file = "drivers/scsi/qla2xxx/qla_bsg.c", ++ [26845].name = "qla2x00_get_ctx_bsg_sp", ++ [26845].param3 = 1, ++ [26962].file = "drivers/usb/class/usbtmc.c", ++ [26962].name = "usbtmc_write", ++ [26962].param3 = 1, ++ [26966].file = "drivers/media/dvb/ddbridge/ddbridge-core.c", ++ [26966].name = "ts_write", ++ [26966].param3 = 1, ++ [27004].file = "drivers/misc/hpilo.c", ++ [27004].name = "ilo_write", ++ [27004].param3 = 1, ++ [27025].file = "fs/ntfs/file.c", ++ [27025].name = "__ntfs_copy_from_user_iovec_inatomic", ++ [27025].param3 = 1, ++ [27025].param4 = 1, ++ [27061].file = 
"drivers/firewire/core-cdev.c", ++ [27061].name = "iso_callback", ++ [27061].param3 = 1, ++ [2711].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c", ++ [2711].name = "dvb_ringbuffer_read_user", ++ [2711].param3 = 1, ++ [27129].file = "fs/lockd/mon.c", ++ [27129].name = "nsm_get_handle", ++ [27129].param4 = 1, ++ [27142].file = "fs/proc/kcore.c", ++ [27142].name = "read_kcore", ++ [27142].param3 = 1, ++ [27164].file = "include/drm/drm_mem_util.h", ++ [27164].name = "drm_calloc_large", ++ [27164].param1 = 1, ++ [2722].file = "drivers/gpu/drm/ttm/ttm_page_alloc.c", ++ [2722].name = "ttm_alloc_new_pages", ++ [2722].param5 = 1, ++ [27232].file = "security/apparmor/lib.c", ++ [27232].name = "kvmalloc", ++ [27232].param1 = 1, ++ [27275].file = "drivers/scsi/cxgbi/libcxgbi.c", ++ [27275].name = "cxgbi_ddp_reserve", ++ [27275].param4 = 1, ++ [27280].file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c", ++ [27280].name = "mlx4_en_create_tx_ring", ++ [27280].param4 = 1, ++ [27290].file = "security/selinux/ss/services.c", ++ [27290].name = "security_context_to_sid_core", ++ [27290].param2 = 1, ++ [27302].file = "fs/proc/base.c", ++ [27302].name = "proc_loginuid_write", ++ [27302].param3 = 1, ++ [27472].file = "security/selinux/selinuxfs.c", ++ [27472].name = "sel_write_load", ++ [27472].param3 = 1, ++ [27491].file = "fs/proc/base.c", ++ [27491].name = "proc_pid_attr_write", ++ [27491].param3 = 1, ++ [27568].file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c", ++ [27568].name = "t4_alloc_mem", ++ [27568].param1 = 1, ++ [27582].file = "drivers/platform/x86/asus_acpi.c", ++ [27582].name = "ledd_proc_write", ++ [27582].param3 = 1, ++ [27695].file = "fs/namei.c", ++ [27695].name = "sys_link", ++ [27695].param1 = 1, ++ [27695].param2 = 1, ++ [27697].file = "drivers/staging/mei/iorw.c", ++ [27697].name = "amthi_read", ++ [27697].param4 = 1, ++ [27927].file = "drivers/tty/tty_io.c", ++ [27927].name = "redirected_tty_write", ++ [27927].param3 = 1, ++ [28040].file = "kernel/kfifo.c", ++ [28040].name = "__kfifo_alloc", ++ [28040].param2 = 1, ++ [28040].param3 = 1, ++ [28092].file = "fs/select.c", ++ [28092].name = "do_sys_poll", ++ [28092].param2 = 1, ++ [28170].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [28170].name = "iwl_dbgfs_ucode_tracing_write", ++ [28170].param3 = 1, ++ [28247].file = "net/sctp/tsnmap.c", ++ [28247].name = "sctp_tsnmap_init", ++ [28247].param2 = 1, ++ [28265].file = "fs/notify/fanotify/fanotify_user.c", ++ [28265].name = "fanotify_write", ++ [28265].param3 = 1, ++ [28316].file = "drivers/input/joydev.c", ++ [28316].name = "joydev_ioctl_common", ++ [28316].param2 = 1, ++ [28360].file = "drivers/hid/usbhid/hiddev.c", ++ [28360].name = "hiddev_compat_ioctl", ++ [28360].param2 = 1, ++ [28407].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [28407].name = "rt2x00debug_write_csr", ++ [28407].param3 = 1, ++ [28462].file = "net/rfkill/core.c", ++ [28462].name = "rfkill_fop_write", ++ [28462].param3 = 1, ++ [28635].file = "drivers/gpu/drm/drm_sman.c", ++ [28635].name = "drm_sman_init", ++ [28635].param2 = 1, ++ [28655].file = "drivers/infiniband/hw/mthca/mthca_allocator.c", ++ [28655].name = "mthca_alloc_init", ++ [28655].param2 = 1, ++ [28688].file = "mm/mempolicy.c", ++ [28688].name = "compat_sys_get_mempolicy", ++ [28688].param3 = 1, ++ [28783].file = "drivers/gpu/drm/i915/i915_debugfs.c", ++ [28783].name = "i915_cache_sharing_write", ++ [28783].param3 = 1, ++ [28787].file = "drivers/media/video/videobuf2-core.c", ++ [28787].name = "vb2_write", ++ [28787].param3 = 1, 
++ [28879].file = "drivers/base/map.c", ++ [28879].name = "kobj_map", ++ [28879].param2 = 1, ++ [28879].param3 = 1, ++ [28889].file = "drivers/char/pcmcia/cm4040_cs.c", ++ [28889].name = "cm4040_write", ++ [28889].param3 = 1, ++ [29073].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c", ++ [29073].name = "vmw_kms_readback", ++ [29073].param6 = 1, ++ [29085].file = "security/apparmor/apparmorfs.c", ++ [29085].name = "profile_load", ++ [29085].param3 = 1, ++ [29092].file = "lib/lru_cache.c", ++ [29092].name = "lc_create", ++ [29092].param3 = 1, ++ [29189].file = "drivers/gpu/drm/ttm/ttm_page_alloc.c", ++ [29189].name = "ttm_put_pages", ++ [29189].param2 = 1, ++ [29257].file = "drivers/vhost/vhost.c", ++ [29257].name = "vhost_add_used_and_signal_n", ++ [29257].param4 = 1, ++ [29366].file = "drivers/char/pcmcia/cm4000_cs.c", ++ [29366].name = "cmm_read", ++ [29366].param3 = 1, ++ [29405].file = "drivers/media/dvb/dvb-usb/dw2102.c", ++ [29405].name = "dw210x_op_rw", ++ [29405].param6 = 1, ++ [29437].file = "drivers/net/wireless/iwlegacy/iwl-4965-rs.c", ++ [29437].name = "iwl4965_rs_sta_dbgfs_scale_table_write", ++ [29437].param3 = 1, ++ [29465].file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c", ++ [29465].name = "mem_read", ++ [29465].param3 = 1, ++ [29714].file = "drivers/scsi/cxgbi/libcxgbi.c", ++ [29714].name = "cxgbi_device_register", ++ [29714].param1 = 1, ++ [29714].param2 = 1, ++ [29859].file = "net/rds/page.c", ++ [29859].name = "rds_page_copy_user", ++ [29859].param4 = 1, ++ [29875].file = "sound/isa/gus/gus_pcm.c", ++ [29875].name = "snd_gf1_pcm_playback_copy", ++ [29875].param5 = 1, ++ [29905].file = "mm/nobootmem.c", ++ [29905].name = "___alloc_bootmem", ++ [29905].param1 = 1, ++ [2995].file = "mm/page_alloc.c", ++ [2995].name = "alloc_large_system_hash", ++ [2995].param2 = 1, ++ [30242].file = "fs/cifs/cifssmb.c", ++ [30242].name = "cifs_readdata_alloc", ++ [30242].param1 = 1, ++ [30341].file = "drivers/infiniband/hw/qib/qib_verbs.c", ++ [30341].name = "qib_verbs_send", ++ [30341].param3 = 1, ++ [30341].param5 = 1, ++ [30438].file = "mm/filemap_xip.c", ++ [30438].name = "xip_file_read", ++ [30438].param3 = 1, ++ [30449].file = "drivers/telephony/ixj.c", ++ [30449].name = "ixj_read", ++ [30449].param3 = 1, ++ [30489].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [30489].name = "iwl_dbgfs_rx_handlers_write", ++ [30489].param3 = 1, ++ [30693].file = "fs/namei.c", ++ [30693].name = "sys_rename", ++ [30693].param1 = 1, ++ [30693].param2 = 1, ++ [307].file = "drivers/base/regmap/regmap-debugfs.c", ++ [307].name = "regmap_map_read_file", ++ [307].param3 = 1, ++ [30970].file = "drivers/usb/misc/ldusb.c", ++ [30970].name = "ld_usb_read", ++ [30970].param3 = 1, ++ [31155].file = "drivers/staging/frontier/alphatrack.c", ++ [31155].name = "usb_alphatrack_write", ++ [31155].param3 = 1, ++ [31207].file = "drivers/platform/x86/asus_acpi.c", ++ [31207].name = "parse_arg", ++ [31207].param2 = 1, ++ [31348].file = "kernel/sched.c", ++ [31348].name = "sys_sched_getaffinity", ++ [31348].param2 = 1, ++ [31465].file = "net/rds/message.c", ++ [31465].name = "rds_message_map_pages", ++ [31465].param2 = 1, ++ [31492].file = "drivers/hid/hidraw.c", ++ [31492].name = "hidraw_read", ++ [31492].param3 = 1, ++ [31649].file = "fs/ecryptfs/crypto.c", ++ [31649].name = "ecryptfs_decode_and_decrypt_filename", ++ [31649].param5 = 1, ++ [3170].file = "security/integrity/ima/ima_fs.c", ++ [3170].name = "ima_write_policy", ++ [3170].param3 = 1, ++ [31730].file = "net/dccp/proto.c", ++ [31730].name = 
"dccp_setsockopt", ++ [31730].param5 = 1, ++ [31782].file = "drivers/misc/pti.c", ++ [31782].name = "pti_char_write", ++ [31782].param3 = 1, ++ [31789].file = "fs/file.c", ++ [31789].name = "alloc_fdmem", ++ [31789].param1 = 1, ++ [31957].file = "fs/afs/proc.c", ++ [31957].name = "afs_proc_cells_write", ++ [31957].param3 = 1, ++ [32025].file = "drivers/nfc/pn544.c", ++ [32025].name = "pn544_write", ++ [32025].param3 = 1, ++ [32182].file = "net/sunrpc/cache.c", ++ [32182].name = "cache_write", ++ [32182].param3 = 1, ++ [32326].file = "drivers/tty/n_r3964.c", ++ [32326].name = "r3964_write", ++ [32326].param4 = 1, ++ [32402].file = "net/ceph/pagevec.c", ++ [32402].name = "ceph_copy_user_to_page_vector", ++ [32402].param4 = 1, ++ [3241].file = "drivers/usb/wusbcore/crypto.c", ++ [3241].name = "wusb_prf", ++ [3241].param7 = 1, ++ [32459].file = "drivers/media/radio/radio-wl1273.c", ++ [32459].name = "wl1273_fm_fops_write", ++ [32459].param3 = 1, ++ [32560].file = "drivers/input/input-mt.c", ++ [32560].name = "input_mt_init_slots", ++ [32560].param2 = 1, ++ [32574].file = "mm/mempolicy.c", ++ [32574].name = "sys_get_mempolicy", ++ [32574].param3 = 1, ++ [32608].file = "security/selinux/selinuxfs.c", ++ [32608].name = "sel_write_checkreqprot", ++ [32608].param3 = 1, ++ [32950].file = "fs/reiserfs/resize.c", ++ [32950].name = "reiserfs_resize", ++ [32950].param2 = 1, ++ [33010].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c", ++ [33010].name = "dvb_ringbuffer_pkt_read_user", ++ [33010].param5 = 1, ++ [33268].file = "mm/maccess.c", ++ [33268].name = "__probe_kernel_write", ++ [33268].param3 = 1, ++ [33280].file = "fs/xfs/kmem.c", ++ [33280].name = "kmem_realloc", ++ [33280].param2 = 1, ++ [33375].file = "drivers/staging/rtl8712/osdep_service.h", ++ [33375].name = "_malloc", ++ [33375].param1 = 1, ++ [33637].file = "net/9p/client.c", ++ [33637].name = "p9_client_read", ++ [33637].param5 = 1, ++ [33669].file = "fs/gfs2/glock.c", ++ [33669].name = "gfs2_glock_nq_m", ++ [33669].param1 = 1, ++ [33810].file = "net/mac80211/util.c", ++ [33810].name = "ieee80211_send_probe_req", ++ [33810].param6 = 1, ++ [3384].file = "drivers/block/paride/pg.c", ++ [3384].name = "pg_write", ++ [3384].param3 = 1, ++ [34016].file = "drivers/tty/tty_buffer.c", ++ [34016].name = "tty_prepare_flip_string_flags", ++ [34016].param4 = 1, ++ [34105].file = "fs/libfs.c", ++ [34105].name = "simple_read_from_buffer", ++ [34105].param2 = 1, ++ [34105].param5 = 1, ++ [34120].file = "drivers/media/video/pvrusb2/pvrusb2-io.c", ++ [34120].name = "pvr2_stream_buffer_count", ++ [34120].param2 = 1, ++ [34226].file = "mm/shmem.c", ++ [34226].name = "shmem_xattr_set", ++ [34226].param4 = 1, ++ [34251].file = "drivers/staging/cxt1e1/sbecom_inline_linux.h", ++ [34251].name = "OS_kmalloc", ++ [34251].param1 = 1, ++ [34276].file = "drivers/media/video/videobuf2-core.c", ++ [34276].name = "__vb2_perform_fileio", ++ [34276].param3 = 1, ++ [34278].file = "fs/ubifs/debug.c", ++ [34278].name = "dfs_global_file_write", ++ [34278].param3 = 1, ++ [34432].file = "drivers/edac/edac_pci.c", ++ [34432].name = "edac_pci_alloc_ctl_info", ++ [34432].param1 = 1, ++ [34551].file = "fs/ocfs2/stack_user.c", ++ [34551].name = "ocfs2_control_cfu", ++ [34551].param2 = 1, ++ [34666].file = "fs/cifs/cifs_debug.c", ++ [34666].name = "cifs_security_flags_proc_write", ++ [34666].param3 = 1, ++ [34672].file = "drivers/tty/tty_io.c", ++ [34672].name = "tty_write", ++ [34672].param3 = 1, ++ [34760].file = "include/acpi/platform/aclinux.h", ++ [34760].name = 
"acpi_os_allocate_zeroed", ++ [34760].param1 = 1, ++ [34802].file = "drivers/scsi/cxgbi/libcxgbi.h", ++ [34802].name = "cxgbi_alloc_big_mem", ++ [34802].param1 = 1, ++ [34847].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [34847].name = "iwl_dbgfs_clear_traffic_statistics_write", ++ [34847].param3 = 1, ++ [34863].file = "drivers/video/fbsysfs.c", ++ [34863].name = "framebuffer_alloc", ++ [34863].param1 = 1, ++ [34882].file = "drivers/platform/x86/toshiba_acpi.c", ++ [34882].name = "video_proc_write", ++ [34882].param3 = 1, ++ [34988].file = "drivers/net/wireless/libertas/debugfs.c", ++ [34988].name = "lbs_rdrf_write", ++ [34988].param3 = 1, ++ [35007].file = "drivers/usb/mon/mon_bin.c", ++ [35007].name = "mon_bin_read", ++ [35007].param3 = 1, ++ [35050].file = "fs/ocfs2/dlmfs/dlmfs.c", ++ [35050].name = "dlmfs_file_write", ++ [35050].param3 = 1, ++ [35119].file = "fs/xattr.c", ++ [35119].name = "sys_llistxattr", ++ [35119].param3 = 1, ++ [35129].file = "mm/nobootmem.c", ++ [35129].name = "___alloc_bootmem_nopanic", ++ [35129].param1 = 1, ++ [35176].file = "drivers/usb/misc/ldusb.c", ++ [35176].name = "ld_usb_write", ++ [35176].param3 = 1, ++ [35234].file = "net/irda/irnet/irnet_ppp.c", ++ [35234].name = "irnet_ctrl_write", ++ [35234].param3 = 1, ++ [35256].file = "sound/core/memory.c", ++ [35256].name = "copy_from_user_toio", ++ [35256].param3 = 1, ++ [35268].file = "security/keys/request_key_auth.c", ++ [35268].name = "request_key_auth_read", ++ [35268].param3 = 1, ++ [3541].file = "drivers/mtd/ubi/cdev.c", ++ [3541].name = "vol_cdev_write", ++ [3541].param3 = 1, ++ [35443].file = "sound/core/pcm_memory.c", ++ [35443].name = "_snd_pcm_lib_alloc_vmalloc_buffer", ++ [35443].param2 = 1, ++ [35449].file = "fs/namei.c", ++ [35449].name = "sys_mkdir", ++ [35449].param1 = 1, ++ [35542].file = "drivers/tty/ipwireless/hardware.c", ++ [35542].name = "ipwireless_send_packet", ++ [35542].param4 = 1, ++ [35556].file = "fs/read_write.c", ++ [35556].name = "sys_readv", ++ [35556].param3 = 1, ++ [35610].file = "net/batman-adv/translation-table.c", ++ [35610].name = "tt_save_orig_buffer", ++ [35610].param4 = 1, ++ [35693].file = "drivers/staging/mei/main.c", ++ [35693].name = "mei_read", ++ [35693].param3 = 1, ++ [35729].file = "include/linux/skbuff.h", ++ [35729].name = "__dev_alloc_skb", ++ [35729].param1 = 1, ++ [35731].file = "drivers/usb/class/cdc-wdm.c", ++ [35731].name = "wdm_read", ++ [35731].param3 = 1, ++ [35796].file = "drivers/mtd/nand/nand_bch.c", ++ [35796].name = "nand_bch_init", ++ [35796].param2 = 1, ++ [35796].param3 = 1, ++ [35880].file = "fs/ecryptfs/crypto.c", ++ [35880].name = "ecryptfs_encrypt_and_encode_filename", ++ [35880].param6 = 1, ++ [3604].file = "net/batman-adv/translation-table.c", ++ [3604].name = "tt_update_orig", ++ [3604].param4 = 1, ++ [36080].file = "drivers/media/video/v4l2-ioctl.c", ++ [36080].name = "video_usercopy", ++ [36080].param2 = 1, ++ [36149].file = "fs/udf/inode.c", ++ [36149].name = "udf_alloc_i_data", ++ [36149].param2 = 1, ++ [36183].file = "drivers/tty/vt/vc_screen.c", ++ [36183].name = "vcs_read", ++ [36183].param3 = 1, ++ [36199].file = "net/sunrpc/auth_gss/auth_gss.c", ++ [36199].name = "gss_pipe_downcall", ++ [36199].param3 = 1, ++ [3630].file = "drivers/video/broadsheetfb.c", ++ [3630].name = "broadsheetfb_write", ++ [3630].param3 = 1, ++ [3632].file = "drivers/firewire/core-cdev.c", ++ [3632].name = "fw_device_op_read", ++ [3632].param3 = 1, ++ [36490].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c", ++ [36490].name = 
"ath6kl_cfg80211_connect_event", ++ [36490].param7 = 1, ++ [36522].file = "drivers/hid/hidraw.c", ++ [36522].name = "hidraw_send_report", ++ [36522].param3 = 1, ++ [36560].file = "net/sunrpc/cache.c", ++ [36560].name = "write_flush", ++ [36560].param3 = 1, ++ [36633].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [36633].name = "rt2x00debug_read_queue_stats", ++ [36633].param3 = 1, ++ [3665].file = "drivers/media/video/ivtv/ivtvfb.c", ++ [3665].name = "ivtvfb_write", ++ [3665].param3 = 1, ++ [36981].file = "drivers/video/via/viafbdev.c", ++ [36981].name = "viafb_dfpl_proc_write", ++ [36981].param3 = 1, ++ [37034].file = "fs/cifs/cifssmb.c", ++ [37034].name = "cifs_writedata_alloc", ++ [37034].param1 = 1, ++ [37044].file = "sound/firewire/packets-buffer.c", ++ [37044].name = "iso_packets_buffer_init", ++ [37044].param3 = 1, ++ [37115].file = "drivers/tty/tty_buffer.c", ++ [37115].name = "tty_prepare_flip_string", ++ [37115].param3 = 1, ++ [37163].file = "net/core/skbuff.c", ++ [37163].name = "__netdev_alloc_skb", ++ [37163].param2 = 1, ++ [37204].file = "drivers/isdn/hardware/eicon/divasi.c", ++ [37204].name = "um_idi_read", ++ [37204].param3 = 1, ++ [37233].file = "fs/ocfs2/cluster/tcp.c", ++ [37233].name = "o2net_send_message_vec", ++ [37233].param4 = 1, ++ [37309].file = "drivers/mtd/mtdchar.c", ++ [37309].name = "mtd_do_readoob", ++ [37309].param4 = 1, ++ [37382].file = "drivers/staging/pohmelfs/inode.c", ++ [37382].name = "pohmelfs_readpages_trans_complete", ++ [37382].param2 = 1, ++ [37384].file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c", ++ [37384].name = "vmw_fifo_reserve", ++ [37384].param2 = 1, ++ [37497].file = "net/mac80211/util.c", ++ [37497].name = "ieee80211_build_probe_req", ++ [37497].param7 = 1, ++ [37594].file = "include/linux/poll.h", ++ [37594].name = "get_fd_set", ++ [37594].param1 = 1, ++ [37611].file = "drivers/xen/xenbus/xenbus_xs.c", ++ [37611].name = "split", ++ [37611].param2 = 1, ++ [37661].file = "mm/filemap.c", ++ [37661].name = "file_read_actor", ++ [37661].param4 = 1, ++ [37872].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [37872].name = "iwl_dbgfs_protection_mode_write", ++ [37872].param3 = 1, ++ [37976].file = "drivers/platform/x86/asus_acpi.c", ++ [37976].name = "bluetooth_proc_write", ++ [37976].param3 = 1, ++ [3797].file = "sound/pci/asihpi/hpicmn.c", ++ [3797].name = "hpi_alloc_control_cache", ++ [3797].param1 = 1, ++ [3801].file = "drivers/block/paride/pt.c", ++ [3801].name = "pt_write", ++ [3801].param3 = 1, ++ [38057].file = "fs/coda/psdev.c", ++ [38057].name = "coda_psdev_write", ++ [38057].param3 = 1, ++ [38186].file = "kernel/signal.c", ++ [38186].name = "do_sigpending", ++ [38186].param2 = 1, ++ [38401].file = "drivers/xen/xenfs/xenbus.c", ++ [38401].name = "queue_reply", ++ [38401].param3 = 1, ++ [3841].file = "drivers/platform/x86/asus_acpi.c", ++ [3841].name = "write_led", ++ [3841].param2 = 1, ++ [38532].file = "fs/afs/cell.c", ++ [38532].name = "afs_cell_lookup", ++ [38532].param2 = 1, ++ [38576].file = "drivers/i2c/i2c-dev.c", ++ [38576].name = "i2cdev_read", ++ [38576].param3 = 1, ++ [38747].file = "fs/xattr.c", ++ [38747].name = "sys_lgetxattr", ++ [38747].param4 = 1, ++ [38972].file = "security/smack/smackfs.c", ++ [38972].name = "smk_write_logging", ++ [38972].param3 = 1, ++ [39001].file = "net/xfrm/xfrm_hash.c", ++ [39001].name = "xfrm_hash_alloc", ++ [39001].param1 = 1, ++ [39044].file = "lib/kstrtox.c", ++ [39044].name = "kstrtos16_from_user", ++ [39044].param2 = 1, ++ [39052].file = "drivers/input/evdev.c", ++ 
[39052].name = "evdev_ioctl", ++ [39052].param2 = 1, ++ [39154].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [39154].name = "iwl_dbgfs_clear_ucode_statistics_write", ++ [39154].param3 = 1, ++ [39155].file = "drivers/xen/grant-table.c", ++ [39155].name = "get_free_entries", ++ [39155].param1 = 1, ++ [39254].file = "drivers/char/pcmcia/cm4000_cs.c", ++ [39254].name = "cmm_write", ++ [39254].param3 = 1, ++ [39415].file = "fs/pstore/inode.c", ++ [39415].name = "pstore_mkfile", ++ [39415].param5 = 1, ++ [39417].file = "drivers/block/DAC960.c", ++ [39417].name = "dac960_user_command_proc_write", ++ [39417].param3 = 1, ++ [39479].file = "drivers/ide/ide-tape.c", ++ [39479].name = "idetape_chrdev_read", ++ [39479].param3 = 1, ++ [39573].file = "drivers/hid/hid-picolcd.c", ++ [39573].name = "picolcd_debug_reset_write", ++ [39573].param3 = 1, ++ [39583].file = "drivers/net/ethernet/broadcom/cnic.c", ++ [39583].name = "cnic_init_id_tbl", ++ [39583].param2 = 1, ++ [39606].file = "drivers/bluetooth/hci_vhci.c", ++ [39606].name = "vhci_write", ++ [39606].param3 = 1, ++ [39638].file = "security/selinux/selinuxfs.c", ++ [39638].name = "sel_write_avc_cache_threshold", ++ [39638].param3 = 1, ++ [39645].file = "drivers/media/dvb/dvb-core/dvbdev.c", ++ [39645].name = "dvb_generic_ioctl", ++ [39645].param2 = 1, ++ [39741].file = "drivers/video/via/viafbdev.c", ++ [39741].name = "viafb_iga2_odev_proc_write", ++ [39741].param3 = 1, ++ [39888].file = "net/core/skbuff.c", ++ [39888].name = "__alloc_skb", ++ [39888].param1 = 1, ++ [40043].file = "drivers/media/video/v4l2-ioctl.c", ++ [40043].name = "video_ioctl2", ++ [40043].param2 = 1, ++ [40049].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [40049].name = "btmrvl_psmode_write", ++ [40049].param3 = 1, ++ [40075].file = "drivers/media/video/c-qcam.c", ++ [40075].name = "qc_capture", ++ [40075].param3 = 1, ++ [40163].file = "fs/ncpfs/file.c", ++ [40163].name = "ncp_file_write", ++ [40163].param3 = 1, ++ [40240].file = "drivers/char/nvram.c", ++ [40240].name = "nvram_write", ++ [40240].param3 = 1, ++ [40256].file = "drivers/tty/vt/vc_screen.c", ++ [40256].name = "vcs_write", ++ [40256].param3 = 1, ++ [40302].file = "sound/isa/gus/gus_dram.c", ++ [40302].name = "snd_gus_dram_poke", ++ [40302].param4 = 1, ++ [40355].file = "drivers/staging/mei/main.c", ++ [40355].name = "mei_write", ++ [40355].param3 = 1, ++ [40373].file = "fs/cifs/cifs_spnego.c", ++ [40373].name = "cifs_spnego_key_instantiate", ++ [40373].param3 = 1, ++ [40412].file = "fs/namei.c", ++ [40412].name = "user_path_at", ++ [40412].param2 = 1, ++ [40578].file = "sound/soc/soc-core.c", ++ [40578].name = "codec_reg_write_file", ++ [40578].param3 = 1, ++ [40678].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [40678].name = "iwl_legacy_dbgfs_traffic_log_write", ++ [40678].param3 = 1, ++ [40713].file = "net/mac80211/debugfs.c", ++ [40713].name = "noack_write", ++ [40713].param3 = 1, ++ [40754].file = "fs/btrfs/delayed-inode.c", ++ [40754].name = "btrfs_alloc_delayed_item", ++ [40754].param1 = 1, ++ [40786].file = "net/ipv4/netfilter/nf_nat_snmp_basic.c", ++ [40786].name = "asn1_octets_decode", ++ [40786].param2 = 1, ++ [40901].file = "drivers/block/drbd/drbd_bitmap.c", ++ [40901].name = "drbd_bm_resize", ++ [40901].param2 = 1, ++ [40952].file = "drivers/misc/sgi-xp/xpc_partition.c", ++ [40952].name = "xpc_kmalloc_cacheline_aligned", ++ [40952].param1 = 1, ++ [41000].file = "sound/core/pcm_native.c", ++ [41000].name = "snd_pcm_aio_read", ++ [41000].param3 = 1, ++ [41003].file = 
"fs/namei.c", ++ [41003].name = "user_path_parent", ++ [41003].param2 = 1, ++ [41005].file = "net/bridge/netfilter/ebtables.c", ++ [41005].name = "copy_counters_to_user", ++ [41005].param5 = 1, ++ [41090].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [41090].name = "iwl_legacy_dbgfs_sram_write", ++ [41090].param3 = 1, ++ [41122].file = "fs/binfmt_misc.c", ++ [41122].name = "bm_status_write", ++ [41122].param3 = 1, ++ [41230].file = "drivers/usb/storage/datafab.c", ++ [41230].name = "datafab_read_data", ++ [41230].param4 = 1, ++ [41249].file = "drivers/media/video/zr364xx.c", ++ [41249].name = "send_control_msg", ++ [41249].param6 = 1, ++ [41302].file = "net/dns_resolver/dns_query.c", ++ [41302].name = "dns_query", ++ [41302].param3 = 1, ++ [41418].file = "fs/libfs.c", ++ [41418].name = "simple_attr_write", ++ [41418].param3 = 1, ++ [4155].file = "kernel/kexec.c", ++ [4155].name = "do_kimage_alloc", ++ [4155].param3 = 1, ++ [41592].file = "net/sctp/ssnmap.c", ++ [41592].name = "sctp_ssnmap_new", ++ [41592].param1 = 1, ++ [41592].param2 = 1, ++ [41616].file = "net/core/filter.c", ++ [41616].name = "sk_chk_filter", ++ [41616].param2 = 1, ++ [41676].file = "fs/compat.c", ++ [41676].name = "compat_sys_preadv", ++ [41676].param3 = 1, ++ [41727].file = "drivers/media/video/meye.c", ++ [41727].name = "rvmalloc", ++ [41727].param1 = 1, ++ [41884].file = "sound/core/oss/pcm_plugin.c", ++ [41884].name = "snd_pcm_plug_alloc", ++ [41884].param2 = 1, ++ [41924].file = "security/keys/keyctl.c", ++ [41924].name = "keyctl_get_security", ++ [41924].param3 = 1, ++ [4202].file = "drivers/edac/edac_mc.c", ++ [4202].name = "edac_mc_alloc", ++ [4202].param1 = 1, ++ [42143].file = "drivers/media/video/c-qcam.c", ++ [42143].name = "qcam_read", ++ [42143].param3 = 1, ++ [42206].file = "fs/quota/quota_tree.c", ++ [42206].name = "getdqbuf", ++ [42206].param1 = 1, ++ [42270].file = "net/wireless/scan.c", ++ [42270].name = "cfg80211_inform_bss_frame", ++ [42270].param4 = 1, ++ [4233].file = "fs/select.c", ++ [4233].name = "sys_poll", ++ [4233].param2 = 1, ++ [42378].file = "drivers/net/wireless/ath/ath6kl/debug.c", ++ [42378].name = "ath6kl_regread_write", ++ [42378].param3 = 1, ++ [42420].file = "drivers/net/wireless/hostap/hostap_ioctl.c", ++ [42420].name = "prism2_set_genericelement", ++ [42420].param3 = 1, ++ [42466].file = "drivers/scsi/lpfc/lpfc_debugfs.c", ++ [42466].name = "lpfc_idiag_cmd_get", ++ [42466].param2 = 1, ++ [42472].file = "fs/compat.c", ++ [42472].name = "compat_readv", ++ [42472].param3 = 1, ++ [42483].file = "drivers/media/video/videobuf-dma-sg.c", ++ [42483].name = "videobuf_dma_init_user_locked", ++ [42483].param3 = 1, ++ [42483].param4 = 1, ++ [42562].file = "kernel/kfifo.c", ++ [42562].name = "__kfifo_to_user_r", ++ [42562].param3 = 1, ++ [42666].file = "drivers/pcmcia/cistpl.c", ++ [42666].name = "read_cis_cache", ++ [42666].param4 = 1, ++ [42882].file = "security/keys/user_defined.c", ++ [42882].name = "user_instantiate", ++ [42882].param3 = 1, ++ [42964].file = "drivers/video/fb_sys_fops.c", ++ [42964].name = "fb_sys_read", ++ [42964].param3 = 1, ++ [43023].file = "drivers/usb/misc/usblcd.c", ++ [43023].name = "lcd_write", ++ [43023].param3 = 1, ++ [4324].file = "drivers/video/fbmem.c", ++ [4324].name = "fb_read", ++ [4324].param3 = 1, ++ [43380].file = "drivers/scsi/bfa/bfad_debugfs.c", ++ [43380].name = "bfad_debugfs_write_regrd", ++ [43380].param3 = 1, ++ [43393].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [43393].name = "iwl_dbgfs_sram_write", ++ [43393].param3 = 
1, ++ [4344].file = "fs/namei.c", ++ [4344].name = "sys_mkdirat", ++ [4344].param2 = 1, ++ [43510].file = "kernel/kexec.c", ++ [43510].name = "compat_sys_kexec_load", ++ [43510].param2 = 1, ++ [43515].file = "drivers/usb/storage/jumpshot.c", ++ [43515].name = "jumpshot_read_data", ++ [43515].param4 = 1, ++ [43540].file = "include/rdma/ib_verbs.h", ++ [43540].name = "ib_copy_to_udata", ++ [43540].param3 = 1, ++ [4357].file = "security/tomoyo/securityfs_if.c", ++ [4357].name = "tomoyo_read_self", ++ [4357].param3 = 1, ++ [43590].file = "security/smack/smackfs.c", ++ [43590].name = "smk_write_onlycap", ++ [43590].param3 = 1, ++ [43596].file = "drivers/usb/core/buffer.c", ++ [43596].name = "hcd_buffer_alloc", ++ [43596].param2 = 1, ++ [43632].file = "drivers/media/video/videobuf2-core.c", ++ [43632].name = "vb2_read", ++ [43632].param3 = 1, ++ [43731].file = "drivers/hid/hid-picolcd.c", ++ [43731].name = "picolcd_debug_eeprom_read", ++ [43731].param3 = 1, ++ [43777].file = "drivers/acpi/acpica/utobject.c", ++ [43777].name = "acpi_ut_create_buffer_object", ++ [43777].param1 = 1, ++ [43834].file = "security/apparmor/apparmorfs.c", ++ [43834].name = "profile_replace", ++ [43834].param3 = 1, ++ [43899].file = "drivers/media/rc/imon.c", ++ [43899].name = "vfd_write", ++ [43899].param3 = 1, ++ [43982].file = "drivers/platform/x86/toshiba_acpi.c", ++ [43982].name = "keys_proc_write", ++ [43982].param3 = 1, ++ [44039].file = "drivers/video/via/viafbdev.c", ++ [44039].name = "odev_update", ++ [44039].param2 = 1, ++ [44050].file = "fs/nfs/idmap.c", ++ [44050].name = "nfs_map_group_to_gid", ++ [44050].param3 = 1, ++ [44125].file = "fs/ext4/super.c", ++ [44125].name = "ext4_kvmalloc", ++ [44125].param1 = 1, ++ [44180].file = "drivers/video/via/viafbdev.c", ++ [44180].name = "viafb_vt1636_proc_write", ++ [44180].param3 = 1, ++ [44290].file = "drivers/net/usb/dm9601.c", ++ [44290].name = "dm_read", ++ [44290].param3 = 1, ++ [44298].file = "drivers/scsi/pmcraid.c", ++ [44298].name = "pmcraid_copy_sglist", ++ [44298].param3 = 1, ++ [44365].file = "fs/namei.c", ++ [44365].name = "do_rmdir", ++ [44365].param2 = 1, ++ [44640].file = "fs/select.c", ++ [44640].name = "sys_ppoll", ++ [44640].param2 = 1, ++ [44649].file = "mm/page_cgroup.c", ++ [44649].name = "swap_cgroup_swapon", ++ [44649].param2 = 1, ++ [44656].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [44656].name = "iwl_legacy_dbgfs_wd_timeout_write", ++ [44656].param3 = 1, ++ [4471].file = "fs/ntfs/malloc.h", ++ [4471].name = "__ntfs_malloc", ++ [4471].param1 = 1, ++ [44773].file = "drivers/staging/vme/devices/vme_user.c", ++ [44773].name = "vme_user_write", ++ [44773].param3 = 1, ++ [44825].file = "drivers/scsi/osd/osd_initiator.c", ++ [44825].name = "_osd_realloc_seg", ++ [44825].param3 = 1, ++ [44943].file = "mm/util.c", ++ [44943].name = "kmemdup", ++ [44943].param2 = 1, ++ [44990].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c", ++ [44990].name = "pvr2_ioread_set_sync_key", ++ [44990].param3 = 1, ++ [45000].file = "fs/afs/proc.c", ++ [45000].name = "afs_proc_rootcell_write", ++ [45000].param3 = 1, ++ [45119].file = "drivers/usb/misc/yurex.c", ++ [45119].name = "yurex_write", ++ [45119].param3 = 1, ++ [45169].file = "drivers/video/metronomefb.c", ++ [45169].name = "metronomefb_write", ++ [45169].param3 = 1, ++ [45200].file = "drivers/scsi/scsi_proc.c", ++ [45200].name = "proc_scsi_write_proc", ++ [45200].param3 = 1, ++ [45217].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [45217].name = "iwl_dbgfs_debug_level_write", ++ 
[45217].param3 = 1, ++ [45231].file = "fs/ecryptfs/crypto.c", ++ [45231].name = "ecryptfs_copy_filename", ++ [45231].param4 = 1, ++ [45233].file = "net/rds/info.c", ++ [45233].name = "rds_info_getsockopt", ++ [45233].param3 = 1, ++ [45244].file = "drivers/mfd/ab3100-core.c", ++ [45244].name = "ab3100_get_set_reg", ++ [45244].param3 = 1, ++ [45264].file = "drivers/net/wireless/ath/ath5k/debug.c", ++ [45264].name = "write_file_ani", ++ [45264].param3 = 1, ++ [45326].file = "drivers/mtd/ubi/cdev.c", ++ [45326].name = "vol_cdev_read", ++ [45326].param3 = 1, ++ [45335].file = "fs/read_write.c", ++ [45335].name = "vfs_writev", ++ [45335].param3 = 1, ++ [45421].file = "drivers/message/fusion/mptctl.c", ++ [45421].name = "mptctl_do_mpt_command", ++ [45421].param3 = 1, ++ [45534].file = "drivers/net/wireless/ath/carl9170/cmd.c", ++ [45534].name = "carl9170_cmd_buf", ++ [45534].param3 = 1, ++ [45576].file = "net/netfilter/xt_recent.c", ++ [45576].name = "recent_mt_proc_write", ++ [45576].param3 = 1, ++ [45586].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [45586].name = "rt2x00debug_write_bbp", ++ [45586].param3 = 1, ++ [45629].file = "lib/bch.c", ++ [45629].name = "bch_alloc", ++ [45629].param1 = 1, ++ [45633].file = "drivers/input/evdev.c", ++ [45633].name = "evdev_do_ioctl", ++ [45633].param2 = 1, ++ [45740].file = "drivers/net/wireless/ath/ath6kl/debug.c", ++ [45740].name = "ath6kl_lrssi_roam_write", ++ [45740].param3 = 1, ++ [45747].file = "net/netlink/af_netlink.c", ++ [45747].name = "__netlink_change_ngroups", ++ [45747].param2 = 1, ++ [45930].file = "security/apparmor/apparmorfs.c", ++ [45930].name = "profile_remove", ++ [45930].param3 = 1, ++ [45954].file = "drivers/usb/misc/legousbtower.c", ++ [45954].name = "tower_write", ++ [45954].param3 = 1, ++ [45995].file = "fs/namei.c", ++ [45995].name = "sys_mknodat", ++ [45995].param2 = 1, ++ [46072].file = "drivers/video/arcfb.c", ++ [46072].name = "arcfb_write", ++ [46072].param3 = 1, ++ [46140].file = "sound/core/memalloc.c", ++ [46140].name = "snd_mem_proc_write", ++ [46140].param3 = 1, ++ [4614].file = "sound/core/pcm_lib.c", ++ [4614].name = "snd_pcm_lib_write_transfer", ++ [4614].param5 = 1, ++ [4616].file = "net/sunrpc/cache.c", ++ [4616].name = "cache_do_downcall", ++ [4616].param3 = 1, ++ [46243].file = "fs/binfmt_misc.c", ++ [46243].name = "bm_register_write", ++ [46243].param3 = 1, ++ [46250].file = "fs/xattr.c", ++ [46250].name = "sys_getxattr", ++ [46250].param4 = 1, ++ [46343].file = "fs/compat.c", ++ [46343].name = "compat_do_readv_writev", ++ [46343].param4 = 1, ++ [4644].file = "drivers/net/usb/mcs7830.c", ++ [4644].name = "mcs7830_get_reg", ++ [4644].param3 = 1, ++ [46605].file = "sound/core/oss/pcm_oss.c", ++ [46605].name = "snd_pcm_oss_sync1", ++ [46605].param2 = 1, ++ [46630].file = "net/decnet/af_decnet.c", ++ [46630].name = "__dn_setsockopt", ++ [46630].param5 = 1, ++ [46655].file = "drivers/media/video/hdpvr/hdpvr-video.c", ++ [46655].name = "hdpvr_read", ++ [46655].param3 = 1, ++ [46685].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c", ++ [46685].name = "ttm_bo_fbdev_io", ++ [46685].param4 = 1, ++ [46752].file = "drivers/staging/pohmelfs/dir.c", ++ [46752].name = "pohmelfs_name_alloc", ++ [46752].param1 = 1, ++ [46881].file = "drivers/char/lp.c", ++ [46881].name = "lp_write", ++ [46881].param3 = 1, ++ [47130].file = "kernel/kfifo.c", ++ [47130].name = "kfifo_copy_to_user", ++ [47130].param3 = 1, ++ [47265].file = "drivers/scsi/bnx2fc/bnx2fc_io.c", ++ [47265].name = "bnx2fc_cmd_mgr_alloc", ++ [47265].param2 = 1, ++ 
[47265].param3 = 1, ++ [47342].file = "fs/proc/base.c", ++ [47342].name = "sched_autogroup_write", ++ [47342].param3 = 1, ++ [47363].file = "drivers/input/evdev.c", ++ [47363].name = "evdev_ioctl_handler", ++ [47363].param2 = 1, ++ [47385].file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ [47385].name = "zd_usb_iowrite16v", ++ [47385].param3 = 1, ++ [47463].file = "fs/xfs/kmem.c", ++ [47463].name = "kmem_zalloc", ++ [47463].param1 = 1, ++ [47636].file = "drivers/usb/class/usblp.c", ++ [47636].name = "usblp_ioctl", ++ [47636].param2 = 1, ++ [47637].file = "drivers/block/cciss.c", ++ [47637].name = "cciss_proc_write", ++ [47637].param3 = 1, ++ [47652].file = "lib/kstrtox.c", ++ [47652].name = "kstrtoll_from_user", ++ [47652].param2 = 1, ++ [47881].file = "security/selinux/selinuxfs.c", ++ [47881].name = "sel_write_disable", ++ [47881].param3 = 1, ++ [48010].file = "drivers/net/wireless/ath/ath9k/debug.c", ++ [48010].name = "write_file_rx_chainmask", ++ [48010].param3 = 1, ++ [48155].file = "net/sctp/sm_make_chunk.c", ++ [48155].name = "sctp_make_abort_user", ++ [48155].param3 = 1, ++ [48182].file = "crypto/cryptd.c", ++ [48182].name = "cryptd_alloc_instance", ++ [48182].param2 = 1, ++ [48248].file = "security/keys/keyctl.c", ++ [48248].name = "keyctl_instantiate_key", ++ [48248].param3 = 1, ++ [48461].file = "drivers/gpu/drm/drm_memory.c", ++ [48461].name = "agp_remap", ++ [48461].param2 = 1, ++ [48642].file = "fs/hugetlbfs/inode.c", ++ [48642].name = "hugetlbfs_read", ++ [48642].param3 = 1, ++ [48720].file = "drivers/gpu/drm/i915/i915_debugfs.c", ++ [48720].name = "i915_max_freq_write", ++ [48720].param3 = 1, ++ [48768].file = "net/irda/irnet/irnet_ppp.c", ++ [48768].name = "dev_irnet_write", ++ [48768].param3 = 1, ++ [48856].file = "drivers/acpi/acpica/utalloc.c", ++ [48856].name = "acpi_ut_initialize_buffer", ++ [48856].param2 = 1, ++ [48941].file = "drivers/gpu/drm/nouveau/nouveau_vm.c", ++ [48941].name = "nouveau_vm_new", ++ [48941].param2 = 1, ++ [48941].param3 = 1, ++ [49126].file = "lib/prio_heap.c", ++ [49126].name = "heap_init", ++ [49126].param2 = 1, ++ [49143].file = "sound/core/oss/pcm_oss.c", ++ [49143].name = "snd_pcm_oss_write2", ++ [49143].param3 = 1, ++ [49216].file = "fs/read_write.c", ++ [49216].name = "do_readv_writev", ++ [49216].param4 = 1, ++ [49354].file = "drivers/media/video/cx18/cx18-fileops.c", ++ [49354].name = "cx18_v4l2_read", ++ [49354].param3 = 1, ++ [49448].file = "drivers/isdn/gigaset/common.c", ++ [49448].name = "gigaset_initdriver", ++ [49448].param2 = 1, ++ [49494].file = "drivers/virtio/virtio_ring.c", ++ [49494].name = "vring_new_virtqueue", ++ [49494].param1 = 1, ++ [49507].file = "fs/namei.c", ++ [49507].name = "sys_symlink", ++ [49507].param1 = 1, ++ [49604].file = "crypto/af_alg.c", ++ [49604].name = "alg_setsockopt", ++ [49604].param5 = 1, ++ [49646].file = "drivers/tty/vt/vt.c", ++ [49646].name = "vc_resize", ++ [49646].param2 = 1, ++ [49646].param3 = 1, ++ [49663].file = "drivers/media/video/uvc/uvc_driver.c", ++ [49663].name = "uvc_simplify_fraction", ++ [49663].param3 = 1, ++ [49718].file = "drivers/hid/hid-roccat-common.c", ++ [49718].name = "roccat_common_send", ++ [49718].param4 = 1, ++ [4972].file = "drivers/video/fb_sys_fops.c", ++ [4972].name = "fb_sys_write", ++ [4972].param3 = 1, ++ [49746].file = "net/ipv4/netfilter/arp_tables.c", ++ [49746].name = "compat_do_arpt_set_ctl", ++ [49746].param4 = 1, ++ [49780].file = "net/mac80211/key.c", ++ [49780].name = "ieee80211_key_alloc", ++ [49780].param3 = 1, ++ [49845].file = "mm/vmalloc.c", 
++ [49845].name = "__vmalloc_node", ++ [49845].param1 = 1, ++ [49935].file = "fs/xfs/kmem.c", ++ [49935].name = "kmem_zalloc_greedy", ++ [49935].param2 = 1, ++ [49935].param3 = 1, ++ [50001].file = "sound/pci/ctxfi/ctresource.c", ++ [50001].name = "rsc_mgr_init", ++ [50001].param3 = 1, ++ [50022].file = "drivers/usb/storage/shuttle_usbat.c", ++ [50022].name = "usbat_flash_read_data", ++ [50022].param4 = 1, ++ [50096].file = "drivers/net/wireless/libertas/debugfs.c", ++ [50096].name = "lbs_rdbbp_write", ++ [50096].param3 = 1, ++ [50102].file = "drivers/telephony/ixj.c", ++ [50102].name = "ixj_write", ++ [50102].param3 = 1, ++ [50238].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [50238].name = "iwl_legacy_dbgfs_clear_ucode_statistics_write", ++ [50238].param3 = 1, ++ [50267].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [50267].name = "rt2x00debug_read_crypto_stats", ++ [50267].param3 = 1, ++ [50398].file = "fs/proc/base.c", ++ [50398].name = "mem_write", ++ [50398].param3 = 1, ++ [50518].file = "drivers/gpu/drm/nouveau/nouveau_gem.c", ++ [50518].name = "u_memcpya", ++ [50518].param2 = 1, ++ [50518].param3 = 1, ++ [5052].file = "drivers/char/ppdev.c", ++ [5052].name = "pp_read", ++ [5052].param3 = 1, ++ [50562].file = "drivers/media/video/zoran/zoran_procfs.c", ++ [50562].name = "zoran_write", ++ [50562].param3 = 1, ++ [50653].file = "net/sunrpc/cache.c", ++ [50653].name = "cache_write_procfs", ++ [50653].param3 = 1, ++ [50692].file = "lib/ts_bm.c", ++ [50692].name = "bm_init", ++ [50692].param2 = 1, ++ [50813].file = "mm/vmalloc.c", ++ [50813].name = "__vmalloc_node_flags", ++ [50813].param1 = 1, ++ [5087].file = "drivers/atm/solos-pci.c", ++ [5087].name = "console_store", ++ [5087].param4 = 1, ++ [5102].file = "drivers/usb/misc/usbtest.c", ++ [5102].name = "usbtest_alloc_urb", ++ [5102].param3 = 1, ++ [5102].param5 = 1, ++ [51052].file = "drivers/base/firmware_class.c", ++ [51052].name = "firmware_data_write", ++ [51052].param6 = 1, ++ [51177].file = "net/sunrpc/xprtrdma/transport.c", ++ [51177].name = "xprt_rdma_allocate", ++ [51177].param2 = 1, ++ [51182].file = "drivers/misc/sgi-xp/xpc_main.c", ++ [51182].name = "xpc_kzalloc_cacheline_aligned", ++ [51182].param1 = 1, ++ [51250].file = "fs/read_write.c", ++ [51250].name = "rw_copy_check_uvector", ++ [51250].param3 = 1, ++ [51253].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [51253].name = "rt2x00debug_write_eeprom", ++ [51253].param3 = 1, ++ [51284].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [51284].name = "iwl_legacy_dbgfs_interrupt_write", ++ [51284].param3 = 1, ++ [51323].file = "sound/pci/ac97/ac97_pcm.c", ++ [51323].name = "snd_ac97_pcm_assign", ++ [51323].param2 = 1, ++ [51340].file = "drivers/usb/class/usblp.c", ++ [51340].name = "usblp_write", ++ [51340].param3 = 1, ++ [51471].file = "drivers/block/floppy.c", ++ [51471].name = "fd_locked_ioctl", ++ [51471].param3 = 1, ++ [5197].file = "net/core/dev.c", ++ [5197].name = "dev_set_alias", ++ [5197].param3 = 1, ++ [51998].file = "drivers/net/macvtap.c", ++ [51998].name = "macvtap_get_user", ++ [51998].param4 = 1, ++ [5204].file = "drivers/media/video/usbvision/usbvision-video.c", ++ [5204].name = "usbvision_v4l2_read", ++ [5204].param3 = 1, ++ [52086].file = "drivers/usb/image/mdc800.c", ++ [52086].name = "mdc800_device_read", ++ [52086].param3 = 1, ++ [52172].file = "drivers/pcmcia/cistpl.c", ++ [52172].name = "pccard_store_cis", ++ [52172].param6 = 1, ++ [52173].file = "drivers/misc/ibmasm/ibmasmfs.c", ++ [52173].name = 
"remote_settings_file_write", ++ [52173].param3 = 1, ++ [52199].file = "mm/nobootmem.c", ++ [52199].name = "__alloc_bootmem", ++ [52199].param1 = 1, ++ [52201].file = "drivers/video/via/viafbdev.c", ++ [52201].name = "viafb_dvp0_proc_write", ++ [52201].param3 = 1, ++ [5233].file = "include/linux/poll.h", ++ [5233].name = "set_fd_set", ++ [5233].param1 = 1, ++ [52343].file = "drivers/usb/misc/adutux.c", ++ [52343].name = "adu_read", ++ [52343].param3 = 1, ++ [52364].file = "sound/core/pcm_lib.c", ++ [52364].name = "snd_pcm_lib_readv_transfer", ++ [52364].param5 = 1, ++ [52401].file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c", ++ [52401].name = "r871x_set_wpa_ie", ++ [52401].param3 = 1, ++ [52699].file = "lib/ts_fsm.c", ++ [52699].name = "fsm_init", ++ [52699].param2 = 1, ++ [52721].file = "security/keys/encrypted-keys/encrypted.c", ++ [52721].name = "encrypted_instantiate", ++ [52721].param3 = 1, ++ [53041].file = "fs/libfs.c", ++ [53041].name = "simple_transaction_get", ++ [53041].param3 = 1, ++ [5313].file = "fs/gfs2/quota.c", ++ [5313].name = "do_sync", ++ [5313].param1 = 1, ++ [53209].file = "drivers/usb/host/ehci-sched.c", ++ [53209].name = "iso_sched_alloc", ++ [53209].param1 = 1, ++ [53302].file = "drivers/firewire/core-cdev.c", ++ [53302].name = "dispatch_ioctl", ++ [53302].param2 = 1, ++ [53355].file = "fs/ceph/dir.c", ++ [53355].name = "ceph_read_dir", ++ [53355].param3 = 1, ++ [53405].file = "drivers/media/video/videobuf-core.c", ++ [53405].name = "__videobuf_copy_to_user", ++ [53405].param4 = 1, ++ [53407].file = "net/wireless/sme.c", ++ [53407].name = "cfg80211_connect_result", ++ [53407].param4 = 1, ++ [53407].param6 = 1, ++ [53426].file = "fs/libfs.c", ++ [53426].name = "simple_transaction_read", ++ [53426].param3 = 1, ++ [5344].file = "security/selinux/ss/hashtab.c", ++ [5344].name = "hashtab_create", ++ [5344].param3 = 1, ++ [53468].file = "drivers/char/mem.c", ++ [53468].name = "write_mem", ++ [53468].param3 = 1, ++ [53513].file = "drivers/mmc/core/mmc_ops.c", ++ [53513].name = "mmc_send_bus_test", ++ [53513].param4 = 1, ++ [53539].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [53539].name = "iwl_dbgfs_txfifo_flush_write", ++ [53539].param3 = 1, ++ [53626].file = "drivers/block/paride/pg.c", ++ [53626].name = "pg_read", ++ [53626].param3 = 1, ++ [53631].file = "mm/util.c", ++ [53631].name = "memdup_user", ++ [53631].param2 = 1, ++ [53680].file = "lib/kstrtox.c", ++ [53680].name = "kstrtol_from_user", ++ [53680].param2 = 1, ++ [5389].file = "drivers/infiniband/core/uverbs_cmd.c", ++ [5389].name = "ib_uverbs_unmarshall_recv", ++ [5389].param5 = 1, ++ [53901].file = "net/rds/message.c", ++ [53901].name = "rds_message_alloc", ++ [53901].param1 = 1, ++ [53904].file = "fs/namei.c", ++ [53904].name = "sys_unlink", ++ [53904].param1 = 1, ++ [5410].file = "kernel/kexec.c", ++ [5410].name = "sys_kexec_load", ++ [5410].param2 = 1, ++ [54182].file = "drivers/block/rbd.c", ++ [54182].name = "rbd_snap_add", ++ [54182].param4 = 1, ++ [5419].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [5419].name = "iwl_legacy_dbgfs_disable_ht40_write", ++ [5419].param3 = 1, ++ [54201].file = "drivers/platform/x86/asus_acpi.c", ++ [54201].name = "mled_proc_write", ++ [54201].param3 = 1, ++ [5422].file = "fs/namei.c", ++ [5422].name = "do_unlinkat", ++ [5422].param2 = 1, ++ [54252].file = "drivers/scsi/st.c", ++ [54252].name = "st_write", ++ [54252].param3 = 1, ++ [54263].file = "security/keys/trusted.c", ++ [54263].name = "trusted_instantiate", ++ [54263].param3 = 1, ++ 
[54298].file = "drivers/usb/wusbcore/crypto.c", ++ [54298].name = "wusb_ccm_mac", ++ [54298].param7 = 1, ++ [54318].file = "include/drm/drm_mem_util.h", ++ [54318].name = "drm_malloc_ab", ++ [54318].param1 = 1, ++ [54318].param2 = 1, ++ [54335].file = "drivers/md/dm-table.c", ++ [54335].name = "dm_vcalloc", ++ [54335].param1 = 1, ++ [54335].param2 = 1, ++ [54339].file = "security/smack/smackfs.c", ++ [54339].name = "smk_write_cipso", ++ [54339].param3 = 1, ++ [5438].file = "sound/core/memory.c", ++ [5438].name = "copy_to_user_fromio", ++ [5438].param3 = 1, ++ [54401].file = "lib/dynamic_debug.c", ++ [54401].name = "ddebug_proc_write", ++ [54401].param3 = 1, ++ [54427].file = "drivers/usb/storage/jumpshot.c", ++ [54427].name = "jumpshot_write_data", ++ [54427].param4 = 1, ++ [54467].file = "net/packet/af_packet.c", ++ [54467].name = "packet_setsockopt", ++ [54467].param5 = 1, ++ [54573].file = "ipc/sem.c", ++ [54573].name = "sys_semop", ++ [54573].param3 = 1, ++ [54643].file = "drivers/isdn/hardware/eicon/divasi.c", ++ [54643].name = "um_idi_write", ++ [54643].param3 = 1, ++ [54657].file = "mm/migrate.c", ++ [54657].name = "do_pages_stat", ++ [54657].param2 = 1, ++ [54663].file = "drivers/isdn/hardware/eicon/platform.h", ++ [54663].name = "diva_os_malloc", ++ [54663].param2 = 1, ++ [54751].file = "drivers/infiniband/core/device.c", ++ [54751].name = "ib_alloc_device", ++ [54751].param1 = 1, ++ [54806].file = "drivers/scsi/lpfc/lpfc_debugfs.c", ++ [54806].name = "lpfc_debugfs_dif_err_write", ++ [54806].param3 = 1, ++ [5494].file = "fs/cifs/cifsacl.c", ++ [5494].name = "cifs_idmap_key_instantiate", ++ [5494].param3 = 1, ++ [55066].file = "net/ipv6/ipv6_sockglue.c", ++ [55066].name = "do_ipv6_setsockopt", ++ [55066].param5 = 1, ++ [55105].file = "drivers/base/devres.c", ++ [55105].name = "devres_alloc", ++ [55105].param2 = 1, ++ [55115].file = "net/sctp/probe.c", ++ [55115].name = "sctpprobe_read", ++ [55115].param3 = 1, ++ [55155].file = "net/bluetooth/rfcomm/sock.c", ++ [55155].name = "rfcomm_sock_setsockopt", ++ [55155].param5 = 1, ++ [55187].file = "security/keys/keyctl.c", ++ [55187].name = "keyctl_describe_key", ++ [55187].param3 = 1, ++ [5524].file = "lib/kstrtox.c", ++ [5524].name = "kstrtos8_from_user", ++ [5524].param2 = 1, ++ [55253].file = "drivers/net/wireless/ray_cs.c", ++ [55253].name = "ray_cs_essid_proc_write", ++ [55253].param3 = 1, ++ [5548].file = "drivers/media/media-entity.c", ++ [5548].name = "media_entity_init", ++ [5548].param2 = 1, ++ [5548].param4 = 1, ++ [55580].file = "drivers/usb/mon/mon_bin.c", ++ [55580].name = "copy_from_buf", ++ [55580].param2 = 1, ++ [55682].file = "drivers/net/wireless/libertas/debugfs.c", ++ [55682].name = "lbs_host_sleep_write", ++ [55682].param3 = 1, ++ [55712].file = "drivers/char/mem.c", ++ [55712].name = "read_zero", ++ [55712].param3 = 1, ++ [55857].file = "drivers/net/wireless/ath/ath9k/debug.c", ++ [55857].name = "write_file_tx_chainmask", ++ [55857].param3 = 1, ++ [55978].file = "drivers/usb/misc/iowarrior.c", ++ [55978].name = "iowarrior_write", ++ [55978].param3 = 1, ++ [5599].file = "drivers/char/random.c", ++ [5599].name = "write_pool", ++ [5599].param3 = 1, ++ [56090].file = "drivers/media/video/videobuf-dma-sg.c", ++ [56090].name = "__videobuf_alloc_vb", ++ [56090].param1 = 1, ++ [56199].file = "fs/binfmt_misc.c", ++ [56199].name = "parse_command", ++ [56199].param2 = 1, ++ [56218].file = "drivers/mmc/card/mmc_test.c", ++ [56218].name = "mtf_test_write", ++ [56218].param3 = 1, ++ [56416].file = "drivers/misc/lkdtm.c", ++ 
[56416].name = "do_register_entry", ++ [56416].param4 = 1, ++ [56432].file = "drivers/mfd/aat2870-core.c", ++ [56432].name = "aat2870_reg_write_file", ++ [56432].param3 = 1, ++ [56471].file = "include/linux/slab.h", ++ [56471].name = "kcalloc", ++ [56471].param1 = 1, ++ [56471].param2 = 1, ++ [56513].file = "fs/cifs/connect.c", ++ [56513].name = "cifs_readv_from_socket", ++ [56513].param3 = 1, ++ [56544].file = "drivers/block/drbd/drbd_receiver.c", ++ [56544].name = "receive_DataRequest", ++ [56544].param3 = 1, ++ [5661].file = "lib/dma-debug.c", ++ [5661].name = "filter_write", ++ [5661].param3 = 1, ++ [56672].file = "drivers/char/agp/generic.c", ++ [56672].name = "agp_alloc_page_array", ++ [56672].param1 = 1, ++ [56843].file = "drivers/scsi/scsi_transport_iscsi.c", ++ [56843].name = "iscsi_recv_pdu", ++ [56843].param4 = 1, ++ [57120].file = "lib/kstrtox.c", ++ [57120].name = "kstrtouint_from_user", ++ [57120].param2 = 1, ++ [57128].file = "drivers/pnp/pnpbios/proc.c", ++ [57128].name = "pnpbios_proc_write", ++ [57128].param3 = 1, ++ [57190].file = "drivers/char/agp/generic.c", ++ [57190].name = "agp_generic_alloc_user", ++ [57190].param1 = 1, ++ [57471].file = "drivers/media/video/sn9c102/sn9c102_core.c", ++ [57471].name = "sn9c102_read", ++ [57471].param3 = 1, ++ [57605].file = "net/netlink/af_netlink.c", ++ [57605].name = "netlink_kernel_create", ++ [57605].param3 = 1, ++ [57670].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [57670].name = "btmrvl_pscmd_write", ++ [57670].param3 = 1, ++ [57675].file = "drivers/net/wireless/ath/ath9k/debug.c", ++ [57675].name = "write_file_regidx", ++ [57675].param3 = 1, ++ [57724].file = "net/bluetooth/hci_sock.c", ++ [57724].name = "hci_sock_setsockopt", ++ [57724].param5 = 1, ++ [57748].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [57748].name = "iwl_dbgfs_missed_beacon_write", ++ [57748].param3 = 1, ++ [57786].file = "net/ipv6/netfilter/ip6_tables.c", ++ [57786].name = "compat_do_ip6t_set_ctl", ++ [57786].param4 = 1, ++ [57872].file = "fs/ceph/xattr.c", ++ [57872].name = "ceph_setxattr", ++ [57872].param4 = 1, ++ [57927].file = "fs/read_write.c", ++ [57927].name = "sys_preadv", ++ [57927].param3 = 1, ++ [58020].file = "drivers/firewire/core-cdev.c", ++ [58020].name = "fw_device_op_ioctl", ++ [58020].param2 = 1, ++ [58043].file = "kernel/auditfilter.c", ++ [58043].name = "audit_unpack_string", ++ [58043].param3 = 1, ++ [5805].file = "drivers/xen/grant-table.c", ++ [5805].name = "gnttab_alloc_grant_references", ++ [5805].param1 = 1, ++ [58087].file = "kernel/module.c", ++ [58087].name = "module_alloc_update_bounds_rw", ++ [58087].param1 = 1, ++ [58107].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [58107].name = "iwl_dbgfs_sleep_level_override_write", ++ [58107].param3 = 1, ++ [58124].file = "drivers/usb/misc/usbtest.c", ++ [58124].name = "ctrl_out", ++ [58124].param3 = 1, ++ [58124].param5 = 1, ++ [58263].file = "security/keys/keyring.c", ++ [58263].name = "keyring_read", ++ [58263].param3 = 1, ++ [58278].file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c", ++ [58278].name = "iwl_dbgfs_log_event_write", ++ [58278].param3 = 1, ++ [5830].file = "drivers/gpu/vga/vga_switcheroo.c", ++ [5830].name = "vga_switcheroo_debugfs_write", ++ [5830].param3 = 1, ++ [58320].file = "drivers/scsi/scsi_proc.c", ++ [58320].name = "proc_scsi_write", ++ [58320].param3 = 1, ++ [58344].file = "net/sunrpc/cache.c", ++ [58344].name = "read_flush", ++ [58344].param3 = 1, ++ [58392].file = "fs/namei.c", ++ [58392].name = "getname_flags", ++ 
[58392].param1 = 1, ++ [58418].file = "kernel/module.c", ++ [58418].name = "sys_init_module", ++ [58418].param2 = 1, ++ [58502].file = "sound/core/sgbuf.c", ++ [58502].name = "snd_malloc_sgbuf_pages", ++ [58502].param2 = 1, ++ [58597].file = "kernel/kfifo.c", ++ [58597].name = "__kfifo_to_user", ++ [58597].param3 = 1, ++ [58641].file = "drivers/usb/misc/adutux.c", ++ [58641].name = "adu_write", ++ [58641].param3 = 1, ++ [58709].file = "fs/compat.c", ++ [58709].name = "compat_sys_pwritev", ++ [58709].param3 = 1, ++ [58769].file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ [58769].name = "zd_usb_read_fw", ++ [58769].param4 = 1, ++ [5876].file = "drivers/net/ppp/ppp_generic.c", ++ [5876].name = "ppp_write", ++ [5876].param3 = 1, ++ [58826].file = "net/sunrpc/xprt.c", ++ [58826].name = "xprt_alloc", ++ [58826].param2 = 1, ++ [58867].file = "drivers/platform/x86/asus_acpi.c", ++ [58867].name = "wled_proc_write", ++ [58867].param3 = 1, ++ [58878].file = "drivers/net/wireless/libertas/debugfs.c", ++ [58878].name = "lbs_wrbbp_write", ++ [58878].param3 = 1, ++ [58888].file = "fs/xattr.c", ++ [58888].name = "listxattr", ++ [58888].param3 = 1, ++ [58912].file = "drivers/lguest/core.c", ++ [58912].name = "__lgwrite", ++ [58912].param4 = 1, ++ [58918].file = "sound/core/pcm_native.c", ++ [58918].name = "snd_pcm_aio_write", ++ [58918].param3 = 1, ++ [58919].file = "net/netlabel/netlabel_unlabeled.c", ++ [58919].name = "netlbl_unlabel_init", ++ [58919].param1 = 1, ++ [58942].file = "drivers/block/aoe/aoedev.c", ++ [58942].name = "aoedev_flush", ++ [58942].param2 = 1, ++ [58958].file = "fs/fuse/control.c", ++ [58958].name = "fuse_conn_limit_write", ++ [58958].param3 = 1, ++ [58].file = "lib/kstrtox.c", ++ [58].name = "kstrtoull_from_user", ++ [58].param2 = 1, ++ [59034].file = "drivers/acpi/acpica/dsobject.c", ++ [59034].name = "acpi_ds_build_internal_package_obj", ++ [59034].param3 = 1, ++ [59073].file = "drivers/staging/speakup/i18n.c", ++ [59073].name = "msg_set", ++ [59073].param3 = 1, ++ [59108].file = "drivers/net/wireless/ath/ath5k/debug.c", ++ [59108].name = "write_file_queue", ++ [59108].param3 = 1, ++ [59297].file = "drivers/media/dvb/ttpci/av7110_av.c", ++ [59297].name = "dvb_play", ++ [59297].param3 = 1, ++ [59472].file = "drivers/misc/ibmasm/ibmasmfs.c", ++ [59472].name = "command_file_write", ++ [59472].param3 = 1, ++ [59505].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c", ++ [59505].name = "pvr2_ioread_read", ++ [59505].param3 = 1, ++ [59681].file = "fs/xfs/kmem.c", ++ [59681].name = "kmem_alloc", ++ [59681].param1 = 1, ++ [5968].file = "net/sunrpc/sched.c", ++ [5968].name = "rpc_malloc", ++ [5968].param2 = 1, ++ [59794].file = "mm/mincore.c", ++ [59794].name = "sys_mincore", ++ [59794].param1 = 1, ++ [59794].param2 = 1, ++ [59838].file = "net/netlink/af_netlink.c", ++ [59838].name = "nl_pid_hash_zalloc", ++ [59838].param1 = 1, ++ [59856].file = "drivers/base/devres.c", ++ [59856].name = "devm_kzalloc", ++ [59856].param2 = 1, ++ [59991].file = "drivers/media/video/uvc/uvc_queue.c", ++ [59991].name = "uvc_alloc_buffers", ++ [59991].param2 = 1, ++ [59991].param3 = 1, ++ [60005].file = "fs/namei.c", ++ [60005].name = "getname", ++ [60005].param1 = 1, ++ [60066].file = "mm/filemap.c", ++ [60066].name = "iov_iter_copy_from_user", ++ [60066].param4 = 1, ++ [60198].file = "fs/nfs/nfs4proc.c", ++ [60198].name = "nfs4_write_cached_acl", ++ [60198].param3 = 1, ++ [60330].file = "drivers/media/video/w9966.c", ++ [60330].name = "w9966_v4l_read", ++ [60330].param3 = 1, ++ [6041].file = 
"drivers/mtd/mtdchar.c", ++ [6041].name = "mtd_write", ++ [6041].param3 = 1, ++ [60436].file = "drivers/net/macvtap.c", ++ [60436].name = "macvtap_sendmsg", ++ [60436].param4 = 1, ++ [60483].file = "drivers/char/virtio_console.c", ++ [60483].name = "fill_readbuf", ++ [60483].param3 = 1, ++ [604].file = "drivers/staging/rtl8712/usb_ops_linux.c", ++ [604].name = "r8712_usbctrl_vendorreq", ++ [604].param6 = 1, ++ [60543].file = "drivers/usb/class/usbtmc.c", ++ [60543].name = "usbtmc_read", ++ [60543].param3 = 1, ++ [60683].file = "sound/drivers/opl4/opl4_proc.c", ++ [60683].name = "snd_opl4_mem_proc_write", ++ [60683].param5 = 1, ++ [60693].file = "drivers/misc/hpilo.c", ++ [60693].name = "ilo_read", ++ [60693].param3 = 1, ++ [60744].file = "sound/pci/emu10k1/emuproc.c", ++ [60744].name = "snd_emu10k1_fx8010_read", ++ [60744].param5 = 1, ++ [60833].file = "drivers/block/aoe/aoenet.c", ++ [60833].name = "set_aoe_iflist", ++ [60833].param2 = 1, ++ [60878].file = "drivers/net/wireless/rt2x00/rt2x00debug.c", ++ [60878].name = "rt2x00debug_read_queue_dump", ++ [60878].param3 = 1, ++ [60882].file = "drivers/input/joydev.c", ++ [60882].name = "joydev_compat_ioctl", ++ [60882].param2 = 1, ++ [60891].file = "kernel/sched.c", ++ [60891].name = "sys_sched_setaffinity", ++ [60891].param2 = 1, ++ [60927].file = "drivers/net/wireless/ath/ath9k/debug.c", ++ [60927].name = "write_file_disable_ani", ++ [60927].param3 = 1, ++ [60928].file = "drivers/staging/bcm/Bcmchar.c", ++ [60928].name = "bcm_char_read", ++ [60928].param3 = 1, ++ [61058].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [61058].name = "iwl_dbgfs_disable_ht40_write", ++ [61058].param3 = 1, ++ [61120].file = "drivers/char/mem.c", ++ [61120].name = "read_mem", ++ [61120].param3 = 1, ++ [61222].file = "net/sunrpc/rpc_pipe.c", ++ [61222].name = "rpc_pipe_generic_upcall", ++ [61222].param4 = 1, ++ [61254].file = "drivers/scsi/scsi_devinfo.c", ++ [61254].name = "proc_scsi_devinfo_write", ++ [61254].param3 = 1, ++ [61283].file = "drivers/net/wireless/ath/ath6kl/debug.c", ++ [61283].name = "ath6kl_fwlog_read", ++ [61283].param3 = 1, ++ [61289].file = "security/apparmor/apparmorfs.c", ++ [61289].name = "aa_simple_write_to_buffer", ++ [61289].param4 = 1, ++ [61389].file = "include/linux/slab.h", ++ [61389].name = "kzalloc_node", ++ [61389].param1 = 1, ++ [61546].file = "mm/filemap.c", ++ [61546].name = "__iovec_copy_from_user_inatomic", ++ [61546].param3 = 1, ++ [61546].param4 = 1, ++ [61552].file = "drivers/input/evdev.c", ++ [61552].name = "str_to_user", ++ [61552].param2 = 1, ++ [61673].file = "security/keys/trusted.c", ++ [61673].name = "trusted_update", ++ [61673].param3 = 1, ++ [61676].file = "kernel/module.c", ++ [61676].name = "module_alloc_update_bounds_rx", ++ [61676].param1 = 1, ++ [61770].file = "drivers/media/video/et61x251/et61x251_core.c", ++ [61770].name = "et61x251_read", ++ [61770].param3 = 1, ++ [6186].file = "drivers/char/mem.c", ++ [6186].name = "read_kmem", ++ [6186].param3 = 1, ++ [61932].file = "drivers/message/fusion/mptctl.c", ++ [61932].name = "__mptctl_ioctl", ++ [61932].param2 = 1, ++ [62081].file = "drivers/net/irda/vlsi_ir.c", ++ [62081].name = "vlsi_alloc_ring", ++ [62081].param3 = 1, ++ [62116].file = "fs/libfs.c", ++ [62116].name = "simple_attr_read", ++ [62116].param3 = 1, ++ [6225].file = "drivers/block/floppy.c", ++ [6225].name = "fd_ioctl", ++ [6225].param3 = 1, ++ [62294].file = "sound/core/info.c", ++ [62294].name = "resize_info_buffer", ++ [62294].param2 = 1, ++ [62378].file = "net/ipv4/tcp.c", ++ 
[62378].name = "do_tcp_setsockopt", ++ [62378].param5 = 1, ++ [62387].file = "fs/nfs/idmap.c", ++ [62387].name = "nfs_idmap_lookup_id", ++ [62387].param2 = 1, ++ [62453].file = "fs/namei.c", ++ [62453].name = "user_path_create", ++ [62453].param2 = 1, ++ [62495].file = "drivers/block/floppy.c", ++ [62495].name = "fallback_on_nodma_alloc", ++ [62495].param2 = 1, ++ [62498].file = "fs/xattr.c", ++ [62498].name = "sys_listxattr", ++ [62498].param3 = 1, ++ [62583].file = "drivers/net/wireless/mwifiex/debugfs.c", ++ [62583].name = "mwifiex_regrdwr_write", ++ [62583].param3 = 1, ++ [625].file = "fs/read_write.c", ++ [625].name = "sys_pwritev", ++ [625].param3 = 1, ++ [62669].file = "drivers/platform/x86/asus_acpi.c", ++ [62669].name = "tled_proc_write", ++ [62669].param3 = 1, ++ [62714].file = "security/keys/keyctl.c", ++ [62714].name = "keyctl_update_key", ++ [62714].param3 = 1, ++ [62799].file = "fs/proc/task_mmu.c", ++ [62799].name = "pagemap_read", ++ [62799].param3 = 1, ++ [62811].file = "drivers/usb/misc/legousbtower.c", ++ [62811].name = "tower_read", ++ [62811].param3 = 1, ++ [62851].file = "fs/proc/vmcore.c", ++ [62851].name = "read_vmcore", ++ [62851].param3 = 1, ++ [62925].file = "include/rdma/ib_verbs.h", ++ [62925].name = "ib_copy_from_udata", ++ [62925].param3 = 1, ++ [62967].file = "security/keys/encrypted-keys/encrypted.c", ++ [62967].name = "encrypted_update", ++ [62967].param3 = 1, ++ [62970].file = "net/sched/sch_api.c", ++ [62970].name = "qdisc_class_hash_alloc", ++ [62970].param1 = 1, ++ [62999].file = "net/core/neighbour.c", ++ [62999].name = "neigh_hash_alloc", ++ [62999].param1 = 1, ++ [63004].file = "drivers/usb/storage/datafab.c", ++ [63004].name = "datafab_write_data", ++ [63004].param4 = 1, ++ [63007].file = "fs/proc/base.c", ++ [63007].name = "proc_coredump_filter_write", ++ [63007].param3 = 1, ++ [63010].file = "drivers/gpu/drm/ttm/ttm_page_alloc.c", ++ [63010].name = "ttm_page_pool_free", ++ [63010].param2 = 1, ++ [63076].file = "fs/cifs/xattr.c", ++ [63076].name = "cifs_setxattr", ++ [63076].param4 = 1, ++ [63091].file = "drivers/net/usb/pegasus.c", ++ [63091].name = "get_registers", ++ [63091].param3 = 1, ++ [63169].file = "drivers/scsi/sg.c", ++ [63169].name = "sg_read", ++ [63169].param3 = 1, ++ [6331].file = "drivers/atm/solos-pci.c", ++ [6331].name = "solos_param_store", ++ [6331].param4 = 1, ++ [63367].file = "net/netfilter/ipset/ip_set_core.c", ++ [63367].name = "ip_set_alloc", ++ [63367].param1 = 1, ++ [63473].file = "drivers/staging/pohmelfs/trans.c", ++ [63473].name = "netfs_trans_alloc", ++ [63473].param2 = 1, ++ [63473].param4 = 1, ++ [63489].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [63489].name = "btmrvl_hscfgcmd_write", ++ [63489].param3 = 1, ++ [63490].file = "crypto/shash.c", ++ [63490].name = "shash_compat_setkey", ++ [63490].param3 = 1, ++ [63583].file = "drivers/char/mem.c", ++ [63583].name = "write_kmem", ++ [63583].param3 = 1, ++ [63605].file = "mm/mempool.c", ++ [63605].name = "mempool_kmalloc", ++ [63605].param2 = 1, ++ [63717].file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c", ++ [63717].name = "iwl_dbgfs_csr_write", ++ [63717].param3 = 1, ++ [63748].file = "drivers/staging/crystalhd/crystalhd_misc.c", ++ [63748].name = "crystalhd_map_dio", ++ [63748].param3 = 1, ++ [63765].file = "fs/seq_file.c", ++ [63765].name = "seq_read", ++ [63765].param3 = 1, ++ [63777].file = "drivers/virtio/virtio_ring.c", ++ [63777].name = "virtqueue_add_buf_gfp", ++ [63777].param3 = 1, ++ [63777].param4 = 1, ++ [63961].file = "fs/xattr.c", ++ 
[63961].name = "sys_flistxattr", ++ [63961].param3 = 1, ++ [63988].file = "drivers/input/evdev.c", ++ [63988].name = "evdev_ioctl_compat", ++ [63988].param2 = 1, ++ [64118].file = "fs/namei.c", ++ [64118].name = "sys_symlinkat", ++ [64118].param1 = 1, ++ [64156].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c", ++ [64156].name = "ath6kl_mgmt_tx", ++ [64156].param9 = 1, ++ [64227].file = "mm/nobootmem.c", ++ [64227].name = "__alloc_bootmem_node_nopanic", ++ [64227].param2 = 1, ++ [64312].file = "drivers/video/hecubafb.c", ++ [64312].name = "hecubafb_write", ++ [64312].param3 = 1, ++ [64351].file = "kernel/kfifo.c", ++ [64351].name = "kfifo_copy_from_user", ++ [64351].param3 = 1, ++ [64392].file = "drivers/mmc/core/mmc_ops.c", ++ [64392].name = "mmc_send_cxd_data", ++ [64392].param5 = 1, ++ [64471].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [64471].name = "btmrvl_hscmd_write", ++ [64471].param3 = 1, ++ [64667].file = "sound/core/oss/pcm_oss.c", ++ [64667].name = "snd_pcm_oss_read", ++ [64667].param3 = 1, ++ [64689].file = "sound/isa/gus/gus_dram.c", ++ [64689].name = "snd_gus_dram_read", ++ [64689].param4 = 1, ++ [64692].file = "fs/binfmt_misc.c", ++ [64692].name = "bm_entry_write", ++ [64692].param3 = 1, ++ [64705].file = "drivers/staging/iio/accel/sca3000_ring.c", ++ [64705].name = "sca3000_read_first_n_hw_rb", ++ [64705].param2 = 1, ++ [64743].file = "fs/ocfs2/dlmfs/dlmfs.c", ++ [64743].name = "dlmfs_file_read", ++ [64743].param3 = 1, ++ [6477].file = "net/bluetooth/mgmt.c", ++ [6477].name = "mgmt_pending_add", ++ [6477].param5 = 1, ++ [64898].file = "drivers/media/video/videobuf-dma-sg.c", ++ [64898].name = "videobuf_dma_init_user", ++ [64898].param3 = 1, ++ [64898].param4 = 1, ++ [64906].file = "drivers/net/wireless/b43legacy/debugfs.c", ++ [64906].name = "b43legacy_debugfs_write", ++ [64906].param3 = 1, ++ [64961].file = "drivers/spi/spidev.c", ++ [64961].name = "spidev_ioctl", ++ [64961].param2 = 1, ++ [65033].file = "crypto/shash.c", ++ [65033].name = "shash_async_setkey", ++ [65033].param3 = 1, ++ [65093].file = "security/integrity/evm/evm_secfs.c", ++ [65093].name = "evm_write_key", ++ [65093].param3 = 1, ++ [65098].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [65098].name = "iwl_dbgfs_traffic_log_write", ++ [65098].param3 = 1, ++ [6514].file = "mm/nobootmem.c", ++ [6514].name = "__alloc_bootmem_low", ++ [6514].param1 = 1, ++ [65169].file = "net/core/skbuff.c", ++ [65169].name = "dev_alloc_skb", ++ [65169].param1 = 1, ++ [6517].file = "drivers/md/dm-table.c", ++ [6517].name = "alloc_targets", ++ [6517].param2 = 1, ++ [65195].file = "fs/jffs2/xattr.c", ++ [65195].name = "do_jffs2_setxattr", ++ [65195].param5 = 1, ++ [65237].file = "kernel/profile.c", ++ [65237].name = "read_profile", ++ [65237].param3 = 1, ++ [65345].file = "lib/xz/xz_dec_lzma2.c", ++ [65345].name = "xz_dec_lzma2_create", ++ [65345].param2 = 1, ++ [65364].file = "sound/core/pcm_lib.c", ++ [65364].name = "snd_pcm_lib_read_transfer", ++ [65364].param5 = 1, ++ [65409].file = "net/802/garp.c", ++ [65409].name = "garp_request_join", ++ [65409].param4 = 1, ++ [65432].file = "drivers/hid/hid-roccat-kone.c", ++ [65432].name = "kone_receive", ++ [65432].param4 = 1, ++ [65452].file = "drivers/message/fusion/mptctl.c", ++ [65452].name = "mptctl_ioctl", ++ [65452].param2 = 1, ++ [65514].file = "drivers/media/video/gspca/t613.c", ++ [65514].name = "reg_w_ixbuf", ++ [65514].param4 = 1, ++ [6551].file = "drivers/usb/host/xhci-mem.c", ++ [6551].name = "xhci_alloc_stream_info", ++ [6551].param3 = 1, ++ 
[65535].file = "drivers/media/dvb/dvb-usb/opera1.c", ++ [65535].name = "opera1_xilinx_rw", ++ [65535].param5 = 1, ++ [6657].file = "drivers/hid/hid-roccat-kone.c", ++ [6657].name = "kone_send", ++ [6657].param4 = 1, ++ [6672].file = "drivers/net/wireless/b43/debugfs.c", ++ [6672].name = "b43_debugfs_write", ++ [6672].param3 = 1, ++ [6691].file = "drivers/acpi/proc.c", ++ [6691].name = "acpi_system_write_wakeup_device", ++ [6691].param3 = 1, ++ [6772].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c", ++ [6772].name = "iwl_dbgfs_force_reset_write", ++ [6772].param3 = 1, ++ [6780].file = "sound/core/info.c", ++ [6780].name = "snd_info_entry_read", ++ [6780].param3 = 1, ++ [6800].file = "drivers/net/wireless/iwlegacy/iwl-debugfs.c", ++ [6800].name = "iwl_legacy_dbgfs_missed_beacon_write", ++ [6800].param3 = 1, ++ [680].file = "drivers/misc/ibmasm/ibmasmfs.c", ++ [680].name = "command_file_read", ++ [680].param3 = 1, ++ [6865].file = "drivers/staging/iio/ring_sw.c", ++ [6865].name = "iio_read_first_n_sw_rb", ++ [6865].param2 = 1, ++ [6867].file = "fs/coda/psdev.c", ++ [6867].name = "coda_psdev_read", ++ [6867].param3 = 1, ++ [6891].file = "drivers/bluetooth/btmrvl_debugfs.c", ++ [6891].name = "btmrvl_gpiogap_write", ++ [6891].param3 = 1, ++ [6944].file = "drivers/ide/ide-proc.c", ++ [6944].name = "ide_settings_proc_write", ++ [6944].param3 = 1, ++ [6950].file = "drivers/isdn/capi/capi.c", ++ [6950].name = "capi_write", ++ [6950].param3 = 1, ++ [697].file = "sound/isa/gus/gus_dram.c", ++ [697].name = "snd_gus_dram_peek", ++ [697].param4 = 1, ++ [7066].file = "security/keys/keyctl.c", ++ [7066].name = "keyctl_instantiate_key_common", ++ [7066].param4 = 1, ++ [7129].file = "mm/maccess.c", ++ [7129].name = "__probe_kernel_read", ++ [7129].param3 = 1, ++ [720].file = "sound/pci/rme9652/hdsp.c", ++ [720].name = "snd_hdsp_playback_copy", ++ [720].param5 = 1, ++ [7411].file = "drivers/vhost/vhost.c", ++ [7411].name = "__vhost_add_used_n", ++ [7411].param3 = 1, ++ [7488].file = "security/keys/user_defined.c", ++ [7488].name = "user_read", ++ [7488].param3 = 1, ++ [7551].file = "drivers/input/touchscreen/ad7879-spi.c", ++ [7551].name = "ad7879_spi_xfer", ++ [7551].param3 = 1, ++ [7676].file = "drivers/acpi/custom_method.c", ++ [7676].name = "cm_write", ++ [7676].param3 = 1, ++ [7832].file = "drivers/net/wireless/ath/ath5k/debug.c", ++ [7832].name = "write_file_antenna", ++ [7832].param3 = 1, ++ [7843].file = "fs/compat.c", ++ [7843].name = "compat_sys_readv", ++ [7843].param3 = 1, ++ [7958].file = "drivers/gpu/vga/vgaarb.c", ++ [7958].name = "vga_arb_write", ++ [7958].param3 = 1, ++ [7976].file = "drivers/usb/gadget/rndis.c", ++ [7976].name = "rndis_add_response", ++ [7976].param2 = 1, ++ [8014].file = "net/netfilter/ipset/ip_set_list_set.c", ++ [8014].name = "init_list_set", ++ [8014].param2 = 1, ++ [8014].param3 = 1, ++ [8087].file = "drivers/video/via/viafbdev.c", ++ [8087].name = "viafb_iga1_odev_proc_write", ++ [8087].param3 = 1, ++ [8126].file = "sound/soc/soc-core.c", ++ [8126].name = "codec_reg_read_file", ++ [8126].param3 = 1, ++ [8185].file = "drivers/net/wireless/ath/ath6kl/debug.c", ++ [8185].name = "ath6kl_regwrite_write", ++ [8185].param3 = 1, ++ [8317].file = "security/smack/smackfs.c", ++ [8317].name = "smk_write_ambient", ++ [8317].param3 = 1, ++ [8334].file = "drivers/scsi/sg.c", ++ [8334].name = "sg_proc_write_adio", ++ [8334].param3 = 1, ++ [8481].file = "drivers/isdn/i4l/isdn_common.c", ++ [8481].name = "isdn_write", ++ [8481].param3 = 1, ++ [8536].file = "fs/cifs/dns_resolve.c", 
++ [8536].name = "dns_resolve_server_name_to_ip", ++ [8536].param1 = 1, ++ [8650].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c", ++ [8650].name = "vmw_kms_present", ++ [8650].param9 = 1, ++ [865].file = "drivers/base/regmap/regmap-debugfs.c", ++ [865].name = "regmap_access_read_file", ++ [865].param3 = 1, ++ [8663].file = "net/bridge/netfilter/ebtables.c", ++ [8663].name = "do_update_counters", ++ [8663].param4 = 1, ++ [8684].file = "fs/read_write.c", ++ [8684].name = "sys_writev", ++ [8684].param3 = 1, ++ [8699].file = "security/selinux/selinuxfs.c", ++ [8699].name = "sel_commit_bools_write", ++ [8699].param3 = 1, ++ [8714].file = "lib/kstrtox.c", ++ [8714].name = "kstrtou16_from_user", ++ [8714].param2 = 1, ++ [8764].file = "drivers/usb/core/devio.c", ++ [8764].name = "usbdev_read", ++ [8764].param3 = 1, ++ [8802].file = "fs/dlm/user.c", ++ [8802].name = "device_write", ++ [8802].param3 = 1, ++ [8810].file = "net/mac80211/debugfs_sta.c", ++ [8810].name = "sta_agg_status_write", ++ [8810].param3 = 1, ++ [8815].file = "security/tomoyo/securityfs_if.c", ++ [8815].name = "tomoyo_write_self", ++ [8815].param3 = 1, ++ [8821].file = "net/wireless/sme.c", ++ [8821].name = "cfg80211_roamed", ++ [8821].param5 = 1, ++ [8821].param7 = 1, ++ [8833].file = "security/selinux/ss/services.c", ++ [8833].name = "security_context_to_sid", ++ [8833].param2 = 1, ++ [8851].file = "net/key/af_key.c", ++ [8851].name = "pfkey_sendmsg", ++ [8851].param4 = 1, ++ [8917].file = "net/ipv4/raw.c", ++ [8917].name = "raw_setsockopt", ++ [8917].param5 = 1, ++ [8983].file = "include/linux/skbuff.h", ++ [8983].name = "alloc_skb", ++ [8983].param1 = 1, ++ [9226].file = "mm/migrate.c", ++ [9226].name = "sys_move_pages", ++ [9226].param2 = 1, ++ [9341].file = "drivers/acpi/apei/erst-dbg.c", ++ [9341].name = "erst_dbg_write", ++ [9341].param3 = 1, ++ [9463].file = "drivers/infiniband/hw/ipath/ipath_verbs.c", ++ [9463].name = "ipath_verbs_send", ++ [9463].param3 = 1, ++ [9463].param5 = 1, ++ [9546].file = "drivers/video/fbmem.c", ++ [9546].name = "fb_write", ++ [9546].param3 = 1, ++ [9601].file = "kernel/kfifo.c", ++ [9601].name = "__kfifo_from_user", ++ [9601].param3 = 1, ++ [9618].file = "security/selinux/selinuxfs.c", ++ [9618].name = "sel_write_bool", ++ [9618].param3 = 1, ++ [9768].file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c", ++ [9768].name = "vmw_execbuf_process", ++ [9768].param5 = 1, ++ [9828].file = "drivers/media/dvb/dvb-core/dmxdev.c", ++ [9828].name = "dvb_demux_do_ioctl", ++ [9828].param3 = 1, ++ [9870].file = "net/atm/addr.c", ++ [9870].name = "atm_get_addr", ++ [9870].param3 = 1, ++ [9962].file = "drivers/scsi/sg.c", ++ [9962].name = "sg_proc_write_dressz", ++ [9962].param3 = 1, ++ [9977].file = "drivers/net/wireless/zd1211rw/zd_usb.c", ++ [9977].name = "zd_usb_iowrite16v_async", ++ [9977].param3 = 1, ++ [16344].collision = 1, ++ [30494].collision = 1, ++ [31291].collision = 1, ++ [33040].collision = 1, ++ [38314].collision = 1, ++ [54338].collision = 1, ++ [60651].collision = 1, ++}; +diff --git a/tools/gcc/size_overflow_hash2.h b/tools/gcc/size_overflow_hash2.h +new file mode 100644 +index 0000000..8ed7d96 +--- /dev/null ++++ b/tools/gcc/size_overflow_hash2.h +@@ -0,0 +1,44 @@ ++struct size_overflow_hash size_overflow_hash2[65536] = { ++ [2118].file = "fs/ntfs/malloc.h", ++ [2118].name = "ntfs_malloc_nofs", ++ [2118].param1 = 1, ++ [22224].file = "fs/proc/vmcore.c", ++ [22224].name = "read_from_oldmem", ++ [22224].param2 = 1, ++ [26518].file = "drivers/gpu/vga/vgaarb.c", ++ [26518].name = "vga_arb_read", ++ 
[26518].param3 = 1, ++ [26569].file = "lib/kstrtox.c", ++ [26569].name = "kstrtoint_from_user", ++ [26569].param2 = 1, ++ [30632].file = "drivers/ide/ide-proc.c", ++ [30632].name = "ide_driver_proc_write", ++ [30632].param3 = 1, ++ [36150].file = "net/ceph/buffer.c", ++ [36150].name = "ceph_buffer_new", ++ [36150].param1 = 1, ++ [39024].file = "lib/scatterlist.c", ++ [39024].name = "sg_kmalloc", ++ [39024].param1 = 1, ++ [39105].file = "drivers/gpu/drm/ttm/ttm_tt.c", ++ [39105].name = "ttm_tt_create", ++ [39105].param2 = 1, ++ [43208].file = "fs/nfs/read.c", ++ [43208].name = "nfs_readdata_alloc", ++ [43208].param1 = 1, ++ [46911].file = "drivers/media/video/ivtv/ivtv-fileops.c", ++ [46911].name = "ivtv_v4l2_read", ++ [46911].param3 = 1, ++ [50359].file = "kernel/sched.c", ++ [50359].name = "alloc_sched_domains", ++ [50359].param1 = 1, ++ [52857].file = "sound/pci/rme9652/rme9652.c", ++ [52857].name = "snd_rme9652_capture_copy", ++ [52857].param5 = 1, ++ [57500].file = "drivers/spi/spidev.c", ++ [57500].name = "spidev_write", ++ [57500].param3 = 1, ++ [65149].file = "fs/nilfs2/ioctl.c", ++ [65149].name = "nilfs_ioctl_wrap_copy", ++ [65149].param4 = 1, ++}; +diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c +new file mode 100644 +index 0000000..a9ae886 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin.c +@@ -0,0 +1,1042 @@ ++/* ++ * Copyright 2011, 2012 by Emese Revfy re.emese@gmail.com ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed. 
++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c ++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "intl.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "toplev.h" ++#include "function.h" ++#include "tree-flow.h" ++#include "plugin.h" ++#include "gimple.h" ++#include "c-common.h" ++#include "diagnostic.h" ++ ++struct size_overflow_hash { ++ const char *name; ++ const char *file; ++ unsigned short collision:1; ++ unsigned short param1:1; ++ unsigned short param2:1; ++ unsigned short param3:1; ++ unsigned short param4:1; ++ unsigned short param5:1; ++ unsigned short param6:1; ++ unsigned short param7:1; ++ unsigned short param8:1; ++ unsigned short param9:1; ++}; ++ ++#include "size_overflow_hash1.h" ++#include "size_overflow_hash2.h" ++ ++#define __unused __attribute__((__unused__)) ++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node)) ++#define BEFORE_STMT true ++#define AFTER_STMT false ++#define CREATE_NEW_VAR NULL_TREE ++ ++int plugin_is_GPL_compatible; ++void debug_gimple_stmt (gimple gs); ++ ++static tree expand(struct pointer_set_t *visited, tree var); ++static tree signed_size_overflow_type; ++static tree unsigned_size_overflow_type; ++static tree report_size_overflow_decl; ++static tree const_char_ptr_type_node; ++static unsigned int handle_function(void); ++ ++static struct plugin_info size_overflow_plugin_info = { ++ .version = "20120311beta", ++ .help = "no-size_overflow\tturn off size overflow checking\n", ++}; ++ ++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count = type_num_arguments(*node); ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("handle_size_overflow_attribute: overflow parameter outside range."); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_size_overflow_attr = { ++ .name = "size_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = false, ++ .type_required = true, ++ .function_type_required = true, ++ .handler = handle_size_overflow_attribute ++}; ++ ++static void register_attributes(void __unused *event_data, void __unused *data) ++{ ++ register_attribute(&no_size_overflow_attr); ++} ++ ++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html ++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed) ++{ ++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); } ++#define cwmixa( in ) { cwfold( in, m, k, h ); } ++#define cwmixb( in ) { cwfold( in, n, h, k ); } ++ ++ const unsigned int m = 0x57559429; ++ const unsigned int n = 0x5052acdb; ++ const unsigned int *key4 = (const unsigned int *)key; ++ unsigned int h = len; ++ unsigned int k = len + seed + n; ++ unsigned long long p; ++ ++ while (len >= 8) { ++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2; ++ len -= 8; ++ } ++ if (len >= 4) { ++ cwmixb(key4[0]) key4 += 1; ++ len -= 4; ++ } ++ if (len) ++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 )); ++ cwmixb(h ^ (k + n)); ++ 
return k ^ h; ++ ++#undef cwfold ++#undef cwmixa ++#undef cwmixb ++} ++ ++static inline unsigned int size_overflow_hash(const char *fndecl, unsigned int seed) ++{ ++ return CrapWow(fndecl, strlen(fndecl), seed) & 0xffff; ++} ++ ++static inline tree get_original_function_decl(tree fndecl) ++{ ++ if (DECL_ABSTRACT_ORIGIN(fndecl)) ++ return DECL_ABSTRACT_ORIGIN(fndecl); ++ return fndecl; ++} ++ ++static inline gimple get_def_stmt(tree node) ++{ ++ gcc_assert(TREE_CODE(node) == SSA_NAME); ++ return SSA_NAME_DEF_STMT(node); ++} ++ ++static struct size_overflow_hash *get_function_hash(tree fndecl) ++{ ++ unsigned int hash; ++ const char *func = NAME(fndecl); ++ ++ hash = size_overflow_hash(func, 0); ++ ++ if (size_overflow_hash1[hash].collision) { ++ hash = size_overflow_hash(func, 23432); ++ return &size_overflow_hash2[hash]; ++ } ++ return &size_overflow_hash1[hash]; ++} ++ ++static void check_missing_attribute(tree arg) ++{ ++ tree var, func = get_original_function_decl(current_function_decl); ++ const char *curfunc = NAME(func); ++ unsigned int new_hash, argnum = 1; ++ struct size_overflow_hash *hash; ++ location_t loc; ++ expanded_location xloc; ++ bool match = false; ++ ++ loc = DECL_SOURCE_LOCATION(func); ++ xloc = expand_location(loc); ++ ++ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func)))) ++ return; ++ ++ hash = get_function_hash(func); ++ if (hash->name && !strcmp(hash->name, NAME(func)) && !strcmp(hash->file, xloc.file)) ++ return; ++ ++ gcc_assert(TREE_CODE(arg) != COMPONENT_REF); ++ ++ if (TREE_CODE(arg) == SSA_NAME) ++ arg = SSA_NAME_VAR(arg); ++ ++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) { ++ if (strcmp(NAME(arg), NAME(var))) { ++ argnum++; ++ continue; ++ } ++ match = true; ++ if (!TYPE_UNSIGNED(TREE_TYPE(var))) ++ return; ++ break; ++ } ++ if (!match) { ++ warning(0, "check_missing_attribute: cannot find the %s argument in %s", NAME(arg), NAME(func)); ++ return; ++ } ++ ++#define check_param(num) \ ++ if (num == argnum && hash->param##num) \ ++ return; ++ check_param(1); ++ check_param(2); ++ check_param(3); ++ check_param(4); ++ check_param(5); ++ check_param(6); ++ check_param(7); ++ check_param(8); ++ check_param(9); ++#undef check_param ++ ++ new_hash = size_overflow_hash(curfunc, 0); ++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s", curfunc, curfunc, argnum, new_hash, xloc.file); ++} ++ ++static tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ mark_sym_for_renaming(new_var); ++ return new_var; ++} ++ ++static bool is_bool(tree node) ++{ ++ tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc) ++{ ++ gimple assign; ++ ++ if (new_var == CREATE_NEW_VAR) ++ new_var = create_new_var(type); ++ ++ assign = gimple_build_assign(new_var, fold_convert(type, var)); ++ gimple_set_location(assign, loc); ++ gimple_set_lhs(assign, make_ssa_name(new_var, assign)); ++ ++ return assign; ++} ++ ++static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree oldstmt_rhs1; ++ enum tree_code code; ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (is_bool(rhs1)) { ++ pointer_set_insert(visited, oldstmt); 
++ return gimple_get_lhs(oldstmt); ++ } ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("create_assign: rhs1 is NULL_TREE"); ++ gcc_unreachable(); ++ } ++ ++ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt); ++ code = TREE_CODE(oldstmt_rhs1); ++ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP)) ++ check_missing_attribute(oldstmt_rhs1); ++ ++ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt)); ++ gsi = gsi_for_stmt(oldstmt); ++ if (before) ++ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT); ++ else ++ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ tree new_var, lhs = gimple_get_lhs(oldstmt); ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ if (is_bool(lhs)) ++ new_var = SSA_NAME_VAR(lhs); ++ else ++ new_var = create_new_var(signed_size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) { ++ if (!gimple_assign_cast_p(oldstmt)) ++ rhs1 = fold_convert(signed_size_overflow_type, rhs1); ++ gimple_assign_set_rhs1(stmt, rhs1); ++ } ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static gimple overflow_create_phi_node(gimple oldstmt, tree var) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ phi = make_phi_node(var, EDGE_COUNT(bb->preds)); ++ ++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ return phi; ++} ++ ++static tree signed_cast_constant(tree node) ++{ ++ gcc_assert(is_gimple_constant(node)); ++ ++ if (TYPE_PRECISION(signed_size_overflow_type) == TYPE_PRECISION(TREE_TYPE(node))) ++ return build_int_cst_wide(signed_size_overflow_type, TREE_INT_CST_LOW(node), TREE_INT_CST_HIGH(node)); ++ else ++ return build_int_cst(signed_size_overflow_type, int_cst_value(node)); ++} ++ ++static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var) ++{ ++ basic_block first_bb; ++ gimple newstmt; ++ gimple_stmt_iterator gsi; ++ ++ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt)); ++ ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(first_bb); ++ ++ gsi_insert_before(&gsi, newstmt, 
GSI_NEW_STMT); ++ return newstmt; ++} ++ ++static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs) ++{ ++ gimple newstmt; ++ gimple_stmt_iterator gsi; ++ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update); ++ gimple def_newstmt = get_def_stmt(new_rhs); ++ ++ gsi_insert = gsi_insert_after; ++ gsi = gsi_for_stmt(def_newstmt); ++ ++ switch (gimple_code(get_def_stmt(arg))) { ++ case GIMPLE_PHI: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ gsi = gsi_after_labels(gimple_bb(def_newstmt)); ++ gsi_insert = gsi_insert_before; ++ break; ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ break; ++ case GIMPLE_ASSIGN: ++ newstmt = gimple_copy(def_newstmt); ++ break; ++ default: ++ /* unknown gimple_code (build_new_phi_arg) */ ++ gcc_unreachable(); ++ } ++ ++ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt)); ++ gsi_insert(&gsi, newstmt, GSI_NEW_STMT); ++ return newstmt; ++} ++ ++static tree build_new_phi_arg(struct pointer_set_t *visited, gimple oldstmt, tree arg, tree new_var) ++{ ++ gimple newstmt; ++ tree new_rhs; ++ ++ if (is_gimple_constant(arg)) ++ return signed_cast_constant(arg); ++ ++ pointer_set_insert(visited, oldstmt); ++ new_rhs = expand(visited, arg); ++ if (new_rhs == NULL_TREE) { ++ gcc_assert(TREE_CODE(TREE_TYPE(arg)) != VOID_TYPE); ++ newstmt = cast_old_phi_arg(oldstmt, arg, new_var); ++ } else ++ newstmt = handle_new_phi_arg(arg, new_var, new_rhs); ++ update_stmt(newstmt); ++ return gimple_get_lhs(newstmt); ++} ++ ++static tree build_new_phi(struct pointer_set_t *visited, gimple oldstmt) ++{ ++ gimple phi; ++ tree new_var = create_new_var(signed_size_overflow_type); ++ unsigned int i, n = gimple_phi_num_args(oldstmt); ++ ++ phi = overflow_create_phi_node(oldstmt, new_var); ++ ++ for (i = 0; i < n; i++) { ++ tree arg, lhs; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ lhs = build_new_phi_arg(visited, oldstmt, arg, new_var); ++ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt)); ++ } ++ update_stmt(phi); ++ return gimple_phi_result(phi); ++} ++ ++static tree handle_unary_ops(struct pointer_set_t *visited, tree var) ++{ ++ gimple def_stmt = get_def_stmt(var); ++ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if (is_gimple_constant(rhs1)) ++ return dup_assign(visited, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE); ++ ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: ++ new_rhs1 = expand(visited, rhs1); ++ break; ++ case ARRAY_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case COND_EXPR: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case PARM_DECL: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return create_assign(visited, def_stmt, var, AFTER_STMT); ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++ ++ if (new_rhs1 == NULL_TREE) ++ return create_assign(visited, def_stmt, rhs1, AFTER_STMT); ++ return dup_assign(visited, def_stmt, new_rhs1, NULL_TREE, NULL_TREE); ++} ++ ++static tree transform_mult_overflow(tree rhs, tree const_rhs, tree log2const_rhs, location_t loc) ++{ ++ tree new_def_rhs; ++ ++ if (!is_gimple_constant(rhs)) ++ return NULL_TREE; ++ ++ new_def_rhs = fold_build2_loc(loc, MULT_EXPR, TREE_TYPE(const_rhs), rhs, const_rhs); ++ new_def_rhs = signed_cast_constant(new_def_rhs); ++ if (int_cst_value(new_def_rhs) >= 0) ++ return NULL_TREE; ++ return fold_build2_loc(loc, RSHIFT_EXPR, TREE_TYPE(new_def_rhs), new_def_rhs, log2const_rhs); ++} ++ 
++static tree handle_intentional_mult_overflow(struct pointer_set_t *visited, tree rhs, tree const_rhs) ++{ ++ gimple new_def_stmt, def_stmt; ++ tree def_rhs1, def_rhs2, new_def_rhs; ++ location_t loc; ++ tree log2const_rhs; ++ int log2 = exact_log2(TREE_INT_CST_LOW(const_rhs)); ++ ++ if (log2 == -1) { ++// warning(0, "Possibly unhandled intentional integer truncation"); ++ return NULL_TREE; ++ } ++ ++ def_stmt = get_def_stmt(rhs); ++ loc = gimple_location(def_stmt); ++ def_rhs1 = gimple_assign_rhs1(def_stmt); ++ def_rhs2 = gimple_assign_rhs2(def_stmt); ++ new_def_stmt = get_def_stmt(expand(visited, rhs)); ++ log2const_rhs = build_int_cstu(TREE_TYPE(const_rhs), log2); ++ ++ new_def_rhs = transform_mult_overflow(def_rhs1, const_rhs, log2const_rhs, loc); ++ if (new_def_rhs != NULL_TREE) { ++ gimple_assign_set_rhs1(new_def_stmt, new_def_rhs); ++ } else { ++ new_def_rhs = transform_mult_overflow(def_rhs2, const_rhs, log2const_rhs, loc); ++ if (new_def_rhs != NULL_TREE) ++ gimple_assign_set_rhs2(new_def_stmt, new_def_rhs); ++ } ++ if (new_def_rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ update_stmt(new_def_stmt); ++// warning(0, "Handle integer truncation (gcc optimization)"); ++ return gimple_get_lhs(new_def_stmt); ++} ++ ++static bool is_mult_overflow(gimple def_stmt, tree rhs1) ++{ ++ gimple rhs1_def_stmt = get_def_stmt(rhs1); ++ ++ if (gimple_assign_rhs_code(def_stmt) != MULT_EXPR) ++ return false; ++ if (gimple_code(rhs1_def_stmt) != GIMPLE_ASSIGN) ++ return false; ++ if (gimple_assign_rhs_code(rhs1_def_stmt) != PLUS_EXPR) ++ return false; ++ return true; ++} ++ ++static tree handle_intentional_overflow(struct pointer_set_t *visited, gimple def_stmt, tree rhs1, tree rhs2) ++{ ++ if (is_mult_overflow(def_stmt, rhs1)) ++ return handle_intentional_mult_overflow(visited, rhs1, rhs2); ++ return NULL_TREE; ++} ++ ++static tree handle_binary_ops(struct pointer_set_t *visited, tree var) ++{ ++ tree rhs1, rhs2; ++ gimple def_stmt = get_def_stmt(var); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ /* logical AND cannot cause an overflow */ ++ case BIT_AND_EXPR: ++ return create_assign(visited, def_stmt, var, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ if (is_gimple_constant(rhs2)) { ++ new_rhs2 = signed_cast_constant(rhs2); ++ new_rhs1 = handle_intentional_overflow(visited, def_stmt, rhs1, rhs2); ++ } ++ ++ if (is_gimple_constant(rhs1)) { ++ new_rhs1 = signed_cast_constant(rhs1); ++ new_rhs2 = handle_intentional_overflow(visited, def_stmt, rhs2, rhs1); ++ } ++ ++ if (new_rhs1 == NULL_TREE && TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, rhs1); ++ if (new_rhs2 == NULL_TREE && TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, rhs2); ++ ++ return dup_assign(visited, def_stmt, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4007 ++static tree get_new_rhs(struct pointer_set_t *visited, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return signed_cast_constant(rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, rhs); ++} ++ ++static tree handle_ternary_ops(struct 
pointer_set_t *visited, tree var) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3; ++ gimple def_stmt = get_def_stmt(var); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, rhs1); ++ new_rhs2 = get_new_rhs(visited, rhs2); ++ new_rhs3 = get_new_rhs(visited, rhs3); ++ ++ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE) ++ return dup_assign(visited, def_stmt, new_rhs1, new_rhs2, new_rhs3); ++ error("handle_ternary_ops: unknown rhs"); ++ gcc_unreachable(); ++} ++#endif ++ ++static void set_size_overflow_type(tree node) ++{ ++ switch (TYPE_MODE(TREE_TYPE(node))) { ++ case SImode: ++ signed_size_overflow_type = intDI_type_node; ++ unsigned_size_overflow_type = unsigned_intDI_type_node; ++ break; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) { ++ signed_size_overflow_type = intDI_type_node; ++ unsigned_size_overflow_type = unsigned_intDI_type_node; ++ } else { ++ signed_size_overflow_type = intTI_type_node; ++ unsigned_size_overflow_type = unsigned_intTI_type_node; ++ } ++ break; ++ default: ++ error("set_size_overflow_type: unsupported gcc configuration."); ++ gcc_unreachable(); ++ } ++} ++ ++static tree expand_visited(gimple def_stmt) ++{ ++ gimple tmp; ++ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt); ++ ++ gsi_next(&gsi); ++ tmp = gsi_stmt(gsi); ++ switch (gimple_code(tmp)) { ++ case GIMPLE_ASSIGN: ++ return gimple_get_lhs(tmp); ++ case GIMPLE_PHI: ++ return gimple_phi_result(tmp); ++ case GIMPLE_CALL: ++ return gimple_call_lhs(tmp); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree expand(struct pointer_set_t *visited, tree var) ++{ ++ gimple def_stmt; ++ ++ if (is_gimple_constant(var)) ++ return NULL_TREE; ++ ++ if (TREE_CODE(var) == ADDR_EXPR) ++ return NULL_TREE; ++ ++ if (SSA_NAME_IS_DEFAULT_DEF(var)) ++ return NULL_TREE; ++ ++ def_stmt = get_def_stmt(var); ++ ++ if (!def_stmt) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return expand_visited(def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ check_missing_attribute(var); ++ return NULL_TREE; ++ case GIMPLE_PHI: ++ return build_new_phi(visited, def_stmt); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ gcc_assert(TREE_CODE(TREE_TYPE(var)) != VOID_TYPE); ++ return create_assign(visited, def_stmt, var, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, var); ++ case 3: ++ return handle_binary_ops(visited, var); ++#if BUILDING_GCC_VERSION >= 4007 ++ case 4: ++ return handle_ternary_ops(visited, var); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("expand: unknown gimple code"); ++ gcc_unreachable(); ++ } ++} ++ ++static void change_function_arg(gimple func_stmt, tree origarg, unsigned int argnum, tree newarg) ++{ ++ gimple assign, stmt; ++ gimple_stmt_iterator gsi = gsi_for_stmt(func_stmt); ++ tree origtype = TREE_TYPE(origarg); ++ ++ stmt = gsi_stmt(gsi); ++ gcc_assert(gimple_code(stmt) == GIMPLE_CALL); ++ ++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt)); ++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); ++ update_stmt(assign); ++ ++ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign)); ++ update_stmt(stmt); ++} ++ ++static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl) ++{ ++ const char *origid; ++ tree arg, origarg; ++ ++ if (!DECL_ABSTRACT_ORIGIN(fndecl)) { 
++ gcc_assert(gimple_call_num_args(stmt) > argnum); ++ return gimple_call_arg(stmt, argnum); ++ } ++ ++ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl)); ++ while (origarg && argnum) { ++ argnum--; ++ origarg = TREE_CHAIN(origarg); ++ } ++ ++ gcc_assert(argnum == 0); ++ ++ gcc_assert(origarg != NULL_TREE); ++ origid = NAME(origarg); ++ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) { ++ if (!strcmp(origid, NAME(arg))) ++ return arg; ++ } ++ return NULL_TREE; ++} ++ ++static void insert_cond(tree arg, basic_block cond_bb) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(GT_EXPR, arg, build_int_cstu(signed_size_overflow_type, 0x7fffffff), NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree array_ref = build4(ARRAY_REF, TREE_TYPE(string), string, integer_zero_node, NULL, NULL); ++ ++ return build1(ADDR_EXPR, ptr_type_node, array_ref); ++} ++ ++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg) ++{ ++ gimple func_stmt, def_stmt; ++ tree current_func, loc_file, loc_line; ++ expanded_location xloc; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ gcc_assert(gimple_has_location(stmt)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file), xloc.file); ++ TREE_TYPE(loc_file) = char_array_type_node; ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(IDENTIFIER_LENGTH(DECL_NAME(current_function_decl)), NAME(current_function_decl)); ++ TREE_TYPE(current_func) = char_array_type_node; ++ current_func = create_string_param(current_func); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func); ++ ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++} ++ ++static void insert_check_size_overflow(gimple stmt, tree arg) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ ++ if (dom_info_available_p(CDI_DOMINATORS)) { ++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ } ++ ++ insert_cond(arg, cond_bb); ++ insert_cond_result(bb_true, stmt, arg); ++} ++ ++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ tree arg, newarg; ++ gimple ucast_stmt; ++ gimple_stmt_iterator gsi; ++ location_t loc = gimple_location(stmt); ++ ++ arg = get_function_arg(argnum, stmt, fndecl); ++ if (arg == NULL_TREE) ++ return; ++ ++ if (is_gimple_constant(arg)) ++ return; ++ if (TREE_CODE(arg) != SSA_NAME) ++ return; ++ ++ set_size_overflow_type(arg); ++ visited = pointer_set_create(); ++ newarg = expand(visited, arg); ++ 
pointer_set_destroy(visited); ++ ++ if (newarg == NULL_TREE) ++ return; ++ ++ change_function_arg(stmt, arg, argnum, newarg); ++ ++ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, newarg, CREATE_NEW_VAR, loc); ++ gsi = gsi_for_stmt(stmt); ++ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT); ++ ++ insert_check_size_overflow(stmt, gimple_get_lhs(ucast_stmt)); ++// inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl) ++{ ++ tree p = TREE_VALUE(attr); ++ do { ++ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1); ++ p = TREE_CHAIN(p); ++ } while (p); ++} ++ ++static void handle_function_by_hash(gimple stmt, tree fndecl) ++{ ++ struct size_overflow_hash *hash; ++ expanded_location xloc; ++ ++ hash = get_function_hash(fndecl); ++ xloc = expand_location(DECL_SOURCE_LOCATION(fndecl)); ++ ++ fndecl = get_original_function_decl(fndecl); ++ if (!hash->name || !hash->file) ++ return; ++ if (strcmp(hash->name, NAME(fndecl)) || strcmp(hash->file, xloc.file)) ++ return; ++ ++#define search_param(argnum) \ ++ if (hash->param##argnum) \ ++ handle_function_arg(stmt, fndecl, argnum - 1); ++ ++ search_param(1); ++ search_param(2); ++ search_param(3); ++ search_param(4); ++ search_param(5); ++ search_param(6); ++ search_param(7); ++ search_param(8); ++ search_param(9); ++#undef search_param ++} ++ ++static unsigned int handle_function(void) ++{ ++ basic_block bb = ENTRY_BLOCK_PTR->next_bb; ++ int saved_last_basic_block = last_basic_block; ++ ++ do { ++ gimple_stmt_iterator gsi; ++ basic_block next = bb->next_bb; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ tree fndecl, attr; ++ gimple stmt = gsi_stmt(gsi); ++ ++ if (!(is_gimple_call(stmt))) ++ continue; ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (gimple_call_num_args(stmt) == 0) ++ continue; ++ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl))); ++ if (!attr || !TREE_VALUE(attr)) ++ handle_function_by_hash(stmt, fndecl); ++ else ++ handle_function_by_attribute(stmt, attr, fndecl); ++ gsi = gsi_for_stmt(stmt); ++ } ++ bb = next; ++ } while (bb && bb->index <= saved_last_basic_block); ++ return 0; ++} ++ ++static struct gimple_opt_pass size_overflow_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "size_overflow", ++ .gate = NULL, ++ .execute = handle_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg | PROP_referenced_vars, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow ++ } ++}; ++ ++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data) ++{ ++ tree fntype; ++ ++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); ++ ++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func) ++ fntype = build_function_type_list(void_type_node, ++ const_char_ptr_type_node, ++ unsigned_type_node, ++ const_char_ptr_type_node, ++ NULL_TREE); ++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); ++ ++ TREE_PUBLIC(report_size_overflow_decl) = 1; ++ DECL_EXTERNAL(report_size_overflow_decl) = 1; ++ 
DECL_ARTIFICIAL(report_size_overflow_decl) = 1; ++} ++ ++extern struct gimple_opt_pass pass_dce; ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ bool enable = true; ++ ++ struct register_pass_info size_overflow_pass_info = { ++ .pass = &size_overflow_pass.pass, ++ .reference_pass_name = "mudflap2", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ struct register_pass_info dce_pass_info = { ++ .pass = &pass_dce.pass, ++ .reference_pass_name = "mudflap2", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-size_overflow"))) { ++ enable = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info); ++ if (enable) { ++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c +new file mode 100644 +index 0000000..b87ec9d +--- /dev/null ++++ b/tools/gcc/stackleak_plugin.c +@@ -0,0 +1,313 @@ ++/* ++ * Copyright 2011 by the PaX Team pageexec@freemail.hu ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help implement various PaX features ++ * ++ * - track lowest stack pointer ++ * ++ * TODO: ++ * - initialize all local variables ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... 
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static int track_frame_size = -1; ++static const char track_function[] = "pax_track_stack"; ++static const char check_function[] = "pax_check_alloca"; ++static bool init_locals; ++ ++static struct plugin_info stackleak_plugin_info = { ++ .version = "201203140940", ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" ++// "initialize-locals\t\tforcibly initialize all stack frames\n" ++}; ++ ++static bool gate_stackleak_track_stack(void); ++static unsigned int execute_stackleak_tree_instrument(void); ++static unsigned int execute_stackleak_final(void); ++ ++static struct gimple_opt_pass stackleak_tree_instrument_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "stackleak_tree_instrument", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_tree_instrument, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "stackleak_final", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_final, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++ } ++}; ++ ++static bool gate_stackleak_track_stack(void) ++{ ++ return track_frame_size >= 0; ++} ++ ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) ++{ ++ gimple check_alloca; ++ tree fntype, fndecl, alloca_size; ++ ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); ++ fndecl = build_fn_decl(check_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_check_alloca(unsigned long size) ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); ++ check_alloca = gimple_build_call(fndecl, 1, alloca_size); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); ++} ++ ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) ++{ ++ gimple track_stack; ++ tree fntype, fndecl; ++ ++ fntype = build_function_type_list(void_type_node, NULL_TREE); ++ fndecl = build_fn_decl(track_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_track_stack(void) ++ track_stack = gimple_build_call(fndecl, 0); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); ++} ++ ++#if BUILDING_GCC_VERSION == 4005 ++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) ++{ ++ tree fndecl; ++ ++ if (!is_gimple_call(stmt)) ++ return false; ++ fndecl = gimple_call_fndecl(stmt); ++ if (!fndecl) ++ return false; ++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) ++ return false; ++// print_node(stderr, "pax", fndecl, 4); ++ return DECL_FUNCTION_CODE(fndecl) == code; ++} ++#endif ++ ++static bool is_alloca(gimple stmt) ++{ ++ if 
(gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) ++ return true; ++ ++#if BUILDING_GCC_VERSION >= 4007 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; ++#endif ++ ++ return false; ++} ++ ++static unsigned int execute_stackleak_tree_instrument(void) ++{ ++ basic_block bb, entry_bb; ++ bool prologue_instrumented = false, is_leaf = true; ++ ++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt; ++ ++ stmt = gsi_stmt(gsi); ++ ++ if (is_gimple_call(stmt)) ++ is_leaf = false; ++ ++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450> ++ if (!is_alloca(stmt)) ++ continue; ++ ++ // 2. insert stack overflow check before each __builtin_alloca call ++ stackleak_check_alloca(&gsi); ++ ++ // 3. insert track call after each __builtin_alloca call ++ stackleak_add_instrumentation(&gsi); ++ if (bb == entry_bb) ++ prologue_instrumented = true; ++ } ++ } ++ ++ // special cases for some bad linux code: taking the address of static inline functions will materialize them ++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI ++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI. ++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here. ++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl)) ++ return 0; ++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10)) ++ return 0; ++ ++ // 4. insert track call at the beginning ++ if (!prologue_instrumented) { ++ gimple_stmt_iterator gsi; ++ ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(bb); ++ stackleak_add_instrumentation(&gsi); ++ } ++ ++ return 0; ++} ++ ++static unsigned int execute_stackleak_final(void) ++{ ++ rtx insn; ++ ++ if (cfun->calls_alloca) ++ return 0; ++ ++ // keep calls only if function frame is big enough ++ if (get_frame_size() >= track_frame_size) ++ return 0; ++ ++ // 1. find pax_track_stack calls ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil)) ++ rtx body; ++ ++ if (!CALL_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) != CALL) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != MEM) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != SYMBOL_REF) ++ continue; ++ if (strcmp(XSTR(body, 0), track_function)) ++ continue; ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ // 2. 
delete call ++ insn = delete_insn_and_edges(insn); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION) ++ insn = delete_insn_and_edges(insn); ++#endif ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info stackleak_tree_instrument_pass_info = { ++ .pass = &stackleak_tree_instrument_pass.pass, ++// .reference_pass_name = "tree_profile", ++ .reference_pass_name = "optimized", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ struct register_pass_info stackleak_final_pass_info = { ++ .pass = &stackleak_final_rtl_opt_pass.pass, ++ .reference_pass_name = "final", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info); ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "track-lowest-sp")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ track_frame_size = atoi(argv[i].value); ++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0) ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ if (!strcmp(argv[i].key, "initialize-locals")) { ++ if (argv[i].value) { ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ init_locals = true; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info); ++ ++ return 0; ++} +diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h +index 6789d78..4afd019 100644 +--- a/tools/perf/util/include/asm/alternative-asm.h ++++ b/tools/perf/util/include/asm/alternative-asm.h +@@ -5,4 +5,7 @@ + + #define altinstruction_entry # + ++ .macro pax_force_retaddr rip=0, reload=0 ++ .endm ++ + #endif +diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c +index af0f22f..9a7d479 100644 +--- a/usr/gen_init_cpio.c ++++ b/usr/gen_init_cpio.c +@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location, + int retval; + int rc = -1; + int namesize; +- int i; ++ unsigned int i; + + mode |= S_IFREG; + +@@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location) + *env_var = *expanded = '\0'; + strncat(env_var, start + 2, end - start - 2); + strncat(expanded, new_location, start - new_location); +- strncat(expanded, getenv(env_var), PATH_MAX); +- strncat(expanded, end + 1, PATH_MAX); ++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded)); ++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded)); + 
strncpy(new_location, expanded, PATH_MAX); ++ new_location[PATH_MAX] = 0; + } else + break; + } +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index d9cfb78..4f27c10 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -75,7 +75,7 @@ LIST_HEAD(vm_list); + + static cpumask_var_t cpus_hardware_enabled; + static int kvm_usage_count = 0; +-static atomic_t hardware_enable_failed; ++static atomic_unchecked_t hardware_enable_failed; + + struct kmem_cache *kvm_vcpu_cache; + EXPORT_SYMBOL_GPL(kvm_vcpu_cache); +@@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk) + + if (r) { + cpumask_clear_cpu(cpu, cpus_hardware_enabled); +- atomic_inc(&hardware_enable_failed); ++ atomic_inc_unchecked(&hardware_enable_failed); + printk(KERN_INFO "kvm: enabling virtualization on " + "CPU%d failed\n", cpu); + } +@@ -2322,10 +2322,10 @@ static int hardware_enable_all(void) + + kvm_usage_count++; + if (kvm_usage_count == 1) { +- atomic_set(&hardware_enable_failed, 0); ++ atomic_set_unchecked(&hardware_enable_failed, 0); + on_each_cpu(hardware_enable_nolock, NULL, 1); + +- if (atomic_read(&hardware_enable_failed)) { ++ if (atomic_read_unchecked(&hardware_enable_failed)) { + hardware_disable_all_nolock(); + r = -EBUSY; + } +@@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, + kvm_arch_vcpu_put(vcpu); + } + +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module) + { + int r; +@@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + if (!vcpu_align) + vcpu_align = __alignof__(struct kvm_vcpu); + kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, +- 0, NULL); ++ SLAB_USERCOPY, NULL); + if (!kvm_vcpu_cache) { + r = -ENOMEM; + goto out_free_3; +@@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + if (r) + goto out_free; + +- kvm_chardev_ops.owner = module; +- kvm_vm_fops.owner = module; +- kvm_vcpu_fops.owner = module; ++ pax_open_kernel(); ++ *(void **)&kvm_chardev_ops.owner = module; ++ *(void **)&kvm_vm_fops.owner = module; ++ *(void **)&kvm_vcpu_fops.owner = module; ++ pax_close_kernel(); + + r = misc_register(&kvm_dev); + if (r) { diff --git a/kernel/patches/linux-3.1-bridge-master-device-stuck-in-no-carrier-state-forever-when-in-user-stp-mode.patch b/kernel/patches/linux-3.1-bridge-master-device-stuck-in-no-carrier-state-forever-when-in-user-stp-mode.patch deleted file mode 100644 index 2e3d327..0000000 --- a/kernel/patches/linux-3.1-bridge-master-device-stuck-in-no-carrier-state-forever-when-in-user-stp-mode.patch +++ /dev/null @@ -1,98 +0,0 @@ -From b03b6dd58cef7d15b7c46a6729b83dd535ef08ab Mon Sep 17 00:00:00 2001 -From: Vitalii Demianets vitas@nppfactor.kiev.ua -Date: Fri, 25 Nov 2011 00:16:37 +0000 -Subject: [PATCH] bridge: master device stuck in no-carrier state forever when - in user-stp mode - -When in user-stp mode, bridge master do not follow state of its slaves, so -after the following sequence of events it can stuck forever in no-carrier -state: -1) turn stp off -2) put all slaves down - master device will follow their state and also go in -no-carrier state -3) turn stp on with bridge-stp script returning 0 (go to the user-stp mode) -Now bridge master won't follow slaves' state and will never reach running -state. - -This patch solves the problem by making user-stp and kernel-stp behavior -similar regarding master following slaves' states. 
- -Signed-off-by: Vitalii Demianets vitas@nppfactor.kiev.ua -Acked-by: Stephen Hemminger shemminger@vyatta.com -Signed-off-by: David S. Miller davem@davemloft.net ---- - net/bridge/br_netlink.c | 6 ++++++ - net/bridge/br_stp.c | 29 ++++++++++++++--------------- - 2 files changed, 20 insertions(+), 15 deletions(-) - -diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c -index e5f9ece3..a1daf82 100644 ---- a/net/bridge/br_netlink.c -+++ b/net/bridge/br_netlink.c -@@ -18,6 +18,7 @@ - #include <net/sock.h> - - #include "br_private.h" -+#include "br_private_stp.h" - - static inline size_t br_nlmsg_size(void) - { -@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) - - p->state = new_state; - br_log_state(p); -+ -+ spin_lock_bh(&p->br->lock); -+ br_port_state_selection(p->br); -+ spin_unlock_bh(&p->br->lock); -+ - br_ifinfo_notify(RTM_NEWLINK, p); - - return 0; -diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c -index ad0a3f7..dd147d7 100644 ---- a/net/bridge/br_stp.c -+++ b/net/bridge/br_stp.c -@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br) - struct net_bridge_port *p; - unsigned int liveports = 0; - -- /* Don't change port states if userspace is handling STP */ -- if (br->stp_enabled == BR_USER_STP) -- return; -- - list_for_each_entry(p, &br->port_list, list) { - if (p->state == BR_STATE_DISABLED) - continue; - -- if (p->port_no == br->root_port) { -- p->config_pending = 0; -- p->topology_change_ack = 0; -- br_make_forwarding(p); -- } else if (br_is_designated_port(p)) { -- del_timer(&p->message_age_timer); -- br_make_forwarding(p); -- } else { -- p->config_pending = 0; -- p->topology_change_ack = 0; -- br_make_blocking(p); -+ /* Don't change port states if userspace is handling STP */ -+ if (br->stp_enabled != BR_USER_STP) { -+ if (p->port_no == br->root_port) { -+ p->config_pending = 0; -+ p->topology_change_ack = 0; -+ br_make_forwarding(p); -+ } else if (br_is_designated_port(p)) { -+ del_timer(&p->message_age_timer); -+ br_make_forwarding(p); -+ } else { -+ p->config_pending = 0; -+ p->topology_change_ack = 0; -+ br_make_blocking(p); -+ } - } - - if (p->state == BR_STATE_FORWARDING) --- -1.7.6.2 - diff --git a/kernel/scripts/configcommon.py b/kernel/scripts/configcommon.py new file mode 100755 index 0000000..54f355a --- /dev/null +++ b/kernel/scripts/configcommon.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +############################################################################### +# IPFire.org - An Open Source Firewall Solution # +# Copyright (C) - IPFire Development Team info@ipfire.org # +############################################################################### + +import re +import sys + +filelist = sys.argv[1:] + +lines = [] +options_state = {} +options_counter = {} + +first_file = True +for filename in filelist: + f = open(filename) + + for line in f.readlines(): + # Strip newline. 
+ line = line.rstrip() + + if line.startswith("# Automatically generated file;"): + continue + + if line.endswith("Kernel Configuration"): + continue + + option = value = None + + m = re.match("^# (.*) is not set$", line) + if m: + option = m.group(1) + value = "n" + + m = re.match("^(.*)=(.*)$", line) + if m: + option = m.group(1) + value = m.group(2) + + if option: + option_value = "%s=%s" % (option, value or "") + + try: + options_counter[option_value] += 1 + continue + + except KeyError: + options_counter[option_value] = 1 + + if first_file: + lines.append(line) + + f.close() + first_file = False + +for line in lines: + m = re.match("^# (.*) is not set$", line) + if m: + if options_counter.get("%s=n" % m.group(1), 0) == len(filelist): + print "# %s is not set" % m.group(1) + + continue + + m = re.match("^(.*)=(.*)$", line) + if m: + if options_counter.get(m.group(0), 0) == len(filelist): + print m.group(0) + + continue + + print line diff --git a/kernel/scripts/configdiff.py b/kernel/scripts/configdiff.py new file mode 100755 index 0000000..612e9bf --- /dev/null +++ b/kernel/scripts/configdiff.py @@ -0,0 +1,84 @@ +#!/usr/bin/python +############################################################################### +# IPFire.org - An Open Source Firewall Solution # +# Copyright (C) - IPFire Development Team info@ipfire.org # +############################################################################### + +import re +import sys + +filelist = sys.argv[1:] + +options = [] + +f = open(filelist[0]) +for line in f.readlines(): + # Strip newline. + line = line.rstrip() + + option = value = None + + m = re.match("^# (.*) is not set$", line) + if m: + option = m.group(1) + value = "n" + + m = re.match("^(.*)=(.*)$", line) + if m: + option = m.group(1) + value = m.group(2) + + if option: + option_value = "%s=%s" % (option, value or "") + options.append(option_value) + +f.close() + +f = open(filelist[1]) + +section = None +for line in f.readlines(): + m = re.match("^# (.*)$", line) + if m: + _section = m.group(1) + if not _section.startswith("CONFIG_") and \ + not _section.endswith("Kernel Configuration") and \ + not _section.startswith("Automatically generated file;"): + section = _section + + option = None + value = None + + m = re.match("^# (.*) is not set$", line) + if m: + option = m.group(1) + value = "n" + + m = re.match("^(.*)=(.*)$", line) + if m: + option = m.group(1) + value = m.group(2) + + if not option: + continue + + # Ignore all options CONFIG_HAVE_ because we cannot + # set them anyway. 
+ elif option.startswith("CONFIG_HAVE_"): + continue + + option_value = "%s=%s" % (option, value) + if not option_value in options: + if section: + print + print "#" + print "# %s" % section + print "#" + section = None + + if value == "n": + print "# %s is not set" % option + else: + print "%s=%s" % (option, value) + +f.close() diff --git a/kernel/scripts/configure b/kernel/scripts/configure new file mode 100755 index 0000000..e60bc83 --- /dev/null +++ b/kernel/scripts/configure @@ -0,0 +1,315 @@ +#!/bin/bash +############################################################################### +# IPFire.org - An Open Source Firewall Solution # +# Copyright (C) - IPFire Development Team info@ipfire.org # +############################################################################### + +BASEDIR=$(dirname ${0}) +SCRIPTS_DIR=${BASEDIR} + +CONFIGS="x86_64:default i686:legacy i686:default" +CONFIGS="${CONFIGS} armv7hl:omap armv5tel:versatile armv5tel:kirkwood" + +function merge_config() { + local arch=${1} + local flavour=${2} + local output=${3} + shift 3 + + local arg + for arg in arch flavour output; do + if [ -z "${!arg}" ]; then + echo >&2 "merge usage: <arch> <flavour> <output filename>" + exit 2 + fi + done + + local config_mode="oldnoconfig" + local extra_configs + while [ $# -gt 0 ]; do + case "${1}" in + --mode=*) + config_mode=${1#--mode=} + shift + ;; + -*) + echo >&2 "Unknown option: ${1}" + ;; + *) + extra_configs="${extra_configs} ${1}" + ;; + esac + shift + done + + local configs="${extra_configs} config-generic" + + case "${arch}:${flavour}" in + # x86 + x86_64:default) + configs="${configs} config-x86-generic config-x86_64-default" + ;; + i686:default) + configs="${configs} config-x86-generic config-i686-default" + ;; + i686:legacy) + configs="${configs} config-x86-generic config-i686-default" + configs="${configs} config-i686-legacy" + ;; + + # ARM + armv5tel:versatile) + configs="${configs} config-arm-generic" + ;; + armv5tel:kirkwood) + configs="${configs} config-arm-generic config-armv5tel-kirkwood" + ;; + armv7hl:omap) + configs="${configs} config-arm-generic config-armv7hl-omap" + ;; + *) + echo >&2 "ERROR: Invalid parameters given: $@" + return 1 + ;; + esac + + # Determine the kernel arch. + local kernel_arch= + case "${arch}" in + arm*) + kernel_arch="arm" + ;; + i?86|x86*) + kernel_arch="x86" + ;; + esac + + # Merge the configuration files from its elementary configuration + # files. + local tmp_out=$(mktemp) + local tmp_in=$(mktemp) + + local config + for config in ${configs}; do + cat ${tmp_out} > ${tmp_in} + perl ${SCRIPTS_DIR}/merge.pl \ + ${config} ${tmp_in} > ${tmp_out} + done + + if [ "${config_mode}" != "none" ]; then + echo "Running 'make oldnoconfig' for ${arch} (${flavour})..." + ( + cd ${KERNEL_DIR} + cat ${tmp_out} > .config + make ARCH="${kernel_arch}" ${config_mode} + cat .config > ${tmp_out} + ) + fi + + cat ${tmp_out} > ${output} + rm -f ${tmp_in} ${tmp_out} +} + +# This function runs an interactive "make oldconfig". +function make_oldconfig() { + local arch="x86_64" + local kernel_arch="x86" + local flavour="default" + + local config_in=$(mktemp) + local config_out=$(mktemp) + local diff_out=$(mktemp) + + merge_config ${arch} ${flavour} ${config_in} --mode=none + + ( + cd ${KERNEL_DIR} + cat ${config_in} > .config + + echo "You may now edit the configuration..." 
+ + local option + select option in oldconfig menuconfig oldnoconfig startover quit; do + case "${option}" in + oldconfig|menuconfig|oldnoconfig) + make ARCH=${kernel_arch} ${option} + ;; + startover) + cat ${config_in} > .config + ;; + quit) + break + ;; + esac + done + + cat .config > ${config_out} + ) + + ${SCRIPTS_DIR}/configdiff.py ${config_in} ${config_out} > ${diff_out} + + # Update the rest of the configurations. + diff_configs ${diff_out} --mode=oldconfig + + rm -f ${config_in} ${config_out} ${diff_out} +} + +# config-generic +# Intersection of all files. +# config-x86-generic +# Diff of (intersection of (i686-{default,legacy} and x86_64-default) +# against config-generic). +# config-x86-x86_64 +# Diff against merge of (config-generic and config-x86-generic). + +function diff_configs() { + local extra_configs="$@" + + declare -A arch_configs + + tmpdir=$(mktemp -d) + + for config in ${CONFIGS}; do + arch=${config%:*} + flavour=${config#*:} + + filename=${tmpdir}/config-${arch}-${flavour} + + merge_config ${arch} ${flavour} ${filename} ${extra_configs} + + # Do not include i686 legacy. + case "${config}" in + i686:legacy) + continue + ;; + armv5tel:versatile) + ;; + arm*) + continue + ;; + esac + + case "${arch}" in + x86*|i?86) + arch_configs[x86]="${arch_configs[x86]} ${filename}" + ;; + arm*) + arch_configs[arm]="${arch_configs[arm]} ${filename}" + ;; + *) + echo >&2 "ERROR: Invalid architecture: ${arch}" + ;; + esac + done + + common_configs="" + for arch in x86 arm; do + filename="${tmpdir}/config-${arch}-common" + ${SCRIPTS_DIR}/configcommon.py ${arch_configs[${arch}]} \ + > ${filename} + + common_configs="${common_configs} ${filename}" + done + + ${SCRIPTS_DIR}/configcommon.py ${common_configs} > ${tmpdir}/config-generic + + ${SCRIPTS_DIR}/configdiff.py ${tmpdir}/config-generic ${tmpdir}/config-arm-common \ + > ${tmpdir}/config-arm-generic + + ${SCRIPTS_DIR}/configdiff.py ${tmpdir}/config-generic ${tmpdir}/config-x86-common \ + > ${tmpdir}/config-x86-generic + + for config in ${CONFIGS}; do + arch=${config%:*} + flavour=${config#*:} + + case "${config}" in + arm*:*) + suparch="arm" + ;; + i?86:*|x86*:*) + suparch="x86" + ;; + esac + filename=${tmpdir}/config-${arch}-${flavour} + + case "${config}" in + i686:legacy) + # Legacy depends directly on the default configuration. + ${SCRIPTS_DIR}/configdiff.py ${tmpdir}/config-i686-default \ + ${filename} > ${filename}.tmp + ;; + armv5tel:versatile) + rm -f ${filename} + continue + ;; + *) + ${SCRIPTS_DIR}/configdiff.py ${tmpdir}/config-${suparch}-common \ + ${filename} > ${filename}.tmp + ;; + esac + mv ${filename}{.tmp,} + done + rm -f ${tmpdir}/config-*-common + + for config in ${tmpdir}/*; do + if ! cmp $(basename ${config}) ${config} &>/dev/null; then + echo "$(basename ${config}) has changed." + fi + cat ${config} > $(basename ${config}) + done + + rm -rf ${tmpdir} +} + +KERNEL_DIR= + +# Parse commandline. +while [ $# -gt 0 ]; do + arg=${1}; shift + case "${arg}" in + --kernel-dir=*) + KERNEL_DIR=${arg#--kernel-dir=} + ;; + help|"") + echo "${0} - available commands:" + echo " * merge <arch> <flavour> <output filename>" + echo " * update ..." + echo " * oldconfig" + echo "" + echo " You must always set --kernel-dir=..." + exit 0 + ;; + merge|oldconfig|update) + action=${arg} + break + ;; + esac +done + +if [ -z "${KERNEL_DIR}" ]; then + echo >&2 "--kernel-dir=... was not set!" + exit 2 +fi + +if [ -z "${action}" ]; then + echo >&2 "No action given... Try ${0} help." 
+ exit 2 +fi + +case "${action}" in + merge) + merge_config $@ + exit $? + ;; + oldconfig) + make_oldconfig + exit $? + ;; + update) + diff_configs $@ + exit $? + ;; +esac + +exit 1 diff --git a/kernel/scripts/merge.pl b/kernel/scripts/merge.pl new file mode 100644 index 0000000..8c31815 --- /dev/null +++ b/kernel/scripts/merge.pl @@ -0,0 +1,66 @@ +#! /usr/bin/perl + +my @args=@ARGV; +my %configvalues; +my @configoptions; +my $configcounter = 0; + +# optionally print out the architecture as the first line of our output +my $arch = $args[2]; +if (defined $arch) { + print "# $arch\n"; +} + +# first, read the override file + +open (FILE,"$args[0]") || die "Could not open $args[0]"; +while (<FILE>) { + my $str = $_; + my $configname; + + if (/# ([\w]+) is not set/) { + $configname = $1; + } elsif (/([\w]+)=/) { + $configname = $1; + } + + if (defined($configname) && !exists($configvalues{$configname})) { + $configvalues{$configname} = $str; + $configoptions[$configcounter] = $configname; + $configcounter ++; + } +}; + +# now, read and output the entire configfile, except for the overridden +# parts... for those the new value is printed. + +open (FILE2,"$args[1]") || die "Could not open $args[1]"; +while (<FILE2>) { + my $configname; + + if (/# ([\w]+) is not set/) { + $configname = $1; + } elsif (/([\w]+)=/) { + $configname = $1; + } + + if (defined($configname) && exists($configvalues{$configname})) { + print "$configvalues{$configname}"; + delete($configvalues{$configname}); + } else { + print "$_"; + } +} + +# now print the new values from the overridden configfile +my $counter = 0; + +while ($counter < $configcounter) { + my $configname = $configoptions[$counter]; + if (exists($configvalues{$configname})) { + print "$configvalues{$configname}"; + } + $counter++; +} + +1;
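For reference, a minimal usage sketch of the new kernel/scripts/configure helper added above; the kernel source path is hypothetical, while the commands and options are taken from the script's own help text and argument parsing:

    # Run from the kernel/ directory so the elementary config-* files are found.
    $ cd kernel

    # Interactively edit the x86_64/default configuration (oldconfig/menuconfig)
    # and propagate the resulting changes to the other architecture/flavour files:
    $ ./scripts/configure --kernel-dir=/path/to/linux-3.2 oldconfig

    # Build one flattened .config from its elementary configuration files,
    # e.g. for the i686 default flavour:
    $ ./scripts/configure --kernel-dir=/path/to/linux-3.2 merge i686 default .config

    # Recompute the generic and per-architecture splits after editing a fragment:
    $ ./scripts/configure --kernel-dir=/path/to/linux-3.2 update

Note that --kernel-dir= must always point at an unpacked kernel source tree, since the script runs "make oldconfig"/"make oldnoconfig" there and exits with an error if the option is missing.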
hooks/post-receive
--
IPFire 3.x development tree