author     Anthony G. Basile <blueness@gentoo.org>  2013-08-03 08:58:36 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2013-08-03 08:58:36 -0400
commit     0f2fb34d5390a932d51ab9a864c4df811720266a (patch)
tree       efafeade17fab8719026da7d34423291915ea907
parent     Grsec/PaX: 2.9.1-3.10.4-201308011855 (diff)
download   hardened-patchset-20130803.tar.gz
           hardened-patchset-20130803.tar.bz2
           hardened-patchset-20130803.zip
Grsec/PaX: 2.9.1-{2.6.32.61,3.2.50,3.10.4}-201308030031 (tag: 20130803)
-rw-r--r--  2.6.32/0000_README | 2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307181234.patch) | 859
-rw-r--r--  3.10.4/0000_README | 2
-rw-r--r--  3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch (renamed from 3.10.4/4420_grsecurity-2.9.1-3.10.4-201308011855.patch) | 352
-rw-r--r--  3.2.50/0000_README (renamed from 3.2.49/0000_README) | 6
-rw-r--r--  3.2.50/1021_linux-3.2.22.patch (renamed from 3.2.49/1021_linux-3.2.22.patch) | 0
-rw-r--r--  3.2.50/1022_linux-3.2.23.patch (renamed from 3.2.49/1022_linux-3.2.23.patch) | 0
-rw-r--r--  3.2.50/1023_linux-3.2.24.patch (renamed from 3.2.49/1023_linux-3.2.24.patch) | 0
-rw-r--r--  3.2.50/1024_linux-3.2.25.patch (renamed from 3.2.49/1024_linux-3.2.25.patch) | 0
-rw-r--r--  3.2.50/1025_linux-3.2.26.patch (renamed from 3.2.49/1025_linux-3.2.26.patch) | 0
-rw-r--r--  3.2.50/1026_linux-3.2.27.patch (renamed from 3.2.49/1026_linux-3.2.27.patch) | 0
-rw-r--r--  3.2.50/1027_linux-3.2.28.patch (renamed from 3.2.49/1027_linux-3.2.28.patch) | 0
-rw-r--r--  3.2.50/1028_linux-3.2.29.patch (renamed from 3.2.49/1028_linux-3.2.29.patch) | 0
-rw-r--r--  3.2.50/1029_linux-3.2.30.patch (renamed from 3.2.49/1029_linux-3.2.30.patch) | 0
-rw-r--r--  3.2.50/1030_linux-3.2.31.patch (renamed from 3.2.49/1030_linux-3.2.31.patch) | 0
-rw-r--r--  3.2.50/1031_linux-3.2.32.patch (renamed from 3.2.49/1031_linux-3.2.32.patch) | 0
-rw-r--r--  3.2.50/1032_linux-3.2.33.patch (renamed from 3.2.49/1032_linux-3.2.33.patch) | 0
-rw-r--r--  3.2.50/1033_linux-3.2.34.patch (renamed from 3.2.49/1033_linux-3.2.34.patch) | 0
-rw-r--r--  3.2.50/1034_linux-3.2.35.patch (renamed from 3.2.49/1034_linux-3.2.35.patch) | 0
-rw-r--r--  3.2.50/1035_linux-3.2.36.patch (renamed from 3.2.49/1035_linux-3.2.36.patch) | 0
-rw-r--r--  3.2.50/1036_linux-3.2.37.patch (renamed from 3.2.49/1036_linux-3.2.37.patch) | 0
-rw-r--r--  3.2.50/1037_linux-3.2.38.patch (renamed from 3.2.49/1037_linux-3.2.38.patch) | 0
-rw-r--r--  3.2.50/1038_linux-3.2.39.patch (renamed from 3.2.49/1038_linux-3.2.39.patch) | 0
-rw-r--r--  3.2.50/1039_linux-3.2.40.patch (renamed from 3.2.49/1039_linux-3.2.40.patch) | 0
-rw-r--r--  3.2.50/1040_linux-3.2.41.patch (renamed from 3.2.49/1040_linux-3.2.41.patch) | 0
-rw-r--r--  3.2.50/1041_linux-3.2.42.patch (renamed from 3.2.49/1041_linux-3.2.42.patch) | 0
-rw-r--r--  3.2.50/1042_linux-3.2.43.patch (renamed from 3.2.49/1042_linux-3.2.43.patch) | 0
-rw-r--r--  3.2.50/1043_linux-3.2.44.patch (renamed from 3.2.49/1043_linux-3.2.44.patch) | 0
-rw-r--r--  3.2.50/1044_linux-3.2.45.patch (renamed from 3.2.49/1044_linux-3.2.45.patch) | 0
-rw-r--r--  3.2.50/1045_linux-3.2.46.patch (renamed from 3.2.49/1045_linux-3.2.46.patch) | 0
-rw-r--r--  3.2.50/1046_linux-3.2.47.patch (renamed from 3.2.49/1046_linux-3.2.47.patch) | 0
-rw-r--r--  3.2.50/1047_linux-3.2.48.patch (renamed from 3.2.49/1047_linux-3.2.48.patch) | 0
-rw-r--r--  3.2.50/1048_linux-3.2.49.patch (renamed from 3.2.49/1048_linux-3.2.49.patch) | 0
-rw-r--r--  3.2.50/1049_linux-3.2.50.patch | 2495
-rw-r--r--  3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch (renamed from 3.2.49/4420_grsecurity-2.9.1-3.2.49-201307302311.patch) | 1093
-rw-r--r--  3.2.50/4425_grsec_remove_EI_PAX.patch (renamed from 3.2.49/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r--  3.2.50/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.2.49/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r--  3.2.50/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.49/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  3.2.50/4435_grsec-mute-warnings.patch (renamed from 3.2.49/4435_grsec-mute-warnings.patch) | 0
-rw-r--r--  3.2.50/4440_grsec-remove-protected-paths.patch (renamed from 3.2.49/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  3.2.50/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.49/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  3.2.50/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.49/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  3.2.50/4470_disable-compat_vdso.patch (renamed from 3.2.49/4470_disable-compat_vdso.patch) | 0
-rw-r--r--  3.2.50/4475_emutramp_default_on.patch (renamed from 3.2.49/4475_emutramp_default_on.patch) | 0
44 files changed, 4141 insertions, 668 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index db4457b..a0fb57e 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -38,7 +38,7 @@ Patch: 1060_linux-2.6.32.61.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.61
-Patch: 4420_grsecurity-2.9.1-2.6.32.61-201307181234.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307181234.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
index 4be9c03..d228405 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201307181234.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
@@ -86516,10 +86516,10 @@ index 0000000..2147ad0
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
-index 0000000..1b9afa9
+index 0000000..36845aa
--- /dev/null
+++ b/grsecurity/Makefile
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,42 @@
+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
+# during 2001-2009 it has been completely redesigned by Brad Spengler
+# into an RBAC system
@@ -86537,6 +86537,10 @@ index 0000000..1b9afa9
+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
+ gracl_learn.o grsec_log.o
++ifdef CONFIG_COMPAT
++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
++endif
++
+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
+
+ifdef CONFIG_NET
@@ -86560,10 +86564,10 @@ index 0000000..1b9afa9
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..56a4704
+index 0000000..38b465b
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4203 @@
+@@ -0,0 +1,4309 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -86597,6 +86601,7 @@ index 0000000..56a4704
+#include "../fs/btrfs/ctree.h"
+#include "../fs/btrfs/btrfs_inode.h"
+#endif
++#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/errno.h>
@@ -86665,6 +86670,144 @@ index 0000000..56a4704
+extern void gr_remove_uid(uid_t uid);
+extern int gr_find_uid(uid_t uid);
+
++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
++{
++ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
++ return -EFAULT;
++
++ return 0;
++}
++
++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
++{
++ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
++{
++ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
++ return -EFAULT;
++
++ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
++ return -EINVAL;
++
++ return 0;
++}
++
++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static size_t get_gr_arg_wrapper_size_normal(void)
++{
++ return sizeof(struct gr_arg_wrapper);
++}
++
++#ifdef CONFIG_COMPAT
++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
++extern size_t get_gr_arg_wrapper_size_compat(void);
++
++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
++int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
++size_t (* get_gr_arg_wrapper_size)(void) __read_only;
++
++#else
++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
++#define copy_gr_arg copy_gr_arg_normal
++#define copy_gr_hash_struct copy_gr_hash_struct_normal
++#define copy_acl_object_label copy_acl_object_label_normal
++#define copy_acl_subject_label copy_acl_subject_label_normal
++#define copy_acl_role_label copy_acl_role_label_normal
++#define copy_acl_ip_label copy_acl_ip_label_normal
++#define copy_pointer_from_array copy_pointer_from_array_normal
++#define copy_sprole_pw copy_sprole_pw_normal
++#define copy_role_transition copy_role_transition_normal
++#define copy_role_allowed_ip copy_role_allowed_ip_normal
++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
++#endif
++
+__inline__ int
+gr_acl_is_enabled(void)
+{
@@ -87610,33 +87753,34 @@ index 0000000..56a4704
+ return;
+}
+
-+static __u32
-+count_user_objs(struct acl_object_label *userp)
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++
++static int alloc_and_copy_string(char **name, unsigned int maxlen)
+{
-+ struct acl_object_label o_tmp;
-+ __u32 num = 0;
++ unsigned int len = strnlen_user(*name, maxlen);
++ char *tmp;
+
-+ while (userp) {
-+ if (copy_from_user(&o_tmp, userp,
-+ sizeof (struct acl_object_label)))
-+ break;
++ if (!len || len >= maxlen)
++ return -EINVAL;
+
-+ userp = o_tmp.prev;
-+ num++;
-+ }
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
+
-+ return num;
-+}
++ if (copy_from_user(tmp, *name, len))
++ return -EFAULT;
+
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++ tmp[len-1] = '\0';
++ *name = tmp;
++
++ return 0;
++}
+
+static int
+copy_user_glob(struct acl_object_label *obj)
+{
+ struct acl_object_label *g_tmp, **guser;
-+ unsigned int len;
-+ char *tmp;
++ int error;
+
+ if (obj->globbed == NULL)
+ return 0;
@@ -87648,22 +87792,12 @@ index 0000000..56a4704
+ if (g_tmp == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(g_tmp, *guser,
-+ sizeof (struct acl_object_label)))
++ if (copy_acl_object_label(g_tmp, *guser))
+ return -EFAULT;
+
-+ len = strnlen_user(g_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, g_tmp->filename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ g_tmp->filename = tmp;
++ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
++ if (error)
++ return error;
+
+ *guser = g_tmp;
+ guser = &(g_tmp->next);
@@ -87677,33 +87811,21 @@ index 0000000..56a4704
+ struct acl_role_label *role)
+{
+ struct acl_object_label *o_tmp;
-+ unsigned int len;
+ int ret;
-+ char *tmp;
+
+ while (userp) {
+ if ((o_tmp = (struct acl_object_label *)
+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(o_tmp, userp,
-+ sizeof (struct acl_object_label)))
++ if (copy_acl_object_label(o_tmp, userp))
+ return -EFAULT;
+
+ userp = o_tmp->prev;
+
-+ len = strnlen_user(o_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, o_tmp->filename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ o_tmp->filename = tmp;
++ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
++ if (ret)
++ return ret;
+
+ insert_acl_obj_label(o_tmp, subj);
+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
@@ -87740,8 +87862,7 @@ index 0000000..56a4704
+ __u32 num = 0;
+
+ while (userp) {
-+ if (copy_from_user(&s_tmp, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(&s_tmp, userp))
+ break;
+
+ userp = s_tmp.prev;
@@ -87764,8 +87885,7 @@ index 0000000..56a4704
+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(rtmp, ruserip,
-+ sizeof (struct role_allowed_ip)))
++ if (copy_role_allowed_ip(rtmp, ruserip))
+ return -EFAULT;
+
+ ruserip = rtmp->prev;
@@ -87789,9 +87909,7 @@ index 0000000..56a4704
+copy_user_transitions(struct acl_role_label *rolep)
+{
+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
-+
-+ unsigned int len;
-+ char *tmp;
++ int error;
+
+ rusertp = rolep->transitions;
+
@@ -87802,24 +87920,14 @@ index 0000000..56a4704
+ acl_alloc(sizeof (struct role_transition))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(rtmp, rusertp,
-+ sizeof (struct role_transition)))
++ if (copy_role_transition(rtmp, rusertp))
+ return -EFAULT;
+
+ rusertp = rtmp->prev;
+
-+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= GR_SPROLE_LEN)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, rtmp->rolename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ rtmp->rolename = tmp;
++ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
++ if (error)
++ return error;
+
+ if (!rlast) {
+ rtmp->prev = NULL;
@@ -87836,12 +87944,26 @@ index 0000000..56a4704
+ return 0;
+}
+
++static __u32 count_user_objs(const struct acl_object_label __user *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_acl_object_label(&o_tmp, userp))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
+{
+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
-+ unsigned int len;
-+ char *tmp;
+ __u32 num_objs;
+ struct acl_ip_label **i_tmp, *i_utmp2;
+ struct gr_hash_struct ghash;
@@ -87875,27 +87997,17 @@ index 0000000..56a4704
+ subjmap->kernel = s_tmp;
+ insert_subj_map_entry(subjmap);
+
-+ if (copy_from_user(s_tmp, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(s_tmp, userp))
+ return ERR_PTR(-EFAULT);
+
-+ len = strnlen_user(s_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return ERR_PTR(-EINVAL);
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return ERR_PTR(-ENOMEM);
-+
-+ if (copy_from_user(tmp, s_tmp->filename, len))
-+ return ERR_PTR(-EFAULT);
-+ tmp[len-1] = '\0';
-+ s_tmp->filename = tmp;
++ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
++ if (err)
++ return ERR_PTR(err);
+
+ if (!strcmp(s_tmp->filename, "/"))
+ role->root_label = s_tmp;
+
-+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
+ return ERR_PTR(-EFAULT);
+
+ /* copy user and group transition tables */
@@ -87976,28 +88088,18 @@ index 0000000..56a4704
+ if (!*(i_tmp + i_num))
+ return ERR_PTR(-ENOMEM);
+
-+ if (copy_from_user
-+ (&i_utmp2, s_tmp->ips + i_num,
-+ sizeof (struct acl_ip_label *)))
++ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
+ return ERR_PTR(-EFAULT);
+
-+ if (copy_from_user
-+ (*(i_tmp + i_num), i_utmp2,
-+ sizeof (struct acl_ip_label)))
++ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
+ return ERR_PTR(-EFAULT);
+
+ if ((*(i_tmp + i_num))->iface == NULL)
+ continue;
+
-+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
-+ if (!len || len >= IFNAMSIZ)
-+ return ERR_PTR(-EINVAL);
-+ tmp = acl_alloc(len);
-+ if (tmp == NULL)
-+ return ERR_PTR(-ENOMEM);
-+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
-+ return ERR_PTR(-EFAULT);
-+ (*(i_tmp + i_num))->iface = tmp;
++ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
++ if (err)
++ return ERR_PTR(err);
+ }
+
+ s_tmp->ips = i_tmp;
@@ -88018,8 +88120,7 @@ index 0000000..56a4704
+ int err;
+
+ while (userp) {
-+ if (copy_from_user(&s_pre, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(&s_pre, userp))
+ return -EFAULT;
+
+ ret = do_copy_user_subj(userp, role, NULL);
@@ -88045,8 +88146,6 @@ index 0000000..56a4704
+ struct gr_hash_struct *ghash;
+ uid_t *domainlist;
+ unsigned int r_num;
-+ unsigned int len;
-+ char *tmp;
+ int err = 0;
+ __u16 i;
+ __u32 num_subjs;
@@ -88067,26 +88166,17 @@ index 0000000..56a4704
+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
+ if (!sptmp)
+ return -ENOMEM;
-+ if (copy_from_user(sptmp, arg->sprole_pws + i,
-+ sizeof (struct sprole_pw)))
++ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
+ return -EFAULT;
+
-+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= GR_SPROLE_LEN)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, sptmp->rolename, len))
-+ return -EFAULT;
++ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
+
-+ tmp[len-1] = '\0';
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Copying special role %s\n", tmp);
++ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
+#endif
-+ sptmp->rolename = tmp;
++
+ acl_special_roles[i] = sptmp;
+ }
+
@@ -88098,27 +88188,15 @@ index 0000000..56a4704
+ if (!r_tmp)
+ return -ENOMEM;
+
-+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
-+ sizeof (struct acl_role_label *)))
++ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
+ return -EFAULT;
+
-+ if (copy_from_user(r_tmp, r_utmp2,
-+ sizeof (struct acl_role_label)))
-+ return -EFAULT;
-+
-+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, r_tmp->rolename, len))
++ if (copy_acl_role_label(r_tmp, r_utmp2))
+ return -EFAULT;
+
-+ tmp[len-1] = '\0';
-+ r_tmp->rolename = tmp;
++ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
+
+ if (!strcmp(r_tmp->rolename, "default")
+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
@@ -88130,7 +88208,7 @@ index 0000000..56a4704
+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
++ if (copy_gr_hash_struct(ghash, r_tmp->hash))
+ return -EFAULT;
+
+ r_tmp->hash = ghash;
@@ -89669,13 +89747,14 @@ index 0000000..56a4704
+}
+
+ssize_t
-+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
+{
+ struct gr_arg_wrapper uwrap;
+ unsigned char *sprole_salt = NULL;
+ unsigned char *sprole_sum = NULL;
-+ int error = sizeof (struct gr_arg_wrapper);
++ int error = 0;
+ int error2 = 0;
++ size_t req_count;
+
+ mutex_lock(&gr_dev_mutex);
+
@@ -89684,8 +89763,42 @@ index 0000000..56a4704
+ goto out;
+ }
+
-+ if (count != sizeof (struct gr_arg_wrapper)) {
-+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
++#ifdef CONFIG_COMPAT
++ pax_open_kernel();
++ if (is_compat_task()) {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++ copy_gr_arg = &copy_gr_arg_compat;
++ copy_acl_object_label = &copy_acl_object_label_compat;
++ copy_acl_subject_label = &copy_acl_subject_label_compat;
++ copy_acl_role_label = &copy_acl_role_label_compat;
++ copy_acl_ip_label = &copy_acl_ip_label_compat;
++ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++ copy_role_transition = &copy_role_transition_compat;
++ copy_sprole_pw = &copy_sprole_pw_compat;
++ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++ copy_pointer_from_array = &copy_pointer_from_array_compat;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++ } else {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++ copy_gr_arg = &copy_gr_arg_normal;
++ copy_acl_object_label = &copy_acl_object_label_normal;
++ copy_acl_subject_label = &copy_acl_subject_label_normal;
++ copy_acl_role_label = &copy_acl_role_label_normal;
++ copy_acl_ip_label = &copy_acl_ip_label_normal;
++ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++ copy_role_transition = &copy_role_transition_normal;
++ copy_sprole_pw = &copy_sprole_pw_normal;
++ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++ copy_pointer_from_array = &copy_pointer_from_array_normal;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++ }
++ pax_close_kernel();
++#endif
++
++ req_count = get_gr_arg_wrapper_size();
++
++ if (count != req_count) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
+ error = -EINVAL;
+ goto out;
+ }
@@ -89696,20 +89809,13 @@ index 0000000..56a4704
+ gr_auth_attempts = 0;
+ }
+
-+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
-+ error = -EFAULT;
-+ goto out;
-+ }
-+
-+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
-+ error = -EINVAL;
++ error = copy_gr_arg_wrapper(buf, &uwrap);
++ if (error)
+ goto out;
-+ }
+
-+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
-+ error = -EFAULT;
++ error = copy_gr_arg(uwrap.arg, gr_usermode);
++ if (error)
+ goto out;
-+ }
+
+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
@@ -89902,6 +90008,10 @@ index 0000000..56a4704
+
+ out:
+ mutex_unlock(&gr_dev_mutex);
++
++ if (!error)
++ error = req_count;
++
+ return error;
+}
+
@@ -90985,6 +91095,281 @@ index 0000000..955ddfb
+ return 0;
+}
+
+diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
+new file mode 100644
+index 0000000..a43dd06
+--- /dev/null
++++ b/grsecurity/gracl_compat.c
+@@ -0,0 +1,269 @@
++#include <linux/kernel.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++#include <linux/gracl_compat.h>
++
++#include <asm/uaccess.h>
++
++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
++{
++ struct gr_arg_wrapper_compat uwrapcompat;
++
++ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
++ return -EFAULT;
++
++ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
++ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
++ return -EINVAL;
++
++ uwrap->arg = compat_ptr(uwrapcompat.arg);
++ uwrap->version = uwrapcompat.version;
++ uwrap->size = sizeof(struct gr_arg);
++
++ return 0;
++}
++
++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ struct gr_arg_compat argcompat;
++
++ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
++ return -EFAULT;
++
++ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
++ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
++ arg->role_db.num_roles = argcompat.role_db.num_roles;
++ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
++ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
++ arg->role_db.num_objects = argcompat.role_db.num_objects;
++
++ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
++ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
++ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
++ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
++ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
++ arg->segv_device = argcompat.segv_device;
++ arg->segv_inode = argcompat.segv_inode;
++ arg->segv_uid = argcompat.segv_uid;
++ arg->num_sprole_pws = argcompat.num_sprole_pws;
++ arg->mode = argcompat.mode;
++
++ return 0;
++}
++
++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ struct acl_object_label_compat objcompat;
++
++ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
++ return -EFAULT;
++
++ obj->filename = compat_ptr(objcompat.filename);
++ obj->inode = objcompat.inode;
++ obj->device = objcompat.device;
++ obj->mode = objcompat.mode;
++
++ obj->nested = compat_ptr(objcompat.nested);
++ obj->globbed = compat_ptr(objcompat.globbed);
++
++ obj->prev = compat_ptr(objcompat.prev);
++ obj->next = compat_ptr(objcompat.next);
++
++ return 0;
++}
++
++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ unsigned int i;
++ struct acl_subject_label_compat subjcompat;
++
++ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
++ return -EFAULT;
++
++ subj->filename = compat_ptr(subjcompat.filename);
++ subj->inode = subjcompat.inode;
++ subj->device = subjcompat.device;
++ subj->mode = subjcompat.mode;
++ subj->cap_mask = subjcompat.cap_mask;
++ subj->cap_lower = subjcompat.cap_lower;
++ subj->cap_invert_audit = subjcompat.cap_invert_audit;
++
++ for (i = 0; i < GR_NLIMITS; i++) {
++ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_cur = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
++ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_max = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
++ }
++ subj->resmask = subjcompat.resmask;
++
++ subj->user_trans_type = subjcompat.user_trans_type;
++ subj->group_trans_type = subjcompat.group_trans_type;
++ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
++ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
++ subj->user_trans_num = subjcompat.user_trans_num;
++ subj->group_trans_num = subjcompat.group_trans_num;
++
++ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
++ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
++ subj->ip_type = subjcompat.ip_type;
++ subj->ips = compat_ptr(subjcompat.ips);
++ subj->ip_num = subjcompat.ip_num;
++ subj->inaddr_any_override = subjcompat.inaddr_any_override;
++
++ subj->crashes = subjcompat.crashes;
++ subj->expires = subjcompat.expires;
++
++ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
++ subj->hash = compat_ptr(subjcompat.hash);
++ subj->prev = compat_ptr(subjcompat.prev);
++ subj->next = compat_ptr(subjcompat.next);
++
++ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
++ subj->obj_hash_size = subjcompat.obj_hash_size;
++ subj->pax_flags = subjcompat.pax_flags;
++
++ return 0;
++}
++
++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ struct acl_role_label_compat rolecompat;
++
++ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
++ return -EFAULT;
++
++ role->rolename = compat_ptr(rolecompat.rolename);
++ role->uidgid = rolecompat.uidgid;
++ role->roletype = rolecompat.roletype;
++
++ role->auth_attempts = rolecompat.auth_attempts;
++ role->expires = rolecompat.expires;
++
++ role->root_label = compat_ptr(rolecompat.root_label);
++ role->hash = compat_ptr(rolecompat.hash);
++
++ role->prev = compat_ptr(rolecompat.prev);
++ role->next = compat_ptr(rolecompat.next);
++
++ role->transitions = compat_ptr(rolecompat.transitions);
++ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
++ role->domain_children = compat_ptr(rolecompat.domain_children);
++ role->domain_child_num = rolecompat.domain_child_num;
++
++ role->umask = rolecompat.umask;
++
++ role->subj_hash = compat_ptr(rolecompat.subj_hash);
++ role->subj_hash_size = rolecompat.subj_hash_size;
++
++ return 0;
++}
++
++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ struct role_allowed_ip_compat roleip_compat;
++
++ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
++ return -EFAULT;
++
++ roleip->addr = roleip_compat.addr;
++ roleip->netmask = roleip_compat.netmask;
++
++ roleip->prev = compat_ptr(roleip_compat.prev);
++ roleip->next = compat_ptr(roleip_compat.next);
++
++ return 0;
++}
++
++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
++{
++ struct role_transition_compat trans_compat;
++
++ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
++ return -EFAULT;
++
++ trans->rolename = compat_ptr(trans_compat.rolename);
++
++ trans->prev = compat_ptr(trans_compat.prev);
++ trans->next = compat_ptr(trans_compat.next);
++
++ return 0;
++
++}
++
++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ struct gr_hash_struct_compat hash_compat;
++
++ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
++ return -EFAULT;
++
++ hash->table = compat_ptr(hash_compat.table);
++ hash->nametable = compat_ptr(hash_compat.nametable);
++ hash->first = compat_ptr(hash_compat.first);
++
++ hash->table_size = hash_compat.table_size;
++ hash->used_size = hash_compat.used_size;
++
++ hash->type = hash_compat.type;
++
++ return 0;
++}
++
++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
++{
++ compat_uptr_t ptrcompat;
++
++ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
++ return -EFAULT;
++
++ *(void **)ptr = compat_ptr(ptrcompat);
++
++ return 0;
++}
++
++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ struct acl_ip_label_compat ip_compat;
++
++ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
++ return -EFAULT;
++
++ ip->iface = compat_ptr(ip_compat.iface);
++ ip->addr = ip_compat.addr;
++ ip->netmask = ip_compat.netmask;
++ ip->low = ip_compat.low;
++ ip->high = ip_compat.high;
++ ip->mode = ip_compat.mode;
++ ip->type = ip_compat.type;
++
++ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
++
++ ip->prev = compat_ptr(ip_compat.prev);
++ ip->next = compat_ptr(ip_compat.next);
++
++ return 0;
++}
++
++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ struct sprole_pw_compat pw_compat;
++
++ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
++ return -EFAULT;
++
++ pw->rolename = compat_ptr(pw_compat.rolename);
++ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
++ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
++
++ return 0;
++}
++
++size_t get_gr_arg_wrapper_size_compat(void)
++{
++ return sizeof(struct gr_arg_wrapper_compat);
++}
++
diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
new file mode 100644
index 0000000..5a3ac97
@@ -98191,6 +98576,168 @@ index 0000000..5f646cf
+
+#endif
+
+diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
+new file mode 100644
+index 0000000..33ebd1f
+--- /dev/null
++++ b/include/linux/gracl_compat.h
+@@ -0,0 +1,156 @@
++#ifndef GR_ACL_COMPAT_H
++#define GR_ACL_COMPAT_H
++
++#include <linux/resource.h>
++#include <asm/resource.h>
++
++struct sprole_pw_compat {
++ compat_uptr_t rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++};
++
++struct gr_hash_struct_compat {
++ compat_uptr_t table;
++ compat_uptr_t nametable;
++ compat_uptr_t first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++struct acl_subject_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++ kernel_cap_t cap_invert_audit;
++
++ struct compat_rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ compat_uptr_t user_transitions;
++ compat_uptr_t group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 sock_families[2];
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ compat_uptr_t ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ compat_ulong_t expires;
++
++ compat_uptr_t parent_subject;
++ compat_uptr_t hash;
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip_compat {
++ __u32 addr;
++ __u32 netmask;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct role_transition_compat {
++ compat_uptr_t rolename;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_role_label_compat {
++ compat_uptr_t rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ compat_ulong_t expires;
++
++ compat_uptr_t root_label;
++ compat_uptr_t hash;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t transitions;
++ compat_uptr_t allowed_ips;
++ compat_uptr_t domain_children;
++ __u16 domain_child_num;
++
++ umode_t umask;
++
++ compat_uptr_t subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db_compat {
++ compat_uptr_t r_table;
++ __u32 num_pointers;
++ __u32 num_roles;
++ __u32 num_domain_children;
++ __u32 num_subjects;
++ __u32 num_objects;
++};
++
++struct acl_object_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++
++ compat_uptr_t nested;
++ compat_uptr_t globbed;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_ip_label_compat {
++ compat_uptr_t iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct gr_arg_compat {
++ struct user_acl_role_db_compat role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ compat_uptr_t sprole_pws;
++ __u32 segv_device;
++ compat_ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper_compat {
++ compat_uptr_t arg;
++ __u32 version;
++ __u32 size;
++};
++
++#endif
diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
new file mode 100644
index 0000000..323ecf2
diff --git a/3.10.4/0000_README b/3.10.4/0000_README
index 52e9f3c..6952dd0 100644
--- a/3.10.4/0000_README
+++ b/3.10.4/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.9.1-3.10.4-201308011855.patch
+Patch: 4420_grsecurity-2.9.1-3.10.4-201308030031.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308011855.patch b/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch
index 589e333..9cf4026 100644
--- a/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308011855.patch
+++ b/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch
@@ -31769,7 +31769,7 @@ index 7c668c8..db3521c 100644
err = -EFAULT;
goto out;
diff --git a/block/genhd.c b/block/genhd.c
-index cdeb527..10aa34d 100644
+index cdeb527..10aa34db 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
@@ -48683,7 +48683,7 @@ index bce8769..7fc7544 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index f8a0b0e..989dbf2 100644
+index f8a0b0e..8c841c3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
@@ -48694,7 +48694,7 @@ index f8a0b0e..989dbf2 100644
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
-@@ -60,6 +61,10 @@ static int elf_core_dump(struct coredump_params *cprm);
+@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
#define elf_core_dump NULL
#endif
@@ -48702,10 +48702,14 @@ index f8a0b0e..989dbf2 100644
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
+#endif
+
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++static void elf_handle_mmap(struct file *file);
++#endif
++
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
-@@ -79,6 +84,11 @@ static struct linux_binfmt elf_format = {
+@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
.load_binary = load_elf_binary,
.load_shlib = load_elf_library,
.core_dump = elf_core_dump,
@@ -48714,10 +48718,14 @@ index f8a0b0e..989dbf2 100644
+ .handle_mprotect= elf_handle_mprotect,
+#endif
+
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ .handle_mmap = elf_handle_mmap,
++#endif
++
.min_coredump = ELF_EXEC_PAGESIZE,
};
-@@ -86,6 +96,8 @@ static struct linux_binfmt elf_format = {
+@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
static int set_brk(unsigned long start, unsigned long end)
{
@@ -48726,7 +48734,7 @@ index f8a0b0e..989dbf2 100644
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
-@@ -94,7 +106,7 @@ static int set_brk(unsigned long start, unsigned long end)
+@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
if (BAD_ADDR(addr))
return addr;
}
@@ -48735,7 +48743,7 @@ index f8a0b0e..989dbf2 100644
return 0;
}
-@@ -155,12 +167,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
elf_addr_t __user *u_rand_bytes;
const char *k_platform = ELF_PLATFORM;
const char *k_base_platform = ELF_BASE_PLATFORM;
@@ -48750,7 +48758,7 @@ index f8a0b0e..989dbf2 100644
/*
* In some cases (e.g. Hyper-Threading), we want to avoid L1
-@@ -202,8 +215,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
* Generate 16 random bytes for userspace PRNG seeding.
*/
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
@@ -48765,7 +48773,7 @@ index f8a0b0e..989dbf2 100644
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
-@@ -318,9 +335,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
current->mm->env_end = p;
@@ -48778,7 +48786,7 @@ index f8a0b0e..989dbf2 100644
return -EFAULT;
return 0;
}
-@@ -388,15 +407,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
@@ -48797,7 +48805,7 @@ index f8a0b0e..989dbf2 100644
unsigned long total_size;
int retval, i, size;
-@@ -442,6 +460,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
goto out_close;
}
@@ -48809,7 +48817,7 @@ index f8a0b0e..989dbf2 100644
eppnt = elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
-@@ -465,8 +488,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
map_addr = elf_map(interpreter, load_addr + vaddr,
eppnt, elf_prot, elf_type, total_size);
total_size = 0;
@@ -48818,7 +48826,7 @@ index f8a0b0e..989dbf2 100644
error = map_addr;
if (BAD_ADDR(map_addr))
goto out_close;
-@@ -485,8 +506,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
k = load_addr + eppnt->p_vaddr;
if (BAD_ADDR(k) ||
eppnt->p_filesz > eppnt->p_memsz ||
@@ -48829,7 +48837,7 @@ index f8a0b0e..989dbf2 100644
error = -ENOMEM;
goto out_close;
}
-@@ -538,6 +559,315 @@ out:
+@@ -538,6 +567,315 @@ out:
return error;
}
@@ -49145,7 +49153,7 @@ index f8a0b0e..989dbf2 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -554,6 +884,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -49157,7 +49165,7 @@ index f8a0b0e..989dbf2 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -572,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -49166,7 +49174,7 @@ index f8a0b0e..989dbf2 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -582,12 +917,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -49180,7 +49188,7 @@ index f8a0b0e..989dbf2 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -723,11 +1058,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out_free_dentry;
/* OK, This is the point of no return */
@@ -49263,7 +49271,7 @@ index f8a0b0e..989dbf2 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -819,6 +1224,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -49284,7 +49292,7 @@ index f8a0b0e..989dbf2 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -851,9 +1270,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -49297,7 +49305,7 @@ index f8a0b0e..989dbf2 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -892,17 +1311,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -49349,7 +49357,7 @@ index f8a0b0e..989dbf2 100644
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1124,7 +1571,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -49358,7 +49366,7 @@ index f8a0b0e..989dbf2 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1162,7 +1609,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -49367,7 +49375,7 @@ index f8a0b0e..989dbf2 100644
goto whole;
/*
-@@ -1387,9 +1834,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -49379,7 +49387,7 @@ index f8a0b0e..989dbf2 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1398,7 +1845,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -49388,7 +49396,7 @@ index f8a0b0e..989dbf2 100644
set_fs(old_fs);
fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
-@@ -2019,14 +2466,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -49405,7 +49413,7 @@ index f8a0b0e..989dbf2 100644
return size;
}
-@@ -2119,7 +2566,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -49414,7 +49422,7 @@ index f8a0b0e..989dbf2 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -2133,10 +2580,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -49427,7 +49435,7 @@ index f8a0b0e..989dbf2 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -2150,7 +2599,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -49436,7 +49444,7 @@ index f8a0b0e..989dbf2 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2161,6 +2610,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -49444,7 +49452,7 @@ index f8a0b0e..989dbf2 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2185,7 +2635,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -49453,7 +49461,7 @@ index f8a0b0e..989dbf2 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2194,6 +2644,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -49461,7 +49469,7 @@ index f8a0b0e..989dbf2 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2211,6 +2662,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -49469,7 +49477,7 @@ index f8a0b0e..989dbf2 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2231,6 +2683,138 @@ out:
+@@ -2231,6 +2691,167 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -49605,6 +49613,35 @@ index f8a0b0e..989dbf2 100644
+}
+#endif
+
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++
++extern int grsec_enable_log_rwxmaps;
++
++static void elf_handle_mmap(struct file *file)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++
++ if (!grsec_enable_log_rwxmaps)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
++ gr_log_ptgnustack(file);
++ }
++}
++#endif
++
static int __init init_elf_binfmt(void)
{
register_binfmt(&elf_format);
@@ -50818,7 +50855,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index ffd7a81..97f4c7d 100644
+index ffd7a81..d95acf6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,8 +55,20 @@
@@ -51300,7 +51337,7 @@ index ffd7a81..97f4c7d 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1701,3 +1875,281 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1701,3 +1875,285 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return error;
}
#endif
@@ -51409,6 +51446,10 @@ index ffd7a81..97f4c7d 100644
+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
+ if (vma_fault->vm_file)
+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++ else if (pc >= mm->start_brk && pc < mm->brk)
++ path_fault = "<heap>";
++ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++ path_fault = "<stack>";
+ else
+ path_fault = "<anonymous mapping>";
+ }
@@ -57361,10 +57402,10 @@ index ca9ecaa..60100c7 100644
kfree(s);
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..c9c4ac3
+index 0000000..712a85d
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1054 @@
+@@ -0,0 +1,1043 @@
+#
+# grecurity configuration
+#
@@ -58075,22 +58116,11 @@ index 0000000..c9c4ac3
+ help
+ If you say Y here, calls to mmap() and mprotect() with explicit
+ usage of PROT_WRITE and PROT_EXEC together will be logged when
-+ denied by the PAX_MPROTECT feature. If the sysctl option is
-+ enabled, a sysctl option with name "rwxmap_logging" is created.
-+
-+config GRKERNSEC_AUDIT_TEXTREL
-+ bool 'ELF text relocations logging (READ HELP)'
-+ depends on PAX_MPROTECT
-+ help
-+ If you say Y here, text relocations will be logged with the filename
-+ of the offending library or binary. The purpose of the feature is
-+ to help Linux distribution developers get rid of libraries and
-+ binaries that need text relocations which hinder the future progress
-+ of PaX. Only Linux distribution developers should say Y here, and
-+ never on a production machine, as this option creates an information
-+ leak that could aid an attacker in defeating the randomization of
-+ a single memory region. If the sysctl option is enabled, a sysctl
-+ option with name "audit_textrel" is created.
++ denied by the PAX_MPROTECT feature. This feature will also
++ log other problematic scenarios that can occur when PAX_MPROTECT
++ is enabled on a binary, like textrels and PT_GNU_STACK. If the
++ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++ is created.
+
+endmenu
+
@@ -65722,10 +65752,10 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..a862e9f
+index 0000000..ab2d875
--- /dev/null
+++ b/grsecurity/grsec_init.c
-@@ -0,0 +1,283 @@
+@@ -0,0 +1,279 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -65749,7 +65779,6 @@ index 0000000..a862e9f
+int grsec_enable_forkfail;
+int grsec_enable_audit_ptrace;
+int grsec_enable_time;
-+int grsec_enable_audit_textrel;
+int grsec_enable_group;
+kgid_t grsec_audit_gid;
+int grsec_enable_chdir;
@@ -65881,9 +65910,6 @@ index 0000000..a862e9f
+ grsec_lock = 1;
+#endif
+
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+ grsec_enable_audit_textrel = 1;
-+#endif
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+ grsec_enable_log_rwxmaps = 1;
+#endif
@@ -66075,15 +66101,16 @@ index 0000000..5e05e20
+}
diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
new file mode 100644
-index 0000000..7c06085
+index 0000000..dbe0a6b
--- /dev/null
+++ b/grsecurity/grsec_log.c
-@@ -0,0 +1,326 @@
+@@ -0,0 +1,341 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/tty.h>
+#include <linux/fs.h>
++#include <linux/mm.h>
+#include <linux/grinternal.h>
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -66230,6 +66257,7 @@ index 0000000..7c06085
+ struct vfsmount *mnt = NULL;
+ struct file *file = NULL;
+ struct task_struct *task = NULL;
++ struct vm_area_struct *vma = NULL;
+ const struct cred *cred, *pcred;
+ va_list ap;
+
@@ -66369,6 +66397,19 @@ index 0000000..7c06085
+ file = va_arg(ap, struct file *);
+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
+ break;
++ case GR_RWXMAPVMA:
++ vma = va_arg(ap, struct vm_area_struct *);
++ if (vma->vm_file)
++ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
++ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++ str1 = "<stack>";
++ else if (vma->vm_start <= current->mm->brk &&
++ vma->vm_end >= current->mm->start_brk)
++ str1 = "<heap>";
++ else
++ str1 = "<anonymous mapping>";
++ gr_log_middle_varargs(audit, msg, str1);
++ break;
+ case GR_PSACCT:
+ {
+ unsigned int wday, cday;
@@ -66521,10 +66562,10 @@ index 0000000..2131422
+}
diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
new file mode 100644
-index 0000000..a3b12a0
+index 0000000..6ee9d50
--- /dev/null
+++ b/grsecurity/grsec_pax.c
-@@ -0,0 +1,36 @@
+@@ -0,0 +1,45 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -66535,9 +66576,18 @@ index 0000000..a3b12a0
+void
+gr_log_textrel(struct vm_area_struct * vma)
+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+ if (grsec_enable_audit_textrel)
-+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++ return;
++}
++
++void gr_log_ptgnustack(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
+#endif
+ return;
+}
@@ -66553,11 +66603,11 @@ index 0000000..a3b12a0
+}
+
+void
-+gr_log_rwxmprotect(struct file *file)
++gr_log_rwxmprotect(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+ if (grsec_enable_log_rwxmaps)
-+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
++ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
+#endif
+ return;
+}
@@ -67101,10 +67151,10 @@ index 0000000..4030d57
+}
diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
new file mode 100644
-index 0000000..f55ef0f
+index 0000000..7624d1c
--- /dev/null
+++ b/grsecurity/grsec_sysctl.c
-@@ -0,0 +1,469 @@
+@@ -0,0 +1,460 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
@@ -67498,15 +67548,6 @@ index 0000000..f55ef0f
+ .proc_handler = &proc_dointvec,
+ },
+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+ {
-+ .procname = "audit_textrel",
-+ .data = &grsec_enable_audit_textrel,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
-+ },
-+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
+ {
+ .procname = "dmesg",
@@ -68506,14 +68547,15 @@ index c1da539..1dcec55 100644
struct atmphy_ops {
int (*start)(struct atm_dev *dev);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
-index 70cf138..cabb82e 100644
+index 70cf138..0418ee2 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
-@@ -73,8 +73,9 @@ struct linux_binfmt {
+@@ -73,8 +73,10 @@ struct linux_binfmt {
int (*load_binary)(struct linux_binprm *);
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
++ void (*handle_mmap)(struct file *);
unsigned long min_coredump; /* minimal dump size */
-};
+} __do_const;
@@ -70035,10 +70077,10 @@ index 0000000..be66033
+#endif
diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
new file mode 100644
-index 0000000..12994b5
+index 0000000..fd8598b
--- /dev/null
+++ b/include/linux/grinternal.h
-@@ -0,0 +1,227 @@
+@@ -0,0 +1,228 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
+
@@ -70114,7 +70156,6 @@ index 0000000..12994b5
+extern kgid_t grsec_socket_server_gid;
+extern kgid_t grsec_audit_gid;
+extern int grsec_enable_group;
-+extern int grsec_enable_audit_textrel;
+extern int grsec_enable_log_rwxmaps;
+extern int grsec_enable_mount;
+extern int grsec_enable_chdir;
@@ -70222,7 +70263,8 @@ index 0000000..12994b5
+ GR_CRASH1,
+ GR_CRASH2,
+ GR_PSACCT,
-+ GR_RWXMAP
++ GR_RWXMAP,
++ GR_RWXMAPVMA
+};
+
+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
@@ -70260,6 +70302,7 @@ index 0000000..12994b5
+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
+
+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
+
@@ -70268,10 +70311,10 @@ index 0000000..12994b5
+#endif
diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
new file mode 100644
-index 0000000..2f159b5
+index 0000000..a4396b5
--- /dev/null
+++ b/include/linux/grmsg.h
-@@ -0,0 +1,112 @@
+@@ -0,0 +1,113 @@
+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
@@ -70375,7 +70418,8 @@ index 0000000..2f159b5
+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
-+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
++#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
+#define GR_VM86_MSG "denied use of vm86 by "
+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
@@ -70386,10 +70430,10 @@ index 0000000..2f159b5
+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
new file mode 100644
-index 0000000..d957f6d
+index 0000000..3676b0b
--- /dev/null
+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,241 @@
+@@ -0,0 +1,242 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+#include <linux/fs.h>
@@ -70467,8 +70511,9 @@ index 0000000..d957f6d
+void gr_log_unmount(const char *devname, const int retval);
+void gr_log_mount(const char *from, const char *to, const int retval);
+void gr_log_textrel(struct vm_area_struct *vma);
++void gr_log_ptgnustack(struct file *file);
+void gr_log_rwxmmap(struct file *file);
-+void gr_log_rwxmprotect(struct file *file);
++void gr_log_rwxmprotect(struct vm_area_struct *vma);
+
+int gr_handle_follow_link(const struct inode *parent,
+ const struct inode *inode,
@@ -83238,7 +83283,7 @@ index 79b7cf7..9944291 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index f681e18..4c2577f 100644
+index f681e18..623110e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,7 @@
@@ -83479,12 +83524,19 @@ index f681e18..4c2577f 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1250,6 +1333,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+#ifdef CONFIG_PAX_MPROTECT
+ if (mm->pax_flags & MF_PAX_MPROTECT) {
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (file && (vm_flags & VM_EXEC) && mm->binfmt &&
++ mm->binfmt->handle_mmap)
++ mm->binfmt->handle_mmap(file);
++#endif
++
+#ifndef CONFIG_PAX_MPROTECT_COMPAT
+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+ gr_log_rwxmmap(file);
@@ -83516,7 +83568,7 @@ index f681e18..4c2577f 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1261,6 +1374,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -83524,7 +83576,7 @@ index f681e18..4c2577f 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1341,6 +1455,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -83534,7 +83586,7 @@ index f681e18..4c2577f 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1432,7 +1549,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -83543,7 +83595,7 @@ index f681e18..4c2577f 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1480,7 +1597,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long charged = 0;
struct inode *inode = file ? file_inode(file) : NULL;
@@ -83566,7 +83618,7 @@ index f681e18..4c2577f 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1499,11 +1631,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -83579,7 +83631,7 @@ index f681e18..4c2577f 100644
}
/*
-@@ -1534,6 +1665,16 @@ munmap_back:
+@@ -1534,6 +1672,16 @@ munmap_back:
goto unacct_error;
}
@@ -83596,7 +83648,7 @@ index f681e18..4c2577f 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1558,6 +1699,13 @@ munmap_back:
+@@ -1558,6 +1706,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -83610,7 +83662,7 @@ index f681e18..4c2577f 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1596,6 +1744,11 @@ munmap_back:
+@@ -1596,6 +1751,11 @@ munmap_back:
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -83622,7 +83674,7 @@ index f681e18..4c2577f 100644
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
atomic_inc(&inode->i_writecount);
-@@ -1603,6 +1756,7 @@ out:
+@@ -1603,6 +1763,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -83630,7 +83682,7 @@ index f681e18..4c2577f 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1626,6 +1780,12 @@ unmap_and_free_vma:
+@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -83643,7 +83695,7 @@ index f681e18..4c2577f 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1633,7 +1793,63 @@ unacct_error:
+@@ -1633,7 +1800,63 @@ unacct_error:
return error;
}
@@ -83708,7 +83760,7 @@ index f681e18..4c2577f 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1681,11 +1897,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -83739,7 +83791,7 @@ index f681e18..4c2577f 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1735,7 +1969,7 @@ found:
+@@ -1735,7 +1976,7 @@ found:
return gap_start;
}
@@ -83748,7 +83800,7 @@ index f681e18..4c2577f 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1789,6 +2023,24 @@ check_current:
+@@ -1789,6 +2030,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -83773,7 +83825,7 @@ index f681e18..4c2577f 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1852,6 +2104,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -83781,7 +83833,7 @@ index f681e18..4c2577f 100644
if (len > TASK_SIZE)
return -ENOMEM;
-@@ -1859,29 +2112,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -83830,7 +83882,7 @@ index f681e18..4c2577f 100644
mm->free_area_cache = addr;
}
-@@ -1899,6 +2168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -83838,7 +83890,7 @@ index f681e18..4c2577f 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1907,12 +2177,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -83856,7 +83908,7 @@ index f681e18..4c2577f 100644
return addr;
}
-@@ -1921,6 +2194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -83864,7 +83916,7 @@ index f681e18..4c2577f 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1933,6 +2207,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -83877,7 +83929,7 @@ index f681e18..4c2577f 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -1943,6 +2223,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -83890,7 +83942,7 @@ index f681e18..4c2577f 100644
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1950,8 +2236,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -83902,7 +83954,7 @@ index f681e18..4c2577f 100644
}
unsigned long
-@@ -2047,6 +2335,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -83931,7 +83983,7 @@ index f681e18..4c2577f 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2063,6 +2373,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -83939,7 +83991,7 @@ index f681e18..4c2577f 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2073,6 +2384,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -83947,7 +83999,7 @@ index f681e18..4c2577f 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2102,37 +2414,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -84005,7 +84057,7 @@ index f681e18..4c2577f 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2167,6 +2490,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -84014,7 +84066,7 @@ index f681e18..4c2577f 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
-@@ -2181,6 +2506,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -84023,7 +84075,7 @@ index f681e18..4c2577f 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2194,6 +2521,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -84039,7 +84091,7 @@ index f681e18..4c2577f 100644
vma_lock_anon_vma(vma);
/*
-@@ -2203,9 +2539,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -84058,7 +84110,7 @@ index f681e18..4c2577f 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2230,13 +2574,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -84086,7 +84138,7 @@ index f681e18..4c2577f 100644
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
return error;
-@@ -2334,6 +2692,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -84100,7 +84152,7 @@ index f681e18..4c2577f 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2379,6 +2744,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -84117,7 +84169,7 @@ index f681e18..4c2577f 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2410,14 +2785,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -84151,7 +84203,7 @@ index f681e18..4c2577f 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2430,6 +2824,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -84174,7 +84226,7 @@ index f681e18..4c2577f 100644
pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
err = PTR_ERR(pol);
-@@ -2452,6 +2862,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -84211,7 +84263,7 @@ index f681e18..4c2577f 100644
/* Success. */
if (!err)
return 0;
-@@ -2461,10 +2901,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -84231,7 +84283,7 @@ index f681e18..4c2577f 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2477,6 +2925,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -84247,7 +84299,7 @@ index f681e18..4c2577f 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2488,11 +2945,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -84278,7 +84330,7 @@ index f681e18..4c2577f 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2567,6 +3043,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -84287,7 +84339,7 @@ index f681e18..4c2577f 100644
return 0;
}
-@@ -2575,6 +3053,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -84301,7 +84353,7 @@ index f681e18..4c2577f 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2588,16 +3073,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -84318,7 +84370,7 @@ index f681e18..4c2577f 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2611,6 +3086,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -84326,7 +84378,7 @@ index f681e18..4c2577f 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2618,16 +3094,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -84358,7 +84410,7 @@ index f681e18..4c2577f 100644
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2644,21 +3134,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -84383,7 +84435,7 @@ index f681e18..4c2577f 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2672,7 +3161,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -84392,7 +84444,7 @@ index f681e18..4c2577f 100644
return -ENOMEM;
}
-@@ -2686,9 +3175,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -84405,7 +84457,7 @@ index f681e18..4c2577f 100644
return addr;
}
-@@ -2750,6 +3240,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -84413,7 +84465,7 @@ index f681e18..4c2577f 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2766,6 +3257,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -84427,7 +84479,7 @@ index f681e18..4c2577f 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2789,7 +3287,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -84449,7 +84501,7 @@ index f681e18..4c2577f 100644
return 0;
}
-@@ -2809,6 +3321,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct mempolicy *pol;
bool faulted_in_anon_vma = true;
@@ -84458,7 +84510,7 @@ index f681e18..4c2577f 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2875,6 +3389,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -84498,7 +84550,7 @@ index f681e18..4c2577f 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2886,6 +3433,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -84506,7 +84558,7 @@ index f681e18..4c2577f 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2956,6 +3504,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -84530,7 +84582,7 @@ index f681e18..4c2577f 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/mm/mprotect.c b/mm/mprotect.c
-index 94722a4..07d9926 100644
+index 94722a4..e661e29 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,10 +23,18 @@
@@ -84741,7 +84793,7 @@ index 94722a4..07d9926 100644
/* newflags >> 4 shift VM_MAY% in place of VM_% */
if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+ if (prot & (PROT_WRITE | PROT_EXEC))
-+ gr_log_rwxmprotect(vma->vm_file);
++ gr_log_rwxmprotect(vma);
+
+ error = -EACCES;
+ goto out;
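For context, the hunk above changes the denial path in mprotect() to log the whole VMA rather than only its backing file, so anonymous, heap and stack mappings get a readable name in the audit message. A minimal userspace sketch (illustrative only, not part of the patchset) of the kind of request that is denied and now logged on a kernel enforcing PaX MPROTECT:

#include <stdio.h>
#include <sys/mman.h>

/* Illustrative sketch, not part of the patchset: request RWX on an
 * existing read/write anonymous mapping.  Under PaX MPROTECT the
 * mprotect() call fails with EACCES and, with GRKERNSEC_RWXMAP_LOG
 * enabled, is reported against "<anonymous mapping>", "<heap>" or
 * "<stack>" depending on which VMA was hit. */
int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
		perror("mprotect");	/* expected: EACCES when MPROTECT is enforced */
	else
		puts("mprotect to RWX succeeded (MPROTECT not enforced here)");

	munmap(p, len);
	return 0;
}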
diff --git a/3.2.49/0000_README b/3.2.50/0000_README
index e27d48b..56552a3 100644
--- a/3.2.49/0000_README
+++ b/3.2.50/0000_README
@@ -114,7 +114,11 @@ Patch: 1048_linux-3.2.49.patch
From: http://www.kernel.org
Desc: Linux 3.2.49
-Patch: 4420_grsecurity-2.9.1-3.2.49-201307302311.patch
+Patch: 1049_linux-3.2.50.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.50
+
+Patch: 4420_grsecurity-2.9.1-3.2.50-201308030030.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.49/1021_linux-3.2.22.patch b/3.2.50/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.49/1021_linux-3.2.22.patch
+++ b/3.2.50/1021_linux-3.2.22.patch
diff --git a/3.2.49/1022_linux-3.2.23.patch b/3.2.50/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.49/1022_linux-3.2.23.patch
+++ b/3.2.50/1022_linux-3.2.23.patch
diff --git a/3.2.49/1023_linux-3.2.24.patch b/3.2.50/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.49/1023_linux-3.2.24.patch
+++ b/3.2.50/1023_linux-3.2.24.patch
diff --git a/3.2.49/1024_linux-3.2.25.patch b/3.2.50/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.49/1024_linux-3.2.25.patch
+++ b/3.2.50/1024_linux-3.2.25.patch
diff --git a/3.2.49/1025_linux-3.2.26.patch b/3.2.50/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.49/1025_linux-3.2.26.patch
+++ b/3.2.50/1025_linux-3.2.26.patch
diff --git a/3.2.49/1026_linux-3.2.27.patch b/3.2.50/1026_linux-3.2.27.patch
index 5878eb4..5878eb4 100644
--- a/3.2.49/1026_linux-3.2.27.patch
+++ b/3.2.50/1026_linux-3.2.27.patch
diff --git a/3.2.49/1027_linux-3.2.28.patch b/3.2.50/1027_linux-3.2.28.patch
index 4dbba4b..4dbba4b 100644
--- a/3.2.49/1027_linux-3.2.28.patch
+++ b/3.2.50/1027_linux-3.2.28.patch
diff --git a/3.2.49/1028_linux-3.2.29.patch b/3.2.50/1028_linux-3.2.29.patch
index 3c65179..3c65179 100644
--- a/3.2.49/1028_linux-3.2.29.patch
+++ b/3.2.50/1028_linux-3.2.29.patch
diff --git a/3.2.49/1029_linux-3.2.30.patch b/3.2.50/1029_linux-3.2.30.patch
index 86aea4b..86aea4b 100644
--- a/3.2.49/1029_linux-3.2.30.patch
+++ b/3.2.50/1029_linux-3.2.30.patch
diff --git a/3.2.49/1030_linux-3.2.31.patch b/3.2.50/1030_linux-3.2.31.patch
index c6accf5..c6accf5 100644
--- a/3.2.49/1030_linux-3.2.31.patch
+++ b/3.2.50/1030_linux-3.2.31.patch
diff --git a/3.2.49/1031_linux-3.2.32.patch b/3.2.50/1031_linux-3.2.32.patch
index 247fc0b..247fc0b 100644
--- a/3.2.49/1031_linux-3.2.32.patch
+++ b/3.2.50/1031_linux-3.2.32.patch
diff --git a/3.2.49/1032_linux-3.2.33.patch b/3.2.50/1032_linux-3.2.33.patch
index c32fb75..c32fb75 100644
--- a/3.2.49/1032_linux-3.2.33.patch
+++ b/3.2.50/1032_linux-3.2.33.patch
diff --git a/3.2.49/1033_linux-3.2.34.patch b/3.2.50/1033_linux-3.2.34.patch
index d647b38..d647b38 100644
--- a/3.2.49/1033_linux-3.2.34.patch
+++ b/3.2.50/1033_linux-3.2.34.patch
diff --git a/3.2.49/1034_linux-3.2.35.patch b/3.2.50/1034_linux-3.2.35.patch
index 76a9c19..76a9c19 100644
--- a/3.2.49/1034_linux-3.2.35.patch
+++ b/3.2.50/1034_linux-3.2.35.patch
diff --git a/3.2.49/1035_linux-3.2.36.patch b/3.2.50/1035_linux-3.2.36.patch
index 5d192a3..5d192a3 100644
--- a/3.2.49/1035_linux-3.2.36.patch
+++ b/3.2.50/1035_linux-3.2.36.patch
diff --git a/3.2.49/1036_linux-3.2.37.patch b/3.2.50/1036_linux-3.2.37.patch
index ad13251..ad13251 100644
--- a/3.2.49/1036_linux-3.2.37.patch
+++ b/3.2.50/1036_linux-3.2.37.patch
diff --git a/3.2.49/1037_linux-3.2.38.patch b/3.2.50/1037_linux-3.2.38.patch
index a3c106f..a3c106f 100644
--- a/3.2.49/1037_linux-3.2.38.patch
+++ b/3.2.50/1037_linux-3.2.38.patch
diff --git a/3.2.49/1038_linux-3.2.39.patch b/3.2.50/1038_linux-3.2.39.patch
index 5639e92..5639e92 100644
--- a/3.2.49/1038_linux-3.2.39.patch
+++ b/3.2.50/1038_linux-3.2.39.patch
diff --git a/3.2.49/1039_linux-3.2.40.patch b/3.2.50/1039_linux-3.2.40.patch
index f26b39c..f26b39c 100644
--- a/3.2.49/1039_linux-3.2.40.patch
+++ b/3.2.50/1039_linux-3.2.40.patch
diff --git a/3.2.49/1040_linux-3.2.41.patch b/3.2.50/1040_linux-3.2.41.patch
index 0d27fcb..0d27fcb 100644
--- a/3.2.49/1040_linux-3.2.41.patch
+++ b/3.2.50/1040_linux-3.2.41.patch
diff --git a/3.2.49/1041_linux-3.2.42.patch b/3.2.50/1041_linux-3.2.42.patch
index 77a08ed..77a08ed 100644
--- a/3.2.49/1041_linux-3.2.42.patch
+++ b/3.2.50/1041_linux-3.2.42.patch
diff --git a/3.2.49/1042_linux-3.2.43.patch b/3.2.50/1042_linux-3.2.43.patch
index a3f878b..a3f878b 100644
--- a/3.2.49/1042_linux-3.2.43.patch
+++ b/3.2.50/1042_linux-3.2.43.patch
diff --git a/3.2.49/1043_linux-3.2.44.patch b/3.2.50/1043_linux-3.2.44.patch
index 3d5e6ff..3d5e6ff 100644
--- a/3.2.49/1043_linux-3.2.44.patch
+++ b/3.2.50/1043_linux-3.2.44.patch
diff --git a/3.2.49/1044_linux-3.2.45.patch b/3.2.50/1044_linux-3.2.45.patch
index 44e1767..44e1767 100644
--- a/3.2.49/1044_linux-3.2.45.patch
+++ b/3.2.50/1044_linux-3.2.45.patch
diff --git a/3.2.49/1045_linux-3.2.46.patch b/3.2.50/1045_linux-3.2.46.patch
index bc10efd..bc10efd 100644
--- a/3.2.49/1045_linux-3.2.46.patch
+++ b/3.2.50/1045_linux-3.2.46.patch
diff --git a/3.2.49/1046_linux-3.2.47.patch b/3.2.50/1046_linux-3.2.47.patch
index b74563c..b74563c 100644
--- a/3.2.49/1046_linux-3.2.47.patch
+++ b/3.2.50/1046_linux-3.2.47.patch
diff --git a/3.2.49/1047_linux-3.2.48.patch b/3.2.50/1047_linux-3.2.48.patch
index 6d55b1f..6d55b1f 100644
--- a/3.2.49/1047_linux-3.2.48.patch
+++ b/3.2.50/1047_linux-3.2.48.patch
diff --git a/3.2.49/1048_linux-3.2.49.patch b/3.2.50/1048_linux-3.2.49.patch
index 2dab0cf..2dab0cf 100644
--- a/3.2.49/1048_linux-3.2.49.patch
+++ b/3.2.50/1048_linux-3.2.49.patch
diff --git a/3.2.50/1049_linux-3.2.50.patch b/3.2.50/1049_linux-3.2.50.patch
new file mode 100644
index 0000000..20b3015
--- /dev/null
+++ b/3.2.50/1049_linux-3.2.50.patch
@@ -0,0 +1,2495 @@
+diff --git a/Makefile b/Makefile
+index 2e3d791..0799e8e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
+index 0192a4e..80de64b 100644
+--- a/arch/powerpc/include/asm/module.h
++++ b/arch/powerpc/include/asm/module.h
+@@ -87,10 +87,9 @@ struct exception_table_entry;
+ void sort_ex_table(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+
+-#ifdef CONFIG_MODVERSIONS
++#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
+ #define ARCH_RELOCATES_KCRCTAB
+-
+-extern const unsigned long reloc_start[];
++#define reloc_start PHYSICAL_START
+ #endif
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MODULE_H */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 920276c..3e8fe4b 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
+ #endif
+ SECTIONS
+ {
+- . = 0;
+- reloc_start = .;
+-
+ . = KERNELBASE;
+
+ /*
+diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
+index 68f7e11..ce48203 100644
+--- a/arch/sparc/kernel/asm-offsets.c
++++ b/arch/sparc/kernel/asm-offsets.c
+@@ -34,6 +34,8 @@ int foo(void)
+ DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
+ BLANK();
+ DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
++ BLANK();
++ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+
+ /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
+ return 0;
+diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
+index 44aad32..969f964 100644
+--- a/arch/sparc/mm/hypersparc.S
++++ b/arch/sparc/mm/hypersparc.S
+@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
+
+ /* The things we do for performance... */
+ hypersparc_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
+ */
+ /* Verified, my ass... */
+ hypersparc_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %g2
+ #ifndef CONFIG_SMP
+ cmp %g2, -1
+@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 6ff4d78..b4989f9 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1071,7 +1071,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
+ m->size = *val;
+ val = mdesc_get_property(md, node,
+ "address-congruence-offset", NULL);
+- m->offset = *val;
++
++ /* The address-congruence-offset property is optional.
++	 * Explicitly zero it to identify this case.
++ */
++ if (val)
++ m->offset = *val;
++ else
++ m->offset = 0UL;
+
+ numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
+ count - 1, m->base, m->size, m->offset);
+diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
+index c801c39..5d2b88d 100644
+--- a/arch/sparc/mm/swift.S
++++ b/arch/sparc/mm/swift.S
+@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
+
+ .globl swift_flush_cache_range
+ swift_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ sub %o2, %o1, %o2
+ sethi %hi(4096), %o3
+ cmp %o2, %o3
+@@ -116,7 +116,7 @@ swift_flush_cache_range:
+
+ .globl swift_flush_cache_page
+ swift_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ 70:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -219,7 +219,7 @@ swift_flush_sig_insns:
+ .globl swift_flush_tlb_range
+ .globl swift_flush_tlb_all
+ swift_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ swift_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
+
+ .globl swift_flush_tlb_page
+ swift_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index afd021e..072f553 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -115,8 +115,8 @@ no_cache_flush:
+ }
+
+ if (!tb->active) {
+- global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
++ global_flush_tlb_page(mm, vaddr);
+ goto out;
+ }
+
+diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
+index 4e55e8f..bf10a34 100644
+--- a/arch/sparc/mm/tsunami.S
++++ b/arch/sparc/mm/tsunami.S
+@@ -24,7 +24,7 @@
+ /* Sliiick... */
+ tsunami_flush_cache_page:
+ tsunami_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_cache_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
+
+ /* More slick stuff... */
+ tsunami_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
+
+ /* This one can be done in a fine grained manner... */
+ tsunami_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
+index 6dfcc13..a516372 100644
+--- a/arch/sparc/mm/viking.S
++++ b/arch/sparc/mm/viking.S
+@@ -109,7 +109,7 @@ viking_mxcc_flush_page:
+ viking_flush_cache_page:
+ viking_flush_cache_range:
+ #ifndef CONFIG_SMP
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #endif
+ viking_flush_cache_mm:
+ #ifndef CONFIG_SMP
+@@ -149,7 +149,7 @@ viking_flush_tlb_mm:
+ #endif
+
+ viking_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -174,7 +174,7 @@ viking_flush_tlb_range:
+ #endif
+
+ viking_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -240,7 +240,7 @@ sun4dsmp_flush_tlb_range:
+ tst %g5
+ bne 3f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+@@ -266,7 +266,7 @@ sun4dsmp_flush_tlb_page:
+ tst %g5
+ bne 2f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ and %o1, PAGE_MASK, %o1
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index d985713..f81597f 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -421,6 +421,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
++ device->driver_data = NULL;
+ kfree(mem_device);
+ return result;
+ }
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 7a949af..5b0b5f7 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -352,7 +352,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 85fdd4b..2232b85 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -277,6 +277,7 @@ int xen_blkif_schedule(void *arg)
+ {
+ struct xen_blkif *blkif = arg;
+ struct xen_vbd *vbd = &blkif->vbd;
++ int ret;
+
+ xen_blkif_get(blkif);
+
+@@ -297,8 +298,12 @@ int xen_blkif_schedule(void *arg)
+ blkif->waiting_reqs = 0;
+ smp_mb(); /* clear flag *before* checking for work */
+
+- if (do_block_io_op(blkif))
++ ret = do_block_io_op(blkif);
++ if (ret > 0)
+ blkif->waiting_reqs = 1;
++ if (ret == -EACCES)
++ wait_event_interruptible(blkif->shutdown_wq,
++ kthread_should_stop());
+
+ if (log_stats && time_after(jiffies, blkif->st_print))
+ print_stats(blkif);
+@@ -539,6 +544,12 @@ __do_block_io_op(struct xen_blkif *blkif)
+ rp = blk_rings->common.sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
++ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
++ rc = blk_rings->common.rsp_prod_pvt;
++ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
++ rp, rc, rp - rc, blkif->vbd.pdevice);
++ return -EACCES;
++ }
+ while (rc != rp) {
+
+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index dfb1b3a..f67985d 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -198,6 +198,8 @@ struct xen_blkif {
+ int st_wr_sect;
+
+ wait_queue_head_t waiting_to_free;
++ /* Thread shutdown wait queue. */
++ wait_queue_head_t shutdown_wq;
+ };
+
+
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 674e3c2..77aed26 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -118,6 +118,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
+ atomic_set(&blkif->drain, 0);
+ blkif->st_print = jiffies;
+ init_waitqueue_head(&blkif->waiting_to_free);
++ init_waitqueue_head(&blkif->shutdown_wq);
+
+ return blkif;
+ }
+@@ -178,6 +179,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
+ {
+ if (blkif->xenblkd) {
+ kthread_stop(blkif->xenblkd);
++ wake_up(&blkif->shutdown_wq);
+ blkif->xenblkd = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c32fd93..8115557 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ enum radeon_combios_table_offset table)
+ {
+ struct radeon_device *rdev = dev->dev_private;
+- int rev;
++ int rev, size;
+ uint16_t offset = 0, check_offset;
+
+ if (!rdev->bios)
+@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ switch (table) {
+ /* absolute offset tables */
+ case COMBIOS_ASIC_INIT_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0xc;
+ break;
+ case COMBIOS_BIOS_SUPPORT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x14;
+ break;
+ case COMBIOS_DAC_PROGRAMMING_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2a;
+ break;
+ case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2c;
+ break;
+ case COMBIOS_CRTC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2e;
+ break;
+ case COMBIOS_PLL_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x30;
+ break;
+ case COMBIOS_TV_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x32;
+ break;
+ case COMBIOS_DFP_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x34;
+ break;
+ case COMBIOS_HW_CONFIG_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x36;
+ break;
+ case COMBIOS_MULTIMEDIA_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x38;
+ break;
+ case COMBIOS_TV_STD_PATCH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x3e;
+ break;
+ case COMBIOS_LCD_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x40;
+ break;
+ case COMBIOS_MOBILE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x42;
+ break;
+ case COMBIOS_PLL_INIT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x46;
+ break;
+ case COMBIOS_MEM_CONFIG_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x48;
+ break;
+ case COMBIOS_SAVE_MASK_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4a;
+ break;
+ case COMBIOS_HARDCODED_EDID_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4c;
+ break;
+ case COMBIOS_ASIC_INIT_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4e;
+ break;
+ case COMBIOS_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x50;
+ break;
+ case COMBIOS_DYN_CLK_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x52;
+ break;
+ case COMBIOS_RESERVED_MEM_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x54;
+ break;
+ case COMBIOS_EXT_TMDS_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x58;
+ break;
+ case COMBIOS_MEM_CLK_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5a;
+ break;
+ case COMBIOS_EXT_DAC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5c;
+ break;
+ case COMBIOS_MISC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5e;
+ break;
+ case COMBIOS_CRT_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x60;
+ break;
+ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x62;
+ break;
+ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x64;
+ break;
+ case COMBIOS_FAN_SPEED_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x66;
+ break;
+ case COMBIOS_OVERDRIVE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x68;
+ break;
+ case COMBIOS_OEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6a;
+ break;
+ case COMBIOS_DYN_CLK_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6c;
+ break;
+ case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6e;
+ break;
+ case COMBIOS_I2C_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x70;
+ break;
+ /* relative offset tables */
+ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ }
+ break;
+ default:
++ check_offset = 0;
+ break;
+ }
+
+- return offset;
++ size = RBIOS8(rdev->bios_header_start + 0x6);
++ /* check absolute offset tables */
++ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
++ offset = RBIOS16(rdev->bios_header_start + check_offset);
+
++ return offset;
+ }
+
+ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+@@ -953,16 +890,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+- /* if the values are all zeros, use the table */
+- if (p_dac->ps2_pdac_adj)
++ /* if the values are zeros, use the table */
++ if ((dac == 0) || (bg == 0))
++ found = 0;
++ else
+ found = 1;
+ }
+
+ /* quirks */
++ /* Radeon 7000 (RV100) */
++ if (((dev->pdev->device == 0x5159) &&
++ (dev->pdev->subsystem_vendor == 0x174B) &&
++ (dev->pdev->subsystem_device == 0x7c28)) ||
+ /* Radeon 9100 (R200) */
+- if ((dev->pdev->device == 0x514D) &&
++ ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+- (dev->pdev->subsystem_device == 0x7149)) {
++ (dev->pdev->subsystem_device == 0x7149))) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index 68fe73c..99b1145 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -186,6 +186,8 @@ static int __init dummy_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
++ if (err < 0)
++ goto out;
+
+ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
+@@ -193,6 +195,8 @@ static int __init dummy_init_module(void)
+ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index dd893b3..87851f0 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1685,8 +1685,8 @@ check_sum:
+ return 0;
+ }
+
+-static void atl1e_tx_map(struct atl1e_adapter *adapter,
+- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
++static int atl1e_tx_map(struct atl1e_adapter *adapter,
++ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+ {
+ struct atl1e_tpd_desc *use_tpd = NULL;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+@@ -1697,6 +1697,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ u16 nr_frags;
+ u16 f;
+ int segment;
++ int ring_start = adapter->tx_ring.next_to_use;
++ int ring_end;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+@@ -1709,6 +1711,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->length = map_len;
+ tx_buffer->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
++ return -ENOSPC;
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1735,6 +1740,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ map_len, PCI_DMA_TODEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ pci_unmap_single(adapter->pdev, tx_buffer->dma,
++ tx_buffer->length, PCI_DMA_TODEVICE);
++ }
++ /* Reset the tx rings next pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1770,6 +1791,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
++ tx_buffer->length, DMA_TO_DEVICE);
++ }
++
++ /* Reset the ring next to use pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+@@ -1787,6 +1825,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ /* The last buffer info contain the skb address,
+ so it will be free after unmap */
+ tx_buffer->skb = skb;
++ return 0;
+ }
+
+ static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+@@ -1854,10 +1893,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+- atl1e_tx_map(adapter, skb, tpd);
++ if (atl1e_tx_map(adapter, skb, tpd)) {
++ dev_kfree_skb_any(skb);
++ goto out;
++ }
++
+ atl1e_tx_queue(adapter, tpd_req, tpd);
+
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
++out:
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 9b23074..b2077ca 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -136,8 +136,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .rmcr_value = 0x00000001,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+- EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
++ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
++ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
++ EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+@@ -251,9 +252,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+@@ -355,9 +356,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index 47877b1..590705c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -461,7 +461,7 @@ enum EESR_BIT {
+
+ #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
++#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+ #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 8c6c059..bd08919 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1248,6 +1248,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
++
++ unregister_netdev(vp->dev);
+ }
+ return 0;
+ }
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 46b5f5f..b19841a 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -290,11 +290,17 @@ static int __init ifb_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&ifb_link_ops);
++ if (err < 0)
++ goto out;
+
+- for (i = 0; i < numifbs && !err; i++)
++ for (i = 0; i < numifbs && !err; i++) {
+ err = ifb_init_one(i);
++ cond_resched();
++ }
+ if (err)
+ __rtnl_link_unregister(&ifb_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 26106c0..96b9e3c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -532,8 +532,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+- for (i = 0; i < num_pages; i++)
+- put_page(page[i]);
++ int j;
++
++ for (j = 0; j < num_pages; j++)
++ put_page(page[i + j]);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+@@ -653,6 +655,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int vnet_hdr_len = 0;
+ int copylen = 0;
+ bool zerocopy = false;
++ size_t linear;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = q->vnet_hdr_sz;
+@@ -707,11 +710,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+- } else
++ linear = copylen;
++ } else {
+ copylen = len;
++ linear = vnet_hdr.hdr_len;
++ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+- vnet_hdr.hdr_len, noblock, &err);
++ linear, noblock, &err);
+ if (!skb)
+ goto err;
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6ee8410..43a6a11 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -508,7 +508,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ {
+ struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+ void *buf;
+- unsigned int len, received = 0;
++ unsigned int r, len, received = 0;
+
+ again:
+ while (received < budget &&
+@@ -525,8 +525,9 @@ again:
+
+ /* Out of packets? */
+ if (received < budget) {
++ r = virtqueue_enable_cb_prepare(vi->rvq);
+ napi_complete(napi);
+- if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
++ if (unlikely(virtqueue_poll(vi->rvq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(napi);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
+index 84a78af..182fcb2 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
+@@ -1788,7 +1788,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+- memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
++ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 66ad3dc..e294d11 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1038,6 +1038,7 @@ int isci_task_abort_task(struct sas_task *task)
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ int perform_termination = 0;
++ int target_done_already = 0;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+@@ -1052,9 +1053,11 @@ int isci_task_abort_task(struct sas_task *task)
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+- old_request)
++ old_request) {
+ isci_device = isci_lookup_device(task->dev);
+-
++ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
++ &old_request->flags);
++ }
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+@@ -1116,7 +1119,7 @@ int isci_task_abort_task(struct sas_task *task)
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ sas_protocol_ata(task->task_proto) ||
+- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
++ target_done_already) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index a4b267e..9fbe260 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -423,6 +423,8 @@ qla2x00_start_scsi(srb_t *sp)
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
++ } else {
++ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ }
+
+ /* Load SCSI command packet. */
+@@ -1244,11 +1246,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ break;
+ }
+ } else {
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ }
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+@@ -1454,7 +1456,12 @@ qla24xx_start_scsi(srb_t *sp)
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
++ default:
++ cmd_pkt->task = TSK_SIMPLE;
++ break;
+ }
++ } else {
++ cmd_pkt->task = TSK_SIMPLE;
+ }
+
+ /* Load SCSI command packet. */
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6dace1a..17603da 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -641,10 +641,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+
+ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+ {
++ struct scsi_cmnd *SCpnt = rq->special;
++
+ if (rq->cmd_flags & REQ_DISCARD) {
+ free_page((unsigned long)rq->buffer);
+ rq->buffer = NULL;
+ }
++ if (SCpnt->cmnd != rq->cmd) {
++ mempool_free(SCpnt->cmnd, sd_cdb_pool);
++ SCpnt->cmnd = NULL;
++ SCpnt->cmd_len = 0;
++ }
+ }
+
+ /**
+@@ -1452,21 +1459,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
+- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
+- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
+-
+- /* We have to print a failed command here as the
+- * extended CDB gets freed before scsi_io_completion()
+- * is called.
+- */
+- if (result)
+- scsi_print_command(SCpnt);
+-
+- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+- SCpnt->cmnd = NULL;
+- SCpnt->cmd_len = 0;
+- }
+-
+ return good_bytes;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index fe4dbf3..7e42190 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1078,22 +1078,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ DPRINTK("subdevice busy\n");
+ return -EBUSY;
+ }
+- s->busy = file;
+
+ /* make sure channel/gain list isn't too long */
+ if (user_cmd.chanlist_len > s->len_chanlist) {
+ DPRINTK("channel/gain list too long %u > %d\n",
+ user_cmd.chanlist_len, s->len_chanlist);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ /* make sure channel/gain list isn't too short */
+ if (user_cmd.chanlist_len < 1) {
+ DPRINTK("channel/gain list too short %u < 1\n",
+ user_cmd.chanlist_len);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ async->cmd = user_cmd;
+@@ -1103,8 +1100,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
+ if (!async->cmd.chanlist) {
+ DPRINTK("allocation failed\n");
+- ret = -ENOMEM;
+- goto cleanup;
++ return -ENOMEM;
+ }
+
+ if (copy_from_user(async->cmd.chanlist, user_cmd.chanlist,
+@@ -1156,6 +1152,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+
+ comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+
++ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
++ * comedi_read() or comedi_write() */
++ s->busy = file;
+ ret = s->do_cmd(dev, s);
+ if (ret == 0)
+ return 0;
+@@ -1370,6 +1369,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ void *file)
+ {
+ struct comedi_subdevice *s;
++ int ret;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+@@ -1386,7 +1386,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ if (s->busy != file)
+ return -EBUSY;
+
+- return do_cancel(dev, s);
++ ret = do_cancel(dev, s);
++ if (comedi_get_subdevice_runflags(s) & SRF_USER)
++ wake_up_interruptible(&s->async->wait_head);
++
++ return ret;
+ }
+
+ /*
+@@ -1653,6 +1657,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+ if (count == 0) {
++ mutex_lock(&dev->mutex);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+ retval = -EPIPE;
+@@ -1660,6 +1665,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ retval = 0;
+ }
+ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ break;
+ }
+@@ -1774,6 +1780,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+
+ if (n == 0) {
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
++ mutex_lock(&dev->mutex);
+ do_become_nonbusy(dev, s);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+@@ -1781,6 +1788,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ } else {
+ retval = 0;
+ }
++ mutex_unlock(&dev->mutex);
+ break;
+ }
+ if (file->f_flags & O_NONBLOCK) {
+@@ -1818,9 +1826,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ buf += n;
+ break; /* makes device work like a pipe */
+ }
+- if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+- async->buf_read_count - async->buf_write_count == 0) {
+- do_become_nonbusy(dev, s);
++ if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING))) {
++ mutex_lock(&dev->mutex);
++ if (async->buf_read_count - async->buf_write_count == 0)
++ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&async->wait_head, &wait);
+diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
+index 9d4c8a6..2d3a420 100644
+--- a/drivers/staging/line6/pcm.c
++++ b/drivers/staging/line6/pcm.c
+@@ -360,8 +360,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
+ */
+ static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
+ {
+- if (substream->runtime && snd_pcm_running(substream))
++ if (substream->runtime && snd_pcm_running(substream)) {
++ snd_pcm_stream_lock_irq(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ }
+
+ /*
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 22cbe06..2768a7e 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -463,6 +463,15 @@ resubmit:
+ static inline int
+ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+ {
++ /* Need to clear both directions for control ep */
++ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_CONTROL) {
++ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
++ devinfo ^ 0x8000, tt, NULL, 0, 1000);
++ if (status)
++ return status;
++ }
+ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
+ tt, NULL, 0, 1000);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 29a8e16..4795c0c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -643,8 +643,8 @@ struct dwc3 {
+
+ struct dwc3_event_type {
+ u32 is_devspec:1;
+- u32 type:6;
+- u32 reserved8_31:25;
++ u32 type:7;
++ u32 reserved8_31:24;
+ } __packed;
+
+ #define DWC3_DEPEVT_XFERCOMPLETE 0x01
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b368b83..619ee19 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1217,6 +1217,7 @@ err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+ err0:
++ dwc->gadget_driver = NULL;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index aca647a..79d2720 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -89,7 +89,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d08a804..633476e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -463,7 +463,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+- if (!(list_empty(&ep->ring->td_list)))
++ if (ep->ring && !(list_empty(&ep->ring->td_list)))
+ xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 136c357..6e1c92a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1153,9 +1153,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+
+ xhci = hcd_to_xhci(hcd);
+- if (xhci->xhc_state & XHCI_STATE_HALTED)
+- return -ENODEV;
+-
+ if (check_virt_dev) {
+ if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
+ printk(KERN_DEBUG "xHCI %s called with unaddressed "
+@@ -1171,6 +1168,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+ }
+
++ if (xhci->xhc_state & XHCI_STATE_HALTED)
++ return -ENODEV;
++
+ return 1;
+ }
+
+@@ -4178,6 +4178,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+
+ get_quirks(dev, xhci);
+
++ /* In xhci controllers which follow xhci 1.0 spec gives a spurious
++ * success event after a short transfer. This quirk will ignore such
++ * spurious event.
++ */
++ if (xhci->hci_version > 0x96)
++ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
++
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index dd573ab..7af163d 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
+ { USB_DEVICE(0x0711, 0x0903) },
+ { USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
++ { USB_DEVICE(0x0711, 0x0950) },
+ { USB_DEVICE(0x182d, 0x021c) },
+ { USB_DEVICE(0x182d, 0x0269) },
+ { }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 913a178..c408ff7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
++ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+@@ -124,6 +125,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
++ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -154,6 +157,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index e89ee48..5e8c736 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -925,20 +925,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
+ if (status < 0) {
+ dbg("Reading Spreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+
+ Data &= ~0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+ /* End of block to be checked */
+
+@@ -947,7 +947,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ &Data);
+ if (status < 0) {
+ dbg("Reading Controlreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x08; /* Driver done bit */
+ Data |= 0x20; /* rx_disable */
+@@ -955,7 +955,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ mos7840_port->ControlRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Controlreg failed");
+- return -1;
++ goto err;
+ }
+ /* do register settings here */
+ /* Set all regs to the device default values. */
+@@ -966,21 +966,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
+ if (status < 0) {
+ dbg("disabling interrupts failed");
+- return -1;
++ goto err;
+ }
+ /* Set FIFO_CONTROL_REGISTER to the default value */
+ Data = 0x00;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0xcf;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0x03;
+@@ -1136,7 +1136,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ dbg ("%s leave", __func__);
+
+ return 0;
+-
++err:
++ for (j = 0; j < NUM_URBS; ++j) {
++ urb = mos7840_port->write_urb_pool[j];
++ if (!urb)
++ continue;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ }
++ return status;
+ }
+
+ /*****************************************************************************
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index b8365a7..c2103f4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,17 +347,12 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
++#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+ #define CELOT_PRODUCT_CT680M 0x6801
+
+-/* ONDA Communication vendor id */
+-#define ONDA_VENDOR_ID 0x1ee8
+-
+-/* ONDA MT825UP HSDPA 14.2 modem */
+-#define ONDA_MT825UP 0x000b
+-
+ /* Samsung products */
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define SAMSUNG_PRODUCT_GT_B3730 0x6889
+@@ -450,7 +445,8 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Hyundai Petatel Inc. products */
+ #define PETATEL_VENDOR_ID 0x1ff4
+-#define PETATEL_PRODUCT_NP10T 0x600e
++#define PETATEL_PRODUCT_NP10T_600A 0x600a
++#define PETATEL_PRODUCT_NP10T_600E 0x600e
+
+ /* TP-LINK Incorporated products */
+ #define TPLINK_VENDOR_ID 0x2357
+@@ -797,6 +793,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+@@ -832,7 +829,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1278,8 +1276,8 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
+@@ -1351,9 +1349,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
+@@ -1361,6 +1362,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 9d3b39e..42038ba 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -408,7 +408,7 @@ static int ti_startup(struct usb_serial *serial)
+ usb_set_serial_data(serial, tdev);
+
+ /* determine device type */
+- if (usb_match_id(serial->interface, ti_id_table_3410))
++ if (serial->type == &ti_1port_device)
+ tdev->td_is_3410 = 1;
+ dbg("%s - device type is %s", __func__,
+ tdev->td_is_3410 ? "3410" : "5052");
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 7b8d564..8a3b531 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -657,6 +657,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
+
++/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
++UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
++ "Sony Corp.",
++ "MicroVault Flash Drive",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_READ_CAPACITY_16 ),
++
+ /* floppy reports multiple luns */
+ UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
+ "SAMSUNG",
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index dc2eed1..4a88ac3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -360,9 +360,22 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+-bool virtqueue_enable_cb(struct virtqueue *_vq)
++/**
++ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns current queue state
++ * in an opaque unsigned value. This value should be later tested by
++ * virtqueue_poll, to detect a possible race between the driver checking for
++ * more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
++ u16 last_used_idx;
+
+ START_USE(vq);
+
+@@ -372,15 +385,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+- vring_used_event(&vq->vring) = vq->last_used_idx;
++ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
++ END_USE(vq);
++ return last_used_idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
++
++/**
++ * virtqueue_poll - query pending used buffers
++ * @vq: the struct virtqueue we're talking about.
++ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
++ *
++ * Returns "true" if there are pending used buffers in the queue.
++ *
++ * This does not need to be serialized.
++ */
++bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
++{
++ struct vring_virtqueue *vq = to_vvq(_vq);
++
+ virtio_mb();
+- if (unlikely(more_used(vq))) {
+- END_USE(vq);
+- return false;
+- }
++ return (u16)last_used_idx != vq->vring.used->idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+- END_USE(vq);
+- return true;
++/**
++ * virtqueue_enable_cb - restart callbacks after disable_cb.
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns "false" if there are pending
++ * buffers in the queue, to detect a possible race between the driver
++ * checking for more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++bool virtqueue_enable_cb(struct virtqueue *_vq)
++{
++ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
++ return !virtqueue_poll(_vq, last_used_idx);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8d4d53d..49eefdb 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6560,6 +6560,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ int err = 0;
+ int ret;
+ int level;
++ bool root_dropped = false;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+@@ -6614,6 +6615,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
++ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+@@ -6627,6 +6629,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
++ path->locks[level] = 0;
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
+@@ -6707,11 +6710,21 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ free_extent_buffer(root->commit_root);
+ kfree(root);
+ }
++ root_dropped = true;
+ out_free:
+ btrfs_end_transaction_throttle(trans, tree_root);
+ kfree(wc);
+ btrfs_free_path(path);
+ out:
++ /*
++ * So if we need to stop dropping the snapshot for whatever reason we
++ * need to make sure to add it back to the dead root list so that we
++ * keep trying to do the work later. This also cleans up roots if we
++ * don't have it in the radix (like when we recover after a power fail
++ * or unmount) so we don't leak memory.
++ */
++ if (root_dropped == false)
++ btrfs_add_dead_root(root);
+ if (err)
+ btrfs_std_error(root->fs_info, err);
+ return;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9243103..9b8c131 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4696,11 +4696,16 @@ do_more:
+ * blocks being freed are metadata. these blocks shouldn't
+ * be used until this transaction is committed
+ */
++ retry:
+ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ if (!new_entry) {
+- ext4_mb_unload_buddy(&e4b);
+- err = -ENOMEM;
+- goto error_return;
++ /*
++ * We use a retry loop because
++ * ext4_free_blocks() is not allowed to fail.
++ */
++ cond_resched();
++ congestion_wait(BLK_RW_ASYNC, HZ/50);
++ goto retry;
+ }
+ new_entry->start_cluster = bit;
+ new_entry->group = block_group;
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index f0179c3..cd8703d 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -913,6 +913,7 @@ nlmsvc_retry_blocked(void)
+ unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
+ struct nlm_block *block;
+
++ spin_lock(&nlm_blocked_lock);
+ while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
+
+@@ -922,6 +923,7 @@ nlmsvc_retry_blocked(void)
+ timeout = block->b_when - jiffies;
+ break;
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+ block, block->b_when);
+@@ -931,7 +933,9 @@ nlmsvc_retry_blocked(void)
+ retry_deferred_block(block);
+ } else
+ nlmsvc_grant_blocked(block);
++ spin_lock(&nlm_blocked_lock);
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ return timeout;
+ }
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 1ec1fde..561a3dc 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -782,9 +782,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ }
+ *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
+ flags, current_cred());
+- if (IS_ERR(*filp))
++ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+- else
++ *filp = NULL;
++ } else
+ host_err = ima_file_check(*filp, access);
+ out_nfserr:
+ err = nfserrno(host_err);
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9fde1c0..9860f6b 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -118,6 +118,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->metadata_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
++ metadata->reserved = 0;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ if (unlikely(event->mask & FAN_Q_OVERFLOW))
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index b5f927f..732c962 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -128,11 +128,11 @@ struct pppoe_tag {
+
+ struct pppoe_hdr {
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 ver : 4;
+ __u8 type : 4;
++ __u8 ver : 4;
+ #elif defined(__BIG_ENDIAN_BITFIELD)
+- __u8 type : 4;
+ __u8 ver : 4;
++ __u8 type : 4;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 4c069d8b..96c7843 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -96,6 +96,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
+
+ bool virtqueue_enable_cb(struct virtqueue *vq);
+
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
++
++bool virtqueue_poll(struct virtqueue *vq, unsigned);
++
+ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index cbc6bb0..44b1110 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -81,6 +81,9 @@ extern int ipv6_dev_get_saddr(struct net *net,
+ const struct in6_addr *daddr,
+ unsigned int srcprefs,
+ struct in6_addr *saddr);
++extern int __ipv6_get_lladdr(struct inet6_dev *idev,
++ struct in6_addr *addr,
++ unsigned char banned_flags);
+ extern int ipv6_get_lladdr(struct net_device *dev,
+ struct in6_addr *addr,
+ unsigned char banned_flags);
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 3b285f4..e158330 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -180,6 +180,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
+ extern void udp_err(struct sk_buff *, u32);
+ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len);
++extern int udp_push_pending_frames(struct sock *sk);
+ extern void udp_flush_pending_frames(struct sock *sk);
+ extern int udp_rcv(struct sk_buff *skb);
+ extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+index 75271b9..7d28aff 100644
+--- a/include/xen/interface/io/ring.h
++++ b/include/xen/interface/io/ring.h
+@@ -188,6 +188,11 @@ struct __name##_back_ring { \
+ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
++/* Ill-behaved frontend determination: Can there be this many requests? */
++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
++ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
++
++
+ #define RING_PUSH_REQUESTS(_r) do { \
+ wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 0ec6c34..a584ad9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -631,7 +631,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+
+ memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+ max_data->pid = tsk->pid;
+- max_data->uid = task_uid(tsk);
++ /*
++ * If tsk == current, then use current_uid(), as that does not use
++ * RCU. The irq tracer can be called out of RCU scope.
++ */
++ if (tsk == current)
++ max_data->uid = current_uid();
++ else
++ max_data->uid = task_uid(tsk);
++
+ max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+ max_data->policy = tsk->policy;
+ max_data->rt_priority = tsk->rt_priority;
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 0cccca8..b40d3da 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -72,6 +72,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct vlan_priority_tci_mapping *mp;
+
++ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
++
+ mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+ while (mp) {
+ if (mp->priority == skb->priority) {
+@@ -232,6 +234,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
++ /* Before inserting this element in hash table, make sure all its fields
++ * are committed to memory.
++ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
++ */
++ smp_wmb();
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
+index de8df95..2ee3879 100644
+--- a/net/9p/trans_common.c
++++ b/net/9p/trans_common.c
+@@ -24,11 +24,11 @@
+ */
+ void p9_release_pages(struct page **pages, int nr_pages)
+ {
+- int i = 0;
+- while (pages[i] && nr_pages--) {
+- put_page(pages[i]);
+- i++;
+- }
++ int i;
++
++ for (i = 0; i < nr_pages; i++)
++ if (pages[i])
++ put_page(pages[i]);
+ }
+ EXPORT_SYMBOL(p9_release_pages);
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 5ac1811..b81500c 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -467,8 +467,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ skb_set_transport_header(skb, skb->len);
+ mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+- interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+- br->multicast_query_response_interval;
++ interval = ipv6_addr_any(group) ?
++ br->multicast_query_response_interval :
++ br->multicast_last_member_interval;
+
+ mldq->mld_type = ICMPV6_MGM_QUERY;
+ mldq->mld_code = 0;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5b9709f..0ea3fd3 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -237,7 +237,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+ we must kill timers etc. and move
+ it to safe state.
+ */
+- skb_queue_purge(&n->arp_queue);
++ __skb_queue_purge(&n->arp_queue);
+ n->output = neigh_blackhole;
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+@@ -291,7 +291,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+ if (!n)
+ goto out_entries;
+
+- skb_queue_head_init(&n->arp_queue);
++ __skb_queue_head_init(&n->arp_queue);
+ rwlock_init(&n->lock);
+ seqlock_init(&n->ha_lock);
+ n->updated = n->used = now;
+@@ -701,7 +701,9 @@ void neigh_destroy(struct neighbour *neigh)
+ if (neigh_del_timer(neigh))
+ printk(KERN_WARNING "Impossible event.\n");
+
+- skb_queue_purge(&neigh->arp_queue);
++ write_lock_bh(&neigh->lock);
++ __skb_queue_purge(&neigh->arp_queue);
++ write_unlock_bh(&neigh->lock);
+
+ dev_put(neigh->dev);
+ neigh_parms_put(neigh->parms);
+@@ -843,7 +845,7 @@ static void neigh_invalidate(struct neighbour *neigh)
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+
+ static void neigh_probe(struct neighbour *neigh)
+@@ -1176,7 +1178,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+
+ write_lock_bh(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+ out:
+ if (update_isrouter) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5a65eea..5decc93 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -766,7 +766,7 @@ send:
+ /*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+-static int udp_push_pending_frames(struct sock *sk)
++int udp_push_pending_frames(struct sock *sk)
+ {
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+@@ -785,6 +785,7 @@ out:
+ up->pending = 0;
+ return err;
+ }
++EXPORT_SYMBOL(udp_push_pending_frames);
+
+ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d603caa..314bda2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1236,6 +1236,23 @@ try_nextdev:
+ }
+ EXPORT_SYMBOL(ipv6_dev_get_saddr);
+
++int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
++ unsigned char banned_flags)
++{
++ struct inet6_ifaddr *ifp;
++ int err = -EADDRNOTAVAIL;
++
++ list_for_each_entry(ifp, &idev->addr_list, if_list) {
++ if (ifp->scope == IFA_LINK &&
++ !(ifp->flags & banned_flags)) {
++ ipv6_addr_copy(addr, &ifp->addr);
++ err = 0;
++ break;
++ }
++ }
++ return err;
++}
++
+ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+ {
+@@ -1245,17 +1262,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev) {
+- struct inet6_ifaddr *ifp;
+-
+ read_lock_bh(&idev->lock);
+- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+- if (ifp->scope == IFA_LINK &&
+- !(ifp->flags & banned_flags)) {
+- ipv6_addr_copy(addr, &ifp->addr);
+- err = 0;
+- break;
+- }
+- }
++ err = __ipv6_get_lladdr(idev, addr, banned_flags);
+ read_unlock_bh(&idev->lock);
+ }
+ rcu_read_unlock();
+@@ -2434,6 +2442,9 @@ static void init_loopback(struct net_device *dev)
+ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+ continue;
+
++ if (sp_ifa->rt)
++ continue;
++
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6aadaa8..db60043 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -909,11 +909,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
+ const struct flowi6 *fl6)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct rt6_info *rt = (struct rt6_info *)dst;
++ struct rt6_info *rt;
+
+ if (!dst)
+ goto out;
+
++ if (dst->ops->family != AF_INET6) {
++ dst_release(dst);
++ return NULL;
++ }
++
++ rt = (struct rt6_info *)dst;
+ /* Yes, checking route validity in not connected
+ * case is not very simple. Take into account,
+ * that we do not support routing by source, TOS,
+@@ -1178,11 +1184,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+ return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+ }
+
+-static void ip6_append_data_mtu(int *mtu,
++static void ip6_append_data_mtu(unsigned int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+- struct rt6_info *rt)
++ struct rt6_info *rt,
++ bool pmtuprobe)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+@@ -1194,7 +1201,9 @@ static void ip6_append_data_mtu(int *mtu,
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = dst_mtu(rt->dst.path);
++ *mtu = min(*mtu, pmtuprobe ?
++ rt->dst.dev->mtu :
++ dst_mtu(rt->dst.path));
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1211,11 +1220,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen;
++ unsigned int maxfraglen, fragheaderlen, mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+- int mtu;
+ int copy;
+ int err;
+ int offset = 0;
+@@ -1378,7 +1386,9 @@ alloc_new_skb:
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+- fragheaderlen, skb, rt);
++ fragheaderlen, skb, rt,
++ np->pmtudisc ==
++ IPV6_PMTUDISC_PROBE);
+
+ skb_prev = skb;
+
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index f2d74ea..c7ec4bb 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1334,8 +1334,9 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
+ return scount;
+ }
+
+-static struct sk_buff *mld_newpack(struct net_device *dev, int size)
++static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+ {
++ struct net_device *dev = idev->dev;
+ struct net *net = dev_net(dev);
+ struct sock *sk = net->ipv6.igmp_sk;
+ struct sk_buff *skb;
+@@ -1358,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
+
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+- if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
++ if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ /* <draft-ietf-magma-mld-source-05.txt>:
+ * use unspecified address as the source address
+ * when a valid link-local address is not available.
+@@ -1461,7 +1462,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ struct mld2_grec *pgr;
+
+ if (!skb)
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(pmc->idev, dev->mtu);
+ if (!skb)
+ return NULL;
+ pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+@@ -1481,7 +1482,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ int type, int gdeleted, int sdeleted)
+ {
+- struct net_device *dev = pmc->idev->dev;
++ struct inet6_dev *idev = pmc->idev;
++ struct net_device *dev = idev->dev;
+ struct mld2_report *pmr;
+ struct mld2_grec *pgr = NULL;
+ struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+@@ -1510,7 +1512,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ }
+ }
+ first = 1;
+@@ -1537,7 +1539,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ pgr->grec_nsrcs = htons(scount);
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ first = 1;
+ scount = 0;
+ }
+@@ -1592,8 +1594,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ struct sk_buff *skb = NULL;
+ int type;
+
++ read_lock_bh(&idev->lock);
+ if (!pmc) {
+- read_lock_bh(&idev->lock);
+ for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ if (pmc->mca_flags & MAF_NOREPORT)
+ continue;
+@@ -1605,7 +1607,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
+- read_unlock_bh(&idev->lock);
+ } else {
+ spin_lock_bh(&pmc->mca_lock);
+ if (pmc->mca_sfcount[MCAST_EXCLUDE])
+@@ -1615,6 +1616,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
++ read_unlock_bh(&idev->lock);
+ if (skb)
+ mld_sendpack(skb);
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 20f0812..f9e496b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -893,11 +893,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
+ struct udphdr *uh;
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
++ struct flowi6 *fl6;
+ int err = 0;
+ int is_udplite = IS_UDPLITE(sk);
+ __wsum csum = 0;
+
++ if (up->pending == AF_INET)
++ return udp_push_pending_frames(sk);
++
++ fl6 = &inet->cork.fl.u.ip6;
++
+ /* Grab the skbuff where UDP header space exists. */
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ goto out;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 1e733e9..6fefdfc 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+
+@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ return 0;
+
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 74410e6..e579006 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1778,7 +1778,8 @@ static const struct proto_ops pppol2tp_ops = {
+
+ static const struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+- .ioctl = pppol2tp_ioctl
++ .ioctl = pppol2tp_ioctl,
++ .owner = THIS_MODULE,
+ };
+
+ #ifdef CONFIG_L2TP_V3
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 3e16c6a..dc24ba9 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1586,11 +1586,11 @@ out_cud_release:
+ case SIOCX25CALLACCPTAPPRV: {
+ rc = -EINVAL;
+ lock_sock(sk);
+- if (sk->sk_state != TCP_CLOSE)
+- break;
+- clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ if (sk->sk_state == TCP_CLOSE) {
++ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ rc = 0;
++ }
+ release_sock(sk);
+- rc = 0;
+ break;
+ }
+
+@@ -1598,14 +1598,15 @@ out_cud_release:
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_ESTABLISHED)
+- break;
++ goto out_sendcallaccpt_release;
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+- break;
++ goto out_sendcallaccpt_release;
+ x25_write_internal(sk, X25_CALL_ACCEPTED);
+ x25->state = X25_STATE_3;
+- release_sock(sk);
+ rc = 0;
++out_sendcallaccpt_release:
++ release_sock(sk);
+ break;
+ }
+
+diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
+index 76e0d56..823359e 100644
+--- a/sound/arm/pxa2xx-pcm-lib.c
++++ b/sound/arm/pxa2xx-pcm-lib.c
+@@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
+ } else {
+ printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
+ rtd->params->name, dma_ch, dcsr);
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ }
+ }
+ EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
+diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
+index f4b9e2b..fbf0bcd 100644
+--- a/sound/pci/asihpi/asihpi.c
++++ b/sound/pci/asihpi/asihpi.c
+@@ -768,7 +768,10 @@ static void snd_card_asihpi_timer_function(unsigned long data)
+ s->number);
+ ds->drained_count++;
+ if (ds->drained_count > 2) {
++ unsigned long flags;
++ snd_pcm_stream_lock_irqsave(s, flags);
+ snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(s, flags);
+ continue;
+ }
+ } else {
+diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
+index 15e4e5e..6faa173 100644
+--- a/sound/pci/atiixp.c
++++ b/sound/pci/atiixp.c
+@@ -688,7 +688,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
+index 57bf8f4..d752120 100644
+--- a/sound/pci/atiixp_modem.c
++++ b/sound/pci/atiixp_modem.c
+@@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d148a2b..55d9b30 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1897,6 +1897,8 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
+@@ -1943,6 +1945,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0041");
+ MODULE_ALIAS("snd-hda-codec-id:10de0042");
+ MODULE_ALIAS("snd-hda-codec-id:10de0043");
+ MODULE_ALIAS("snd-hda-codec-id:10de0044");
++MODULE_ALIAS("snd-hda-codec-id:10de0051");
++MODULE_ALIAS("snd-hda-codec-id:10de0060");
+ MODULE_ALIAS("snd-hda-codec-id:10de0067");
+ MODULE_ALIAS("snd-hda-codec-id:10de8001");
+ MODULE_ALIAS("snd-hda-codec-id:17e80047");
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index b7cf246..d58c575 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -1595,7 +1595,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+
+ static void max98088_sync_cache(struct snd_soc_codec *codec)
+ {
+- u16 *reg_cache = codec->reg_cache;
++ u8 *reg_cache = codec->reg_cache;
+ int i;
+
+ if (!codec->cache_sync)
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index bbcf921..b5d4a97 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -38,7 +38,7 @@
+ static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
+ [SGTL5000_CHIP_CLK_CTRL] = 0x0008,
+ [SGTL5000_CHIP_I2S_CTRL] = 0x0010,
+- [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
++ [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
+ [SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
+ [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
+ [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index 8a9f435..d3a68bb 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -347,7 +347,7 @@
+ #define SGTL5000_PLL_INT_DIV_MASK 0xf800
+ #define SGTL5000_PLL_INT_DIV_SHIFT 11
+ #define SGTL5000_PLL_INT_DIV_WIDTH 5
+-#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700
++#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff
+ #define SGTL5000_PLL_FRAC_DIV_SHIFT 0
+ #define SGTL5000_PLL_FRAC_DIV_WIDTH 11
+
+diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
+index 55efc2b..75babae 100644
+--- a/sound/soc/s6000/s6000-pcm.c
++++ b/sound/soc/s6000/s6000-pcm.c
+@@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data)
+ substream->runtime &&
+ snd_pcm_running(substream)) {
+ dev_dbg(pcm->dev, "xrun\n");
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ ret = IRQ_HANDLED;
+ }
+
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index d144cdb..888a7c7 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -541,7 +541,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
+ snd_pcm_uframes_t ret;
+
+ if (rt->panic || !sub)
+- return SNDRV_PCM_STATE_XRUN;
++ return SNDRV_PCM_POS_XRUN;
+
+ spin_lock_irqsave(&sub->lock, flags);
+ ret = sub->dma_off;
+@@ -640,17 +640,25 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ void usb6fire_pcm_abort(struct sfire_chip *chip)
+ {
+ struct pcm_runtime *rt = chip->pcm;
++ unsigned long flags;
+ int i;
+
+ if (rt) {
+ rt->panic = true;
+
+- if (rt->playback.instance)
++ if (rt->playback.instance) {
++ snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
+ snd_pcm_stop(rt->playback.instance,
+ SNDRV_PCM_STATE_XRUN);
+- if (rt->capture.instance)
++ snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
++ }
++
++ if (rt->capture.instance) {
++ snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
+ snd_pcm_stop(rt->capture.instance,
+ SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
++ }
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb_poison_urb(&rt->in_urbs[i].instance);
+diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
+index c0609c2..84052cf 100644
+--- a/sound/usb/misc/ua101.c
++++ b/sound/usb/misc/ua101.c
+@@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua)
+
+ static void abort_alsa_capture(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
+ snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
++ }
+ }
+
+ static void abort_alsa_playback(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
+ snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
++ }
+ }
+
+ static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index 6ffb371..d5724d8 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
+ struct snd_usX2Y_substream *subs = usX2Y->subs[s];
+ if (subs) {
+ if (atomic_read(&subs->state) >= state_PRERUNNING) {
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
+ snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
+ }
+ for (u = 0; u < NRURBS; u++) {
+ struct urb *urb = subs->urb[u];
diff --git a/3.2.49/4420_grsecurity-2.9.1-3.2.49-201307302311.patch b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch
index 2d76489..cb05b47 100644
--- a/3.2.49/4420_grsecurity-2.9.1-3.2.49-201307302311.patch
+++ b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch
@@ -270,7 +270,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index 2e3d791..7e68b16 100644
+index 0799e8e..71239c7 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -40050,23 +40050,10 @@ index 301b39e..345c414 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 26106c0..af78205 100644
+index 96b9e3c..d9cfb75 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
-@@ -532,8 +532,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
-- for (i = 0; i < num_pages; i++)
-- put_page(page[i]);
-+ int j;
-+
-+ for (j = 0; j < num_pages; j++)
-+ put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
-@@ -1067,7 +1069,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+@@ -1073,7 +1073,7 @@ static int macvtap_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -49140,7 +49127,7 @@ index dede441..f2a2507 100644
WARN_ON(trans->transid != btrfs_header_generation(parent));
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
-index 8d4d53d..d0dec4c 100644
+index 49eefdb..547693e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5642,7 +5642,7 @@ again:
@@ -51318,7 +51305,7 @@ index 60b6ca5..bfa15a7 100644
/* locality groups */
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 9243103..750691a 100644
+index 9b8c131..d469b31 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -54507,10 +54494,10 @@ index c45a2ea..1a6bd66 100644
#ifdef CONFIG_PROC_FS
static int create_proc_exports_entry(void)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 1ec1fde..a58f201 100644
+index 561a3dc..9c46c5e 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
-@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -915,7 +915,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
} else {
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -54519,7 +54506,7 @@ index 1ec1fde..a58f201 100644
set_fs(oldfs);
}
-@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -1019,7 +1019,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -54528,7 +54515,7 @@ index 1ec1fde..a58f201 100644
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;
-@@ -1559,7 +1559,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+@@ -1560,7 +1560,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
*/
oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -54650,18 +54637,10 @@ index e7bc1d7..06bd4bb 100644
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
-index 9fde1c0..55df672 100644
+index 9860f6b..55df672 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
-@@ -118,6 +118,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
- metadata->event_len = FAN_EVENT_METADATA_LEN;
- metadata->metadata_len = FAN_EVENT_METADATA_LEN;
- metadata->vers = FANOTIFY_METADATA_VERSION;
-+ metadata->reserved = 0;
- metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
- metadata->pid = pid_vnr(event->tgid);
- if (unlikely(event->mask & FAN_Q_OVERFLOW))
-@@ -276,7 +277,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+@@ -277,7 +277,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
goto out_close_fd;
ret = -EFAULT;
@@ -58731,10 +58710,10 @@ index 0000000..dc33dcd
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
-index 0000000..1b9afa9
+index 0000000..36845aa
--- /dev/null
+++ b/grsecurity/Makefile
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,42 @@
+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
+# during 2001-2009 it has been completely redesigned by Brad Spengler
+# into an RBAC system
@@ -58752,6 +58731,10 @@ index 0000000..1b9afa9
+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
+ gracl_learn.o grsec_log.o
++ifdef CONFIG_COMPAT
++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
++endif
++
+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
+
+ifdef CONFIG_NET
@@ -58775,10 +58758,10 @@ index 0000000..1b9afa9
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..c8f4c9f
+index 0000000..1c950b2
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4218 @@
+@@ -0,0 +1,4323 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -58882,6 +58865,144 @@ index 0000000..c8f4c9f
+
+DECLARE_BRLOCK(vfsmount_lock);
+
++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
++{
++ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
++ return -EFAULT;
++
++ return 0;
++}
++
++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
++{
++ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
++{
++ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
++ return -EFAULT;
++
++ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
++ return -EINVAL;
++
++ return 0;
++}
++
++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static size_t get_gr_arg_wrapper_size_normal(void)
++{
++ return sizeof(struct gr_arg_wrapper);
++}
++
++#ifdef CONFIG_COMPAT
++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
++extern size_t get_gr_arg_wrapper_size_compat(void);
++
++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
++int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
++size_t (* get_gr_arg_wrapper_size)(void) __read_only;
++
++#else
++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
++#define copy_gr_arg copy_gr_arg_normal
++#define copy_gr_hash_struct copy_gr_hash_struct_normal
++#define copy_acl_object_label copy_acl_object_label_normal
++#define copy_acl_subject_label copy_acl_subject_label_normal
++#define copy_acl_role_label copy_acl_role_label_normal
++#define copy_acl_ip_label copy_acl_ip_label_normal
++#define copy_pointer_from_array copy_pointer_from_array_normal
++#define copy_sprole_pw copy_sprole_pw_normal
++#define copy_role_transition copy_role_transition_normal
++#define copy_role_allowed_ip copy_role_allowed_ip_normal
++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
++#endif
++
+__inline__ int
+gr_acl_is_enabled(void)
+{
@@ -59823,33 +59944,34 @@ index 0000000..c8f4c9f
+ return;
+}
+
-+static __u32
-+count_user_objs(struct acl_object_label *userp)
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++
++static int alloc_and_copy_string(char **name, unsigned int maxlen)
+{
-+ struct acl_object_label o_tmp;
-+ __u32 num = 0;
++ unsigned int len = strnlen_user(*name, maxlen);
++ char *tmp;
+
-+ while (userp) {
-+ if (copy_from_user(&o_tmp, userp,
-+ sizeof (struct acl_object_label)))
-+ break;
++ if (!len || len >= maxlen)
++ return -EINVAL;
+
-+ userp = o_tmp.prev;
-+ num++;
-+ }
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
+
-+ return num;
-+}
++ if (copy_from_user(tmp, *name, len))
++ return -EFAULT;
+
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++ tmp[len-1] = '\0';
++ *name = tmp;
++
++ return 0;
++}
+
+static int
+copy_user_glob(struct acl_object_label *obj)
+{
+ struct acl_object_label *g_tmp, **guser;
-+ unsigned int len;
-+ char *tmp;
++ int error;
+
+ if (obj->globbed == NULL)
+ return 0;
@@ -59861,22 +59983,12 @@ index 0000000..c8f4c9f
+ if (g_tmp == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(g_tmp, *guser,
-+ sizeof (struct acl_object_label)))
++ if (copy_acl_object_label(g_tmp, *guser))
+ return -EFAULT;
+
-+ len = strnlen_user(g_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, g_tmp->filename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ g_tmp->filename = tmp;
++ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
++ if (error)
++ return error;
+
+ *guser = g_tmp;
+ guser = &(g_tmp->next);
@@ -59890,33 +60002,21 @@ index 0000000..c8f4c9f
+ struct acl_role_label *role)
+{
+ struct acl_object_label *o_tmp;
-+ unsigned int len;
+ int ret;
-+ char *tmp;
+
+ while (userp) {
+ if ((o_tmp = (struct acl_object_label *)
+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(o_tmp, userp,
-+ sizeof (struct acl_object_label)))
++ if (copy_acl_object_label(o_tmp, userp))
+ return -EFAULT;
+
+ userp = o_tmp->prev;
+
-+ len = strnlen_user(o_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, o_tmp->filename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ o_tmp->filename = tmp;
++ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
++ if (ret)
++ return ret;
+
+ insert_acl_obj_label(o_tmp, subj);
+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
@@ -59953,8 +60053,7 @@ index 0000000..c8f4c9f
+ __u32 num = 0;
+
+ while (userp) {
-+ if (copy_from_user(&s_tmp, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(&s_tmp, userp))
+ break;
+
+ userp = s_tmp.prev;
@@ -59977,8 +60076,7 @@ index 0000000..c8f4c9f
+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(rtmp, ruserip,
-+ sizeof (struct role_allowed_ip)))
++ if (copy_role_allowed_ip(rtmp, ruserip))
+ return -EFAULT;
+
+ ruserip = rtmp->prev;
@@ -60002,9 +60100,7 @@ index 0000000..c8f4c9f
+copy_user_transitions(struct acl_role_label *rolep)
+{
+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
-+
-+ unsigned int len;
-+ char *tmp;
++ int error;
+
+ rusertp = rolep->transitions;
+
@@ -60015,24 +60111,14 @@ index 0000000..c8f4c9f
+ acl_alloc(sizeof (struct role_transition))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(rtmp, rusertp,
-+ sizeof (struct role_transition)))
++ if (copy_role_transition(rtmp, rusertp))
+ return -EFAULT;
+
+ rusertp = rtmp->prev;
+
-+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= GR_SPROLE_LEN)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, rtmp->rolename, len))
-+ return -EFAULT;
-+ tmp[len-1] = '\0';
-+ rtmp->rolename = tmp;
++ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
++ if (error)
++ return error;
+
+ if (!rlast) {
+ rtmp->prev = NULL;
@@ -60049,12 +60135,26 @@ index 0000000..c8f4c9f
+ return 0;
+}
+
++static __u32 count_user_objs(const struct acl_object_label __user *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_acl_object_label(&o_tmp, userp))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
+{
+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
-+ unsigned int len;
-+ char *tmp;
+ __u32 num_objs;
+ struct acl_ip_label **i_tmp, *i_utmp2;
+ struct gr_hash_struct ghash;
@@ -60088,27 +60188,17 @@ index 0000000..c8f4c9f
+ subjmap->kernel = s_tmp;
+ insert_subj_map_entry(subjmap);
+
-+ if (copy_from_user(s_tmp, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(s_tmp, userp))
+ return ERR_PTR(-EFAULT);
+
-+ len = strnlen_user(s_tmp->filename, PATH_MAX);
-+
-+ if (!len || len >= PATH_MAX)
-+ return ERR_PTR(-EINVAL);
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return ERR_PTR(-ENOMEM);
-+
-+ if (copy_from_user(tmp, s_tmp->filename, len))
-+ return ERR_PTR(-EFAULT);
-+ tmp[len-1] = '\0';
-+ s_tmp->filename = tmp;
++ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
++ if (err)
++ return ERR_PTR(err);
+
+ if (!strcmp(s_tmp->filename, "/"))
+ role->root_label = s_tmp;
+
-+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
+ return ERR_PTR(-EFAULT);
+
+ /* copy user and group transition tables */
@@ -60189,28 +60279,18 @@ index 0000000..c8f4c9f
+ if (!*(i_tmp + i_num))
+ return ERR_PTR(-ENOMEM);
+
-+ if (copy_from_user
-+ (&i_utmp2, s_tmp->ips + i_num,
-+ sizeof (struct acl_ip_label *)))
++ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
+ return ERR_PTR(-EFAULT);
+
-+ if (copy_from_user
-+ (*(i_tmp + i_num), i_utmp2,
-+ sizeof (struct acl_ip_label)))
++ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
+ return ERR_PTR(-EFAULT);
+
+ if ((*(i_tmp + i_num))->iface == NULL)
+ continue;
+
-+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
-+ if (!len || len >= IFNAMSIZ)
-+ return ERR_PTR(-EINVAL);
-+ tmp = acl_alloc(len);
-+ if (tmp == NULL)
-+ return ERR_PTR(-ENOMEM);
-+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
-+ return ERR_PTR(-EFAULT);
-+ (*(i_tmp + i_num))->iface = tmp;
++ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
++ if (err)
++ return ERR_PTR(err);
+ }
+
+ s_tmp->ips = i_tmp;
@@ -60231,8 +60311,7 @@ index 0000000..c8f4c9f
+ int err;
+
+ while (userp) {
-+ if (copy_from_user(&s_pre, userp,
-+ sizeof (struct acl_subject_label)))
++ if (copy_acl_subject_label(&s_pre, userp))
+ return -EFAULT;
+
+ ret = do_copy_user_subj(userp, role, NULL);
@@ -60258,8 +60337,6 @@ index 0000000..c8f4c9f
+ struct gr_hash_struct *ghash;
+ uid_t *domainlist;
+ unsigned int r_num;
-+ unsigned int len;
-+ char *tmp;
+ int err = 0;
+ __u16 i;
+ __u32 num_subjs;
@@ -60280,26 +60357,17 @@ index 0000000..c8f4c9f
+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
+ if (!sptmp)
+ return -ENOMEM;
-+ if (copy_from_user(sptmp, arg->sprole_pws + i,
-+ sizeof (struct sprole_pw)))
++ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
+ return -EFAULT;
+
-+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= GR_SPROLE_LEN)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, sptmp->rolename, len))
-+ return -EFAULT;
++ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
+
-+ tmp[len-1] = '\0';
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Copying special role %s\n", tmp);
++ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
+#endif
-+ sptmp->rolename = tmp;
++
+ acl_special_roles[i] = sptmp;
+ }
+
@@ -60311,27 +60379,15 @@ index 0000000..c8f4c9f
+ if (!r_tmp)
+ return -ENOMEM;
+
-+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
-+ sizeof (struct acl_role_label *)))
++ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
+ return -EFAULT;
+
-+ if (copy_from_user(r_tmp, r_utmp2,
-+ sizeof (struct acl_role_label)))
++ if (copy_acl_role_label(r_tmp, r_utmp2))
+ return -EFAULT;
+
-+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
-+
-+ if (!len || len >= PATH_MAX)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, r_tmp->rolename, len))
-+ return -EFAULT;
-+
-+ tmp[len-1] = '\0';
-+ r_tmp->rolename = tmp;
++ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
+
+ if (!strcmp(r_tmp->rolename, "default")
+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
@@ -60343,7 +60399,7 @@ index 0000000..c8f4c9f
+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
+ return -ENOMEM;
+
-+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
++ if (copy_gr_hash_struct(ghash, r_tmp->hash))
+ return -EFAULT;
+
+ r_tmp->hash = ghash;
@@ -61900,13 +61956,14 @@ index 0000000..c8f4c9f
+}
+
+ssize_t
-+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
+{
+ struct gr_arg_wrapper uwrap;
+ unsigned char *sprole_salt = NULL;
+ unsigned char *sprole_sum = NULL;
-+ int error = sizeof (struct gr_arg_wrapper);
++ int error = 0;
+ int error2 = 0;
++ size_t req_count;
+
+ mutex_lock(&gr_dev_mutex);
+
@@ -61915,8 +61972,42 @@ index 0000000..c8f4c9f
+ goto out;
+ }
+
-+ if (count != sizeof (struct gr_arg_wrapper)) {
-+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
++#ifdef CONFIG_COMPAT
++ pax_open_kernel();
++ if (is_compat_task()) {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++ copy_gr_arg = &copy_gr_arg_compat;
++ copy_acl_object_label = &copy_acl_object_label_compat;
++ copy_acl_subject_label = &copy_acl_subject_label_compat;
++ copy_acl_role_label = &copy_acl_role_label_compat;
++ copy_acl_ip_label = &copy_acl_ip_label_compat;
++ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++ copy_role_transition = &copy_role_transition_compat;
++ copy_sprole_pw = &copy_sprole_pw_compat;
++ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++ copy_pointer_from_array = &copy_pointer_from_array_compat;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++ } else {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++ copy_gr_arg = &copy_gr_arg_normal;
++ copy_acl_object_label = &copy_acl_object_label_normal;
++ copy_acl_subject_label = &copy_acl_subject_label_normal;
++ copy_acl_role_label = &copy_acl_role_label_normal;
++ copy_acl_ip_label = &copy_acl_ip_label_normal;
++ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++ copy_role_transition = &copy_role_transition_normal;
++ copy_sprole_pw = &copy_sprole_pw_normal;
++ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++ copy_pointer_from_array = &copy_pointer_from_array_normal;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++ }
++ pax_close_kernel();
++#endif
++
++ req_count = get_gr_arg_wrapper_size();
++
++ if (count != req_count) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
+ error = -EINVAL;
+ goto out;
+ }
@@ -61927,20 +62018,13 @@ index 0000000..c8f4c9f
+ gr_auth_attempts = 0;
+ }
+
-+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
-+ error = -EFAULT;
-+ goto out;
-+ }
-+
-+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
-+ error = -EINVAL;
++ error = copy_gr_arg_wrapper(buf, &uwrap);
++ if (error)
+ goto out;
-+ }
+
-+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
-+ error = -EFAULT;
++ error = copy_gr_arg(uwrap.arg, gr_usermode);
++ if (error)
+ goto out;
-+ }
+
+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
@@ -62133,6 +62217,10 @@ index 0000000..c8f4c9f
+
+ out:
+ mutex_unlock(&gr_dev_mutex);
++
++ if (!error)
++ error = req_count;
++
+ return error;
+}
+
@@ -63215,6 +63303,281 @@ index 0000000..955ddfb
+ return 0;
+}
+
+diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
+new file mode 100644
+index 0000000..a43dd06
+--- /dev/null
++++ b/grsecurity/gracl_compat.c
+@@ -0,0 +1,269 @@
++#include <linux/kernel.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++#include <linux/gracl_compat.h>
++
++#include <asm/uaccess.h>
++
++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
++{
++ struct gr_arg_wrapper_compat uwrapcompat;
++
++ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
++ return -EFAULT;
++
++ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
++ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
++ return -EINVAL;
++
++ uwrap->arg = compat_ptr(uwrapcompat.arg);
++ uwrap->version = uwrapcompat.version;
++ uwrap->size = sizeof(struct gr_arg);
++
++ return 0;
++}
++
++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ struct gr_arg_compat argcompat;
++
++ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
++ return -EFAULT;
++
++ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
++ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
++ arg->role_db.num_roles = argcompat.role_db.num_roles;
++ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
++ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
++ arg->role_db.num_objects = argcompat.role_db.num_objects;
++
++ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
++ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
++ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
++ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
++ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
++ arg->segv_device = argcompat.segv_device;
++ arg->segv_inode = argcompat.segv_inode;
++ arg->segv_uid = argcompat.segv_uid;
++ arg->num_sprole_pws = argcompat.num_sprole_pws;
++ arg->mode = argcompat.mode;
++
++ return 0;
++}
++
++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ struct acl_object_label_compat objcompat;
++
++ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
++ return -EFAULT;
++
++ obj->filename = compat_ptr(objcompat.filename);
++ obj->inode = objcompat.inode;
++ obj->device = objcompat.device;
++ obj->mode = objcompat.mode;
++
++ obj->nested = compat_ptr(objcompat.nested);
++ obj->globbed = compat_ptr(objcompat.globbed);
++
++ obj->prev = compat_ptr(objcompat.prev);
++ obj->next = compat_ptr(objcompat.next);
++
++ return 0;
++}
++
++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ unsigned int i;
++ struct acl_subject_label_compat subjcompat;
++
++ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
++ return -EFAULT;
++
++ subj->filename = compat_ptr(subjcompat.filename);
++ subj->inode = subjcompat.inode;
++ subj->device = subjcompat.device;
++ subj->mode = subjcompat.mode;
++ subj->cap_mask = subjcompat.cap_mask;
++ subj->cap_lower = subjcompat.cap_lower;
++ subj->cap_invert_audit = subjcompat.cap_invert_audit;
++
++ for (i = 0; i < GR_NLIMITS; i++) {
++ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_cur = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
++ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_max = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
++ }
++ subj->resmask = subjcompat.resmask;
++
++ subj->user_trans_type = subjcompat.user_trans_type;
++ subj->group_trans_type = subjcompat.group_trans_type;
++ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
++ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
++ subj->user_trans_num = subjcompat.user_trans_num;
++ subj->group_trans_num = subjcompat.group_trans_num;
++
++ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
++ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
++ subj->ip_type = subjcompat.ip_type;
++ subj->ips = compat_ptr(subjcompat.ips);
++ subj->ip_num = subjcompat.ip_num;
++ subj->inaddr_any_override = subjcompat.inaddr_any_override;
++
++ subj->crashes = subjcompat.crashes;
++ subj->expires = subjcompat.expires;
++
++ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
++ subj->hash = compat_ptr(subjcompat.hash);
++ subj->prev = compat_ptr(subjcompat.prev);
++ subj->next = compat_ptr(subjcompat.next);
++
++ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
++ subj->obj_hash_size = subjcompat.obj_hash_size;
++ subj->pax_flags = subjcompat.pax_flags;
++
++ return 0;
++}
++
++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ struct acl_role_label_compat rolecompat;
++
++ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
++ return -EFAULT;
++
++ role->rolename = compat_ptr(rolecompat.rolename);
++ role->uidgid = rolecompat.uidgid;
++ role->roletype = rolecompat.roletype;
++
++ role->auth_attempts = rolecompat.auth_attempts;
++ role->expires = rolecompat.expires;
++
++ role->root_label = compat_ptr(rolecompat.root_label);
++ role->hash = compat_ptr(rolecompat.hash);
++
++ role->prev = compat_ptr(rolecompat.prev);
++ role->next = compat_ptr(rolecompat.next);
++
++ role->transitions = compat_ptr(rolecompat.transitions);
++ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
++ role->domain_children = compat_ptr(rolecompat.domain_children);
++ role->domain_child_num = rolecompat.domain_child_num;
++
++ role->umask = rolecompat.umask;
++
++ role->subj_hash = compat_ptr(rolecompat.subj_hash);
++ role->subj_hash_size = rolecompat.subj_hash_size;
++
++ return 0;
++}
++
++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ struct role_allowed_ip_compat roleip_compat;
++
++ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
++ return -EFAULT;
++
++ roleip->addr = roleip_compat.addr;
++ roleip->netmask = roleip_compat.netmask;
++
++ roleip->prev = compat_ptr(roleip_compat.prev);
++ roleip->next = compat_ptr(roleip_compat.next);
++
++ return 0;
++}
++
++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
++{
++ struct role_transition_compat trans_compat;
++
++ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
++ return -EFAULT;
++
++ trans->rolename = compat_ptr(trans_compat.rolename);
++
++ trans->prev = compat_ptr(trans_compat.prev);
++ trans->next = compat_ptr(trans_compat.next);
++
++ return 0;
++
++}
++
++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ struct gr_hash_struct_compat hash_compat;
++
++ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
++ return -EFAULT;
++
++ hash->table = compat_ptr(hash_compat.table);
++ hash->nametable = compat_ptr(hash_compat.nametable);
++ hash->first = compat_ptr(hash_compat.first);
++
++ hash->table_size = hash_compat.table_size;
++ hash->used_size = hash_compat.used_size;
++
++ hash->type = hash_compat.type;
++
++ return 0;
++}
++
++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
++{
++ compat_uptr_t ptrcompat;
++
++ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
++ return -EFAULT;
++
++ *(void **)ptr = compat_ptr(ptrcompat);
++
++ return 0;
++}
++
++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ struct acl_ip_label_compat ip_compat;
++
++ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
++ return -EFAULT;
++
++ ip->iface = compat_ptr(ip_compat.iface);
++ ip->addr = ip_compat.addr;
++ ip->netmask = ip_compat.netmask;
++ ip->low = ip_compat.low;
++ ip->high = ip_compat.high;
++ ip->mode = ip_compat.mode;
++ ip->type = ip_compat.type;
++
++ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
++
++ ip->prev = compat_ptr(ip_compat.prev);
++ ip->next = compat_ptr(ip_compat.next);
++
++ return 0;
++}
++
++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ struct sprole_pw_compat pw_compat;
++
++ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
++ return -EFAULT;
++
++ pw->rolename = compat_ptr(pw_compat.rolename);
++ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
++ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
++
++ return 0;
++}
++
++size_t get_gr_arg_wrapper_size_compat(void)
++{
++ return sizeof(struct gr_arg_wrapper_compat);
++}
++
diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
new file mode 100644
index 0000000..b20f6e9
@@ -69868,6 +70231,168 @@ index 0000000..ebe6d72
+
+#endif
+
+diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
+new file mode 100644
+index 0000000..33ebd1f
+--- /dev/null
++++ b/include/linux/gracl_compat.h
+@@ -0,0 +1,156 @@
++#ifndef GR_ACL_COMPAT_H
++#define GR_ACL_COMPAT_H
++
++#include <linux/resource.h>
++#include <asm/resource.h>
++
++struct sprole_pw_compat {
++ compat_uptr_t rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++};
++
++struct gr_hash_struct_compat {
++ compat_uptr_t table;
++ compat_uptr_t nametable;
++ compat_uptr_t first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++struct acl_subject_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++ kernel_cap_t cap_invert_audit;
++
++ struct compat_rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ compat_uptr_t user_transitions;
++ compat_uptr_t group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 sock_families[2];
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ compat_uptr_t ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ compat_ulong_t expires;
++
++ compat_uptr_t parent_subject;
++ compat_uptr_t hash;
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip_compat {
++ __u32 addr;
++ __u32 netmask;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct role_transition_compat {
++ compat_uptr_t rolename;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_role_label_compat {
++ compat_uptr_t rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ compat_ulong_t expires;
++
++ compat_uptr_t root_label;
++ compat_uptr_t hash;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t transitions;
++ compat_uptr_t allowed_ips;
++ compat_uptr_t domain_children;
++ __u16 domain_child_num;
++
++ umode_t umask;
++
++ compat_uptr_t subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db_compat {
++ compat_uptr_t r_table;
++ __u32 num_pointers;
++ __u32 num_roles;
++ __u32 num_domain_children;
++ __u32 num_subjects;
++ __u32 num_objects;
++};
++
++struct acl_object_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++
++ compat_uptr_t nested;
++ compat_uptr_t globbed;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_ip_label_compat {
++ compat_uptr_t iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct gr_arg_compat {
++ struct user_acl_role_db_compat role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ compat_uptr_t sprole_pws;
++ __u32 segv_device;
++ compat_ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper_compat {
++ compat_uptr_t arg;
++ __u32 version;
++ __u32 size;
++};
++
++#endif
diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
new file mode 100644
index 0000000..323ecf2
@@ -70711,7 +71236,7 @@ index a6deef4..c56a7f2 100644
and pointers */
#endif
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
-index b5f927f..929b882d 100644
+index 732c962..61c3f70 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -203,7 +203,7 @@ struct pppox_proto {
@@ -74398,18 +74923,6 @@ index fe46019..1422c5a 100644
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
-diff --git a/include/net/udp.h b/include/net/udp.h
-index 3b285f4..e158330 100644
---- a/include/net/udp.h
-+++ b/include/net/udp.h
-@@ -180,6 +180,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
- extern void udp_err(struct sk_buff *, u32);
- extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
-+extern int udp_push_pending_frames(struct sock *sk);
- extern void udp_flush_pending_frames(struct sock *sk);
- extern int udp_rcv(struct sk_buff *skb);
- extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 921f627..4ec32de 100644
--- a/include/net/xfrm.h
@@ -80843,10 +81356,10 @@ index 6fdc629..55739fe 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 0ec6c34..8670e8b 100644
+index a584ad9..ba946c6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2645,7 +2645,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -2653,7 +2653,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -80855,7 +81368,7 @@ index 0ec6c34..8670e8b 100644
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
-@@ -4236,10 +4236,9 @@ static const struct file_operations tracing_dyn_info_fops = {
+@@ -4244,10 +4244,9 @@ static const struct file_operations tracing_dyn_info_fops = {
};
#endif
@@ -80867,7 +81380,7 @@ index 0ec6c34..8670e8b 100644
static int once;
if (d_tracer)
-@@ -4259,10 +4258,9 @@ struct dentry *tracing_init_dentry(void)
+@@ -4267,10 +4266,9 @@ struct dentry *tracing_init_dentry(void)
return d_tracer;
}
@@ -87276,10 +87789,10 @@ index 82ce164..00bd057 100644
err = -EFAULT;
break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index 5ac1811..7eb2320 100644
+index b81500c..92fc8ec 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
-@@ -1408,7 +1408,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+@@ -1409,7 +1409,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
nexthdr = ip6h->nexthdr;
offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
@@ -89605,7 +90118,7 @@ index 2e0f0af..e2948bf 100644
syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 5a65eea..79830d4 100644
+index 5decc93..79830d4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -86,6 +86,7 @@
@@ -89637,24 +90150,7 @@ index 5a65eea..79830d4 100644
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
-@@ -766,7 +774,7 @@ send:
- /*
- * Push out all pending data as one UDP datagram. Socket is locked.
- */
--static int udp_push_pending_frames(struct sock *sk)
-+int udp_push_pending_frames(struct sock *sk)
- {
- struct udp_sock *up = udp_sk(sk);
- struct inet_sock *inet = inet_sk(sk);
-@@ -785,6 +793,7 @@ out:
- up->pending = 0;
- return err;
- }
-+EXPORT_SYMBOL(udp_push_pending_frames);
-
- int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len)
-@@ -856,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
@@ -89673,7 +90169,7 @@ index 5a65eea..79830d4 100644
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
-@@ -1099,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
+@@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
udp_lib_checksum_complete(skb)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
@@ -89682,7 +90178,7 @@ index 5a65eea..79830d4 100644
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
-@@ -1185,6 +1203,10 @@ try_again:
+@@ -1186,6 +1203,10 @@ try_again:
if (!skb)
goto out;
@@ -89693,7 +90189,7 @@ index 5a65eea..79830d4 100644
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
-@@ -1487,7 +1509,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+@@ -1488,7 +1509,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -89702,7 +90198,7 @@ index 5a65eea..79830d4 100644
kfree_skb(skb);
return -1;
}
-@@ -1506,7 +1528,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+@@ -1507,7 +1528,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -89711,7 +90207,7 @@ index 5a65eea..79830d4 100644
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -1675,6 +1697,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -1676,6 +1697,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -89721,7 +90217,7 @@ index 5a65eea..79830d4 100644
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
-@@ -2098,8 +2123,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -2099,8 +2123,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
@@ -89738,10 +90234,10 @@ index 5a65eea..79830d4 100644
int udp4_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index d603caa..dca1994 100644
+index 314bda2..9503a4f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
-@@ -2151,7 +2151,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
+@@ -2159,7 +2159,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
p.iph.ihl = 5;
p.iph.protocol = IPPROTO_IPV6;
p.iph.ttl = 64;
@@ -89810,79 +90306,6 @@ index 1567fb1..29af910 100644
__sk_dst_reset(sk);
dst = NULL;
}
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
-index 6aadaa8..db60043 100644
---- a/net/ipv6/ip6_output.c
-+++ b/net/ipv6/ip6_output.c
-@@ -909,11 +909,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
- const struct flowi6 *fl6)
- {
- struct ipv6_pinfo *np = inet6_sk(sk);
-- struct rt6_info *rt = (struct rt6_info *)dst;
-+ struct rt6_info *rt;
-
- if (!dst)
- goto out;
-
-+ if (dst->ops->family != AF_INET6) {
-+ dst_release(dst);
-+ return NULL;
-+ }
-+
-+ rt = (struct rt6_info *)dst;
- /* Yes, checking route validity in not connected
- * case is not very simple. Take into account,
- * that we do not support routing by source, TOS,
-@@ -1178,11 +1184,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
- return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
- }
-
--static void ip6_append_data_mtu(int *mtu,
-+static void ip6_append_data_mtu(unsigned int *mtu,
- int *maxfraglen,
- unsigned int fragheaderlen,
- struct sk_buff *skb,
-- struct rt6_info *rt)
-+ struct rt6_info *rt,
-+ bool pmtuprobe)
- {
- if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
- if (skb == NULL) {
-@@ -1194,7 +1201,9 @@ static void ip6_append_data_mtu(int *mtu,
- * this fragment is not first, the headers
- * space is regarded as data space.
- */
-- *mtu = dst_mtu(rt->dst.path);
-+ *mtu = min(*mtu, pmtuprobe ?
-+ rt->dst.dev->mtu :
-+ dst_mtu(rt->dst.path));
- }
- *maxfraglen = ((*mtu - fragheaderlen) & ~7)
- + fragheaderlen - sizeof(struct frag_hdr);
-@@ -1211,11 +1220,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet_cork *cork;
- struct sk_buff *skb, *skb_prev = NULL;
-- unsigned int maxfraglen, fragheaderlen;
-+ unsigned int maxfraglen, fragheaderlen, mtu;
- int exthdrlen;
- int dst_exthdrlen;
- int hh_len;
-- int mtu;
- int copy;
- int err;
- int offset = 0;
-@@ -1378,7 +1386,9 @@ alloc_new_skb:
- /* update mtu and maxfraglen if necessary */
- if (skb == NULL || skb_prev == NULL)
- ip6_append_data_mtu(&mtu, &maxfraglen,
-- fragheaderlen, skb, rt);
-+ fragheaderlen, skb, rt,
-+ np->pmtudisc ==
-+ IPV6_PMTUDISC_PROBE);
-
- skb_prev = skb;
-
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index b204df8..8f274f4 100644
--- a/net/ipv6/ipv6_sockglue.c
@@ -90206,7 +90629,7 @@ index c69358c..d1e5855 100644
static int tcp6_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index 20f0812..729da61 100644
+index f9e496b..729da61 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -50,6 +50,10 @@
@@ -90257,25 +90680,7 @@ index 20f0812..729da61 100644
bh_unlock_sock(sk);
sock_put(sk);
goto discard;
-@@ -893,11 +900,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
- struct udphdr *uh;
- struct udp_sock *up = udp_sk(sk);
- struct inet_sock *inet = inet_sk(sk);
-- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
-+ struct flowi6 *fl6;
- int err = 0;
- int is_udplite = IS_UDPLITE(sk);
- __wsum csum = 0;
-
-+ if (up->pending == AF_INET)
-+ return udp_push_pending_frames(sk);
-+
-+ fl6 = &inet->cork.fl.u.ip6;
-+
- /* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
- goto out;
-@@ -1407,8 +1419,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
+@@ -1412,8 +1419,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
@@ -90491,26 +90896,10 @@ index 403be43..87f09da 100644
};
diff --git a/net/key/af_key.c b/net/key/af_key.c
-index 1e733e9..b603137 100644
+index 6fefdfc..b603137 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
-@@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c)
- hdr->sadb_msg_version = PF_KEY_V2;
- hdr->sadb_msg_errno = (uint8_t) 0;
- hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
-+ hdr->sadb_msg_reserved = 0;
-
- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
-
-@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
- hdr->sadb_msg_version = PF_KEY_V2;
- hdr->sadb_msg_errno = (uint8_t) 0;
- hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
-+ hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
- return 0;
-
-@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
+@@ -3018,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
static u32 get_acqseq(void)
{
u32 res;
@@ -90560,20 +90949,6 @@ index 93a41a0..d4b4edb 100644
NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
-diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
-index 74410e6..e579006 100644
---- a/net/l2tp/l2tp_ppp.c
-+++ b/net/l2tp/l2tp_ppp.c
-@@ -1778,7 +1778,8 @@ static const struct proto_ops pppol2tp_ops = {
-
- static const struct pppox_proto pppol2tp_proto = {
- .create = pppol2tp_create,
-- .ioctl = pppol2tp_ioctl
-+ .ioctl = pppol2tp_ioctl,
-+ .owner = THIS_MODULE,
- };
-
- #ifdef CONFIG_L2TP_V3
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 73495f1..ad51356 100644
--- a/net/mac80211/ieee80211_i.h
diff --git a/3.2.49/4425_grsec_remove_EI_PAX.patch b/3.2.50/4425_grsec_remove_EI_PAX.patch
index 7d06ac2..7d06ac2 100644
--- a/3.2.49/4425_grsec_remove_EI_PAX.patch
+++ b/3.2.50/4425_grsec_remove_EI_PAX.patch
diff --git a/3.2.49/4427_force_XATTR_PAX_tmpfs.patch b/3.2.50/4427_force_XATTR_PAX_tmpfs.patch
index 8c7a533..8c7a533 100644
--- a/3.2.49/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.2.50/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.2.49/4430_grsec-remove-localversion-grsec.patch b/3.2.50/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.49/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.50/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.49/4435_grsec-mute-warnings.patch b/3.2.50/4435_grsec-mute-warnings.patch
index f099757..f099757 100644
--- a/3.2.49/4435_grsec-mute-warnings.patch
+++ b/3.2.50/4435_grsec-mute-warnings.patch
diff --git a/3.2.49/4440_grsec-remove-protected-paths.patch b/3.2.50/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.49/4440_grsec-remove-protected-paths.patch
+++ b/3.2.50/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.49/4450_grsec-kconfig-default-gids.patch b/3.2.50/4450_grsec-kconfig-default-gids.patch
index c882e28..c882e28 100644
--- a/3.2.49/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.50/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.49/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.50/4465_selinux-avc_audit-log-curr_ip.patch
index 5607ab4..5607ab4 100644
--- a/3.2.49/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.50/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.49/4470_disable-compat_vdso.patch b/3.2.50/4470_disable-compat_vdso.patch
index 99c691b..99c691b 100644
--- a/3.2.49/4470_disable-compat_vdso.patch
+++ b/3.2.50/4470_disable-compat_vdso.patch
diff --git a/3.2.49/4475_emutramp_default_on.patch b/3.2.50/4475_emutramp_default_on.patch
index 30f6978..30f6978 100644
--- a/3.2.49/4475_emutramp_default_on.patch
+++ b/3.2.50/4475_emutramp_default_on.patch