|
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/err.h>
47 | 48 |
|
48 | 49 | /**
|
49 | 50 | * sk_filter - run a packet through a socket filter
|
@@ -813,8 +814,12 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
|
813 | 814 |
|
814 | 815 | static void __bpf_prog_release(struct bpf_prog *prog)
|
815 | 816 | {
|
816 |
| - bpf_release_orig_filter(prog); |
817 |
| - bpf_prog_free(prog); |
| 817 | + if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { |
| 818 | + bpf_prog_put(prog); |
| 819 | + } else { |
| 820 | + bpf_release_orig_filter(prog); |
| 821 | + bpf_prog_free(prog); |
| 822 | + } |
818 | 823 | }
|
819 | 824 |
|
820 | 825 | static void __sk_filter_release(struct sk_filter *fp)
|
@@ -1088,6 +1093,94 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
|
1088 | 1093 | }
|
1089 | 1094 | EXPORT_SYMBOL_GPL(sk_attach_filter);
|
1090 | 1095 |
|
| 1096 | +#ifdef CONFIG_BPF_SYSCALL |
| 1097 | +int sk_attach_bpf(u32 ufd, struct sock *sk) |
| 1098 | +{ |
| 1099 | + struct sk_filter *fp, *old_fp; |
| 1100 | + struct bpf_prog *prog; |
| 1101 | + |
| 1102 | + if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
| 1103 | + return -EPERM; |
| 1104 | + |
| 1105 | + prog = bpf_prog_get(ufd); |
| 1106 | + if (!prog) |
| 1107 | + return -EINVAL; |
| 1108 | + |
| 1109 | + if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) { |
| 1110 | + /* valid fd, but invalid program type */ |
| 1111 | + bpf_prog_put(prog); |
| 1112 | + return -EINVAL; |
| 1113 | + } |
| 1114 | + |
| 1115 | + fp = kmalloc(sizeof(*fp), GFP_KERNEL); |
| 1116 | + if (!fp) { |
| 1117 | + bpf_prog_put(prog); |
| 1118 | + return -ENOMEM; |
| 1119 | + } |
| 1120 | + fp->prog = prog; |
| 1121 | + |
| 1122 | + atomic_set(&fp->refcnt, 0); |
| 1123 | + |
| 1124 | + if (!sk_filter_charge(sk, fp)) { |
| 1125 | + __sk_filter_release(fp); |
| 1126 | + return -ENOMEM; |
| 1127 | + } |
| 1128 | + |
| 1129 | + old_fp = rcu_dereference_protected(sk->sk_filter, |
| 1130 | + sock_owned_by_user(sk)); |
| 1131 | + rcu_assign_pointer(sk->sk_filter, fp); |
| 1132 | + |
| 1133 | + if (old_fp) |
| 1134 | + sk_filter_uncharge(sk, old_fp); |
| 1135 | + |
| 1136 | + return 0; |
| 1137 | +} |
| 1138 | + |
| 1139 | +/* allow socket filters to call |
| 1140 | + * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem() |
| 1141 | + */ |
| 1142 | +static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id) |
| 1143 | +{ |
| 1144 | + switch (func_id) { |
| 1145 | + case BPF_FUNC_map_lookup_elem: |
| 1146 | + return &bpf_map_lookup_elem_proto; |
| 1147 | + case BPF_FUNC_map_update_elem: |
| 1148 | + return &bpf_map_update_elem_proto; |
| 1149 | + case BPF_FUNC_map_delete_elem: |
| 1150 | + return &bpf_map_delete_elem_proto; |
| 1151 | + default: |
| 1152 | + return NULL; |
| 1153 | + } |
| 1154 | +} |
| 1155 | + |
/* Context-access check for socket-filter programs: unconditionally
 * rejects every (offset, size, access-type) combination, i.e. no direct
 * loads from or stores to the skb context are permitted yet.
 */
static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* skb fields cannot be accessed yet */
	return false;
}
| 1161 | + |
| 1162 | +static struct bpf_verifier_ops sock_filter_ops = { |
| 1163 | + .get_func_proto = sock_filter_func_proto, |
| 1164 | + .is_valid_access = sock_filter_is_valid_access, |
| 1165 | +}; |
| 1166 | + |
| 1167 | +static struct bpf_prog_type_list tl = { |
| 1168 | + .ops = &sock_filter_ops, |
| 1169 | + .type = BPF_PROG_TYPE_SOCKET_FILTER, |
| 1170 | +}; |
| 1171 | + |
| 1172 | +static int __init register_sock_filter_ops(void) |
| 1173 | +{ |
| 1174 | + bpf_register_prog_type(&tl); |
| 1175 | + return 0; |
| 1176 | +} |
| 1177 | +late_initcall(register_sock_filter_ops); |
#else
/* CONFIG_BPF_SYSCALL disabled: eBPF programs cannot be loaded, so
 * attaching one to a socket is unsupported.
 */
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	return -EOPNOTSUPP;
}
#endif
1091 | 1184 | int sk_detach_filter(struct sock *sk)
|
1092 | 1185 | {
|
1093 | 1186 | int ret = -ENOENT;
|
|
0 commit comments